Commit 6ac3a017 authored by Tsung-Yu Chan's avatar Tsung-Yu Chan

feat / 2-layer MIMO with ML receiver

parent f15fe822
......@@ -1054,6 +1054,7 @@ set(PHY_SRC_COMMON
${OPENAIR1_DIR}/PHY/TOOLS/dB_routines.c
${OPENAIR1_DIR}/PHY/TOOLS/sqrt.c
${OPENAIR1_DIR}/PHY/TOOLS/lut.c
${OPENAIR1_DIR}/PHY/TOOLS/simde_operations.c
)
set(PHY_SRC
......@@ -1190,6 +1191,7 @@ set(PHY_SRC_UE
${OPENAIR1_DIR}/PHY/TOOLS/dB_routines.c
${OPENAIR1_DIR}/PHY/TOOLS/sqrt.c
${OPENAIR1_DIR}/PHY/TOOLS/lut.c
${OPENAIR1_DIR}/PHY/TOOLS/simde_operations.c
${PHY_POLARSRC}
${PHY_SMALLBLOCKSRC}
${PHY_NR_CODINGIF}
......@@ -1243,6 +1245,7 @@ set(PHY_SRC_UE
${OPENAIR1_DIR}/PHY/TOOLS/dB_routines.c
${OPENAIR1_DIR}/PHY/TOOLS/sqrt.c
${OPENAIR1_DIR}/PHY/TOOLS/lut.c
${OPENAIR1_DIR}/PHY/TOOLS/simde_operations.c
${OPENAIR1_DIR}/PHY/INIT/nr_init_ue.c
# ${OPENAIR1_DIR}/SIMULATION/NR_UE_PHY/unit_tests/src/pucch_uci_test.c
${PHY_POLARSRC}
......@@ -1293,6 +1296,7 @@ set(PHY_MEX_UE
${OPENAIR1_DIR}/PHY/TOOLS/cmult_vv.c
${OPENAIR1_DIR}/PHY/LTE_UE_TRANSPORT/dlsch_llr_computation_avx2.c
${OPENAIR1_DIR}/PHY/TOOLS/signal_energy.c
${OPENAIR1_DIR}/PHY/TOOLS/simde_operations.c
${OPENAIR1_DIR}/PHY/LTE_ESTIMATION/lte_ue_measurements.c
${OPENAIR_DIR}/common/utils/LOG/log.c
${OPENAIR_DIR}/common/utils/T/T.c
......
......@@ -214,6 +214,25 @@ void nr_ulsch_channel_compensation(int **rxdataF_ext,
*/
void nr_idft(int32_t *z, uint32_t Msc_PUSCH);
void nr_ulsch_qpsk_qpsk(c16_t *stream0_in, c16_t *stream1_in, c16_t *stream0_out, c16_t *rho01, uint32_t length);
void nr_ulsch_qam16_qam16(c16_t *stream0_in,
c16_t *stream1_in,
c16_t *ch_mag,
c16_t *ch_mag_i,
c16_t *stream0_out,
c16_t *rho01,
uint32_t length);
void nr_ulsch_qam64_qam64(c16_t *stream0_in,
c16_t *stream1_in,
c16_t *ch_mag,
c16_t *ch_mag_i,
c16_t *stream0_out,
c16_t *rho01,
uint32_t length);
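/* Editor's note (hedged sketch, not part of the commit): the two-stream
 * demodulators above produce max-log ML LLRs for a layer pair after matched
 * filtering. rho01 is the per-RE cross-layer channel correlation
 * sum_r conj(h0_r)*h1_r, quantized with the same shift as the compensated
 * signal. A floating-point brute-force reference for the QPSK/QPSK case
 * (function name and types here are illustrative only): */
#include <complex.h>
#include <math.h>
static void qpsk_qpsk_maxlog_ref(double complex z0, double complex z1,
                                 double complex rho01, double llr0[2])
{
  // For unit-energy QPSK the joint metric reduces to
  //   m(x0,x1) = Re{conj(x0) z0} + Re{conj(x1) z1} - Re{conj(x0) rho01 x1}
  // where z0, z1 are matched-filter outputs summed over rx antennas.
  const double s = 0.7071067811865476; // 1/sqrt(2)
  double best[2][2] = {{-INFINITY, -INFINITY}, {-INFINITY, -INFINITY}};
  for (int i = 0; i < 4; i++) {
    double complex x0 = s * (((i & 1) ? -1 : 1) + ((i & 2) ? -1 : 1) * I);
    for (int j = 0; j < 4; j++) {
      double complex x1 = s * (((j & 1) ? -1 : 1) + ((j & 2) ? -1 : 1) * I);
      double m = creal(conj(x0) * z0) + creal(conj(x1) * z1)
               - creal(conj(x0) * rho01 * x1);
      if (m > best[0][i & 1]) best[0][i & 1] = m;               // bit 0 <-> sign(Re{x0})
      if (m > best[1][(i >> 1) & 1]) best[1][(i >> 1) & 1] = m; // bit 1 <-> sign(Im{x0})
    }
  }
  llr0[0] = best[0][0] - best[0][1]; // max-log LLRs for stream 0
  llr0[1] = best[1][0] - best[1][1];
}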
/** \brief This function generates log-likelihood ratios (decoder input) for single-stream QPSK received waveforms.
@param rxdataF_comp Compensated channel output
@param ulsch_llr llr output
......
......@@ -28,7 +28,7 @@
#include <omp.h>
#ifdef __aarch64__
#define USE_128BIT 1
#define USE_128BIT
#endif
static inline
......@@ -53,7 +53,6 @@ int64_t time_now_us(void)
void nr_idft (int32_t *z,
uint32_t Msc_PUSCH)
{
simde__m128i idft_in128[1][3240], idft_out128[1][3240];
simde__m128i norm128;
int16_t *idft_in0 = (int16_t*)idft_in128[0], *idft_out0 = (int16_t*)idft_out128[0];
......@@ -553,20 +552,21 @@ void nr_ulsch_extract_rbs(c16_t **rxdataF,
printf("--------------------symbol = %d-----------------------\n", symbol);
printf("--------------------ch_ext_index = %d-----------------------\n", symbol*NR_NB_SC_PER_RB * pusch_pdu->rb_size);
#endif
int delta=0;
int start_re = (frame_parms->first_carrier_offset + (pusch_pdu->rb_start + pusch_pdu->bwp_start) * NR_NB_SC_PER_RB)%frame_parms->ofdm_symbol_size;
int nb_re_pusch = NR_NB_SC_PER_RB * pusch_pdu->rb_size;
int nb_re_pusch2 = nb_re_pusch + (nb_re_pusch&7);
for (int aatx = 0; aatx < pusch_pdu->nrOfLayers; aatx++)
{
for (aarx = 0; aarx < frame_parms->nb_antennas_rx; aarx++)
{
rxF = (int16_t *)&rxdataF[aarx][soffset+(symbol * frame_parms->ofdm_symbol_size)];
rxF_ext = (int16_t *)&pusch_vars->rxdataF_ext[aarx][symbol * nb_re_pusch2]; // [hna] rxdataF_ext isn't contiguous in order to solve an alignment problem in llr computation in case of mod_order = 4, 6
ul_ch0 = &pusch_vars->ul_ch_estimates[aarx][pusch_vars->dmrs_symbol*frame_parms->ofdm_symbol_size]; // update channel estimates if new dmrs symbol are available
ul_ch0 = &pusch_vars->ul_ch_estimates[aatx * frame_parms->nb_antennas_rx + aarx][pusch_vars->dmrs_symbol*frame_parms->ofdm_symbol_size]; // update channel estimates if new dmrs symbol are available
ul_ch0_ext = &pusch_vars->ul_ch_estimates_ext[aarx][symbol*nb_re_pusch2];
ul_ch0_ext = &pusch_vars->ul_ch_estimates_ext[aatx * frame_parms->nb_antennas_rx + aarx][symbol*nb_re_pusch2];
if (is_dmrs_symbol == 0)
{
......@@ -813,7 +813,7 @@ void nr_ulsch_extract_rbs(c16_t **rxdataF,
}
}
#endif
}
}
}
......@@ -828,12 +828,6 @@ void nr_ulsch_scale_channel(int **ul_ch_estimates_ext,
int shift_ch_ext)
{
short rb;
unsigned char aarx,aatx;
simde__m128i *ul_ch128;
// Determine scaling amplitude based on the symbol
int b = 3;
short ch_amp = 1024 * 8;
......@@ -923,20 +917,6 @@ void nr_ulsch_channel_level(int **ul_ch_estimates_ext,
simde_m_empty();
}
static simde__m128i a_mult_conjb(simde__m128i a, simde__m128i b, unsigned char output_shift)
{
simde__m128i mmtmpD0 = simde_mm_madd_epi16(b, a);
simde__m128i mmtmpD1 = simde_mm_shufflelo_epi16(b, SIMDE_MM_SHUFFLE(2, 3, 0, 1));
mmtmpD1 = simde_mm_shufflehi_epi16(mmtmpD1, SIMDE_MM_SHUFFLE(2, 3, 0, 1));
mmtmpD1 = simde_mm_sign_epi16(mmtmpD1, *(simde__m128i *)&conjugate[0]);
mmtmpD1 = simde_mm_madd_epi16(mmtmpD1, a);
mmtmpD0 = simde_mm_srai_epi32(mmtmpD0, output_shift);
mmtmpD1 = simde_mm_srai_epi32(mmtmpD1, output_shift);
simde__m128i mmtmpD2 = simde_mm_unpacklo_epi32(mmtmpD0, mmtmpD1);
simde__m128i mmtmpD3 = simde_mm_unpackhi_epi32(mmtmpD0, mmtmpD1);
return simde_mm_packs_epi32(mmtmpD2, mmtmpD3);
}
//==============================================================================================
// Pre-processing for LLR computation
//==============================================================================================
......@@ -1313,7 +1293,8 @@ void nr_ulsch_detection_mrc(NR_DL_FRAME_PARMS *frame_parms,
if (frame_parms->nb_antennas_rx>1) {
int nb_re = nb_rb*12;
if ((nb_re&7) > 0) nb_re+=8;
for (int aa=0;aa<frame_parms->nb_antennas_rx;aa++) {
for (int aa=0;aa<frame_parms->nb_antennas_rx;aa++)
{
rxdataF_comp256[aa] = (simde__m256i *)&rxdataF_comp[aa][(symbol*(nb_re + off))];
ul_ch_mag256[aa] = (simde__m256i *)&ul_ch_mag[aa][(symbol*(nb_re + off))];
ul_ch_mag256b[aa] = (simde__m256i *)&ul_ch_magb[aa][(symbol*(nb_re + off))];
......@@ -1334,58 +1315,97 @@ void nr_ulsch_detection_mrc(NR_DL_FRAME_PARMS *frame_parms,
simde_m_empty();
}
void inner_rx_qpsk (int *rxF,
int *ul_ch,
int16_t *llr,
int aarx,
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
(((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))
simde__m128i a_mult_conjb(simde__m128i a, simde__m128i b, unsigned char output_shift)
{
simde__m128i mmtmpD0 = simde_mm_madd_epi16(b, a);
simde__m128i mmtmpD1 = simde_mm_shufflelo_epi16(b, _MM_SHUFFLE(2, 3, 0, 1));
mmtmpD1 = simde_mm_shufflehi_epi16(mmtmpD1, _MM_SHUFFLE(2, 3, 0, 1));
mmtmpD1 = simde_mm_sign_epi16(mmtmpD1, *(simde__m128i *)&conjugate[0]);
mmtmpD1 = simde_mm_madd_epi16(mmtmpD1, a);
mmtmpD0 = simde_mm_srai_epi32(mmtmpD0, output_shift);
mmtmpD1 = simde_mm_srai_epi32(mmtmpD1, output_shift);
simde__m128i mmtmpD2 = simde_mm_unpacklo_epi32(mmtmpD0, mmtmpD1);
simde__m128i mmtmpD3 = simde_mm_unpackhi_epi32(mmtmpD0, mmtmpD1);
return simde_mm_packs_epi32(mmtmpD2, mmtmpD3);
}
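/* Editor's sketch (hedged): scalar equivalent of a_mult_conjb() for a single
 * complex int16 sample; the SIMD version above handles four samples at once
 * and additionally saturates to 16 bits via packs. It computes a*conj(b) with
 * an arithmetic right shift. */
static inline c16_t a_mult_conjb_scalar_ref(c16_t a, c16_t b, unsigned char output_shift)
{
  c16_t r;
  r.r = (int16_t)((((int32_t)a.r * b.r) + ((int32_t)a.i * b.i)) >> output_shift); // Re{a*conj(b)}
  r.i = (int16_t)((((int32_t)a.i * b.r) - ((int32_t)a.r * b.i)) >> output_shift); // Im{a*conj(b)}
  return r;
}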
void inner_rx_qpsk_2layer (NR_DL_FRAME_PARMS *frame_parms,
NR_gNB_PUSCH *pusch_vars,
nfapi_nr_pusch_pdu_t *rel15_ul,
int **rxF,
int **ul_ch,
int16_t **llr,
int nb_layer,
int nb_rx_ant,
int soffset,
int length,
int symbol,
short nb_rb,
int dmrs_symbol_flag,
int output_shift)
{
#if !USE_128BIT
register simde__m256i xmmp0, xmmp1, xmmp2, xmmp3, xmmp4;
register simde__m256i complex_shuffle256 = simde_mm256_set_epi8(29,28,31,30,25,24,27,26,21,20,23,22,17,16,19,18,13,12,15,14,9,8,11,10,5,4,7,6,1,0,3,2);
register simde__m256i conj256 = simde_mm256_set_epi16(1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1);
simde__m256i *rxF256 = (simde__m256i*)rxF;
simde__m256i *ulch256 = (simde__m256i*)ul_ch;
// need to use simde__m64 because llr output is not necessarily aligned to 256 bits, but it is always aligned to 64 bits
simde__m64 *llr64 = (simde__m64 *)llr;
for (int i=0; i<((length>>3)+((length&7)>0?1:0)); i++) {
xmmp0 = simde_mm256_madd_epi16(ulch256[i], rxF256[i]);
// xmmp0 contains real part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp1 = simde_mm256_shuffle_epi8(ulch256[i], complex_shuffle256);
xmmp1 = simde_mm256_sign_epi16(xmmp1, conj256);
xmmp1 = simde_mm256_madd_epi16(xmmp1, rxF256[i]);
// xmmp1 contains imag part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp0 = simde_mm256_srai_epi32(xmmp0, output_shift);
xmmp1 = simde_mm256_srai_epi32(xmmp1, output_shift);
xmmp2 = simde_mm256_unpacklo_epi32(xmmp0, xmmp1);
xmmp3 = simde_mm256_unpackhi_epi32(xmmp0, xmmp1);
xmmp4 = simde_mm256_packs_epi32(xmmp2,xmmp3);
if (aarx == 0)
int32_t rxFext[nb_rx_ant][nb_rb*12+8] __attribute__((aligned(32)));
int32_t chFext[nb_layer*nb_rx_ant][nb_rb*12+8] __attribute__((aligned(32)));
for (int aarx = 0; aarx < nb_rx_ant; aarx++)
{
*llr64 = (simde__m64)simde_mm256_extract_epi64(xmmp4,0); llr64++;
*llr64 = (simde__m64)simde_mm256_extract_epi64(xmmp4,1); llr64++;
*llr64 = (simde__m64)simde_mm256_extract_epi64(xmmp4,2); llr64++;
*llr64 = (simde__m64)simde_mm256_extract_epi64(xmmp4,3); llr64++;
for (int aatx = 0; aatx < nb_layer; aatx++)
{
nr_ulsch_extract_rbs0((c16_t *)rxF[aarx],
pusch_vars->ul_ch_estimates[aatx * nb_rx_ant + aarx],
rxFext[aarx],
chFext[aatx * nb_rx_ant + aarx],
soffset+(symbol * frame_parms->ofdm_symbol_size),
pusch_vars->dmrs_symbol * frame_parms->ofdm_symbol_size,
aarx,
dmrs_symbol_flag,
rel15_ul,
frame_parms);
}
else
}
int32_t rho[nb_rx_ant][nb_layer*nb_layer][nb_rb*12+8] __attribute__((aligned(32)));
int32_t rxFext_comp[nb_layer][nb_rb*12+8] __attribute__((aligned(32)));
for (int aarx = 0; aarx < nb_rx_ant; aarx++)
{
*llr64 = simde_mm_adds_pi16(*llr64,(simde__m64)(simde_mm256_extract_epi64(xmmp4,0))); llr64++;
*llr64 = simde_mm_adds_pi16(*llr64,(simde__m64)(simde_mm256_extract_epi64(xmmp4,1))); llr64++;
*llr64 = simde_mm_adds_pi16(*llr64,(simde__m64)(simde_mm256_extract_epi64(xmmp4,2))); llr64++;
*llr64 = simde_mm_adds_pi16(*llr64,(simde__m64)(simde_mm256_extract_epi64(xmmp4,3))); llr64++;
for (int aatx = 0; aatx < nb_layer; aatx++)
{
simde__m128i mmtmpD0, mmtmpD1, mmtmpD2, mmtmpD3;
for (int atx = 0; atx < nb_layer; atx++)
{
simde__m128i *rho128 = (simde__m128i *)rho[0][aatx*nb_layer+atx];
simde__m128i *ul_ch128 = (simde__m128i *)chFext[aatx * nb_rx_ant + aarx];
simde__m128i *ul_ch128_2 = (simde__m128i *)chFext[atx * nb_rx_ant + aarx];
for (int i = 0; i < nb_rb*3; i++)
{
// multiply by conjugated channel
mmtmpD0 = simde_mm_madd_epi16(ul_ch128[i], ul_ch128_2[i]);
// mmtmpD0 contains real part of 4 consecutive outputs (32-bit)
mmtmpD1 = simde_mm_shufflelo_epi16(ul_ch128[i], SIMDE_MM_SHUFFLE(2,3,0,1));
mmtmpD1 = simde_mm_shufflehi_epi16(mmtmpD1, SIMDE_MM_SHUFFLE(2,3,0,1));
mmtmpD1 = simde_mm_sign_epi16(mmtmpD1, *(simde__m128i*)&conjugate[0]);
mmtmpD1 = simde_mm_madd_epi16(mmtmpD1, ul_ch128_2[i]);
// mmtmpD1 contains imag part of 4 consecutive outputs (32-bit)
mmtmpD0 = simde_mm_srai_epi32(mmtmpD0, output_shift);
mmtmpD1 = simde_mm_srai_epi32(mmtmpD1, output_shift);
mmtmpD2 = simde_mm_unpacklo_epi32(mmtmpD0, mmtmpD1);
mmtmpD3 = simde_mm_unpackhi_epi32(mmtmpD0, mmtmpD1);
if (aarx == 0)
rho128[i] = simde_mm_packs_epi32(mmtmpD2, mmtmpD3);
else
rho128[i] = simde_mm_adds_epi16(rho128[i], simde_mm_packs_epi32(mmtmpD2, mmtmpD3));
}
}
#else
register simde__m128i xmmp0, xmmp1, xmmp2, xmmp3, xmmp4;
// compensation
simde__m128i xmmp0, xmmp1, xmmp2, xmmp3, xmmp4;
register simde__m128i complex_shuffle128 = simde_mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2);
register simde__m128i conj128 = simde_mm_set_epi16(1, -1, 1, -1, 1, -1, 1, -1);
simde__m128i *rxF128 = (simde__m128i*)rxF;
simde__m128i *ulch128 = (simde__m128i*)ul_ch;
simde__m128i *llr128 = (simde__m128i*)llr;
for (int i = 0; i < (length >> 2); i++) {
simde__m128i *rxF128 = (simde__m128i*)rxFext[aarx];
simde__m128i *ulch128 = (simde__m128i*)chFext[aatx * nb_rx_ant + aarx];
simde__m128i *rxF_comp128 = (simde__m128i*)rxFext_comp[aatx];
for (int i = 0; i < (length >> 2); i++)
{
xmmp0 = simde_mm_madd_epi16(ulch128[i], rxF128[i]);
// xmmp0 contains real part of 4 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp1 = simde_mm_shuffle_epi8(ulch128[i], complex_shuffle128);
......@@ -1397,22 +1417,24 @@ void inner_rx_qpsk (int *rxF,
xmmp2 = simde_mm_unpacklo_epi32(xmmp0, xmmp1);
xmmp3 = simde_mm_unpackhi_epi32(xmmp0, xmmp1);
xmmp4 = simde_mm_packs_epi32(xmmp2, xmmp3);
// xmmp4 = a_mult_conjb(rxF128[i], ulch128[i], output_shift);
if (aarx == 0)
*llr128 = xmmp4;
*rxF_comp128 = xmmp4;
else
*llr128 = simde_mm_add_epi16(*llr128, xmmp4);
llr128++;
*rxF_comp128 = simde_mm_adds_epi16(*rxF_comp128, xmmp4);
rxF_comp128++;
}
if (length & 3)
{
int i = (length>>1) - 1;
simde__m64* llr64 = (simde__m64*)llr128;
simde__m64* rxF_comp64 = (simde__m64*)rxF_comp128;
simde__m64 xmm0, xmm1, xmm2, xmm3, xmm4;
simde__m64 complex_shuffle64 = simde_mm_set_pi8(5, 4, 7, 6, 1, 0, 3, 2);
simde__m64 conj64 = simde_mm_set_pi16(1, -1, 1, -1);
simde__m64 *rxF64 = (simde__m64*)rxF;
simde__m64 *ulch64 = (simde__m64*)ul_ch;
simde__m64 *rxF64 = (simde__m64*)rxFext[aarx];
simde__m64 *ulch64 = (simde__m64*)chFext[aatx * nb_rx_ant + aarx];
xmm0 = simde_mm_madd_pi16(ulch64[i], rxF64[i]);
// xmm0 contains real part of 4 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
......@@ -1427,119 +1449,554 @@ void inner_rx_qpsk (int *rxF,
xmm4 = simde_mm_packs_pi32(xmm2, xmm3);
if (aarx == 0)
*llr64 = xmm4;
*rxF_comp64 = xmm4;
else
*llr64 = simde_mm_add_pi16(*llr64, xmm4);
*rxF_comp64 = simde_mm_add_pi16(*rxF_comp64, xmm4);
}
#endif
}
}
c16_t *rho0 = (c16_t *)rho[0][1];
c16_t *rho1 = (c16_t *)rho[0][2];
c16_t *llr_0 = (c16_t *)&llr[0][pusch_vars->llr_offset[symbol]];
c16_t *llr_1 = (c16_t *)&llr[1][pusch_vars->llr_offset[symbol]];
nr_ulsch_qpsk_qpsk((c16_t *)rxFext_comp[0], (c16_t *)rxFext_comp[1], llr_0, rho0, length);
nr_ulsch_qpsk_qpsk((c16_t *)rxFext_comp[1], (c16_t *)rxFext_comp[0], llr_1, rho1, length);
}
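/* Editor's sketch (hedged): scalar form of the rho loop above -- the per-RE
 * cross-layer correlation conj(h0)*h1 accumulated over rx antennas. The SIMD
 * code additionally applies >> output_shift and 16-bit saturation; chF uses
 * the same [layer * nb_rx_ant + aarx] layout as chFext above. */
static void compute_rho01_ref(c16_t **chF, int nb_rx_ant, int length,
                              int32_t *rho_re, int32_t *rho_im)
{
  for (int re = 0; re < length; re++) {
    int32_t acc_re = 0, acc_im = 0;
    for (int aarx = 0; aarx < nb_rx_ant; aarx++) {
      c16_t h0 = chF[aarx][re];              // layer 0, antenna aarx
      c16_t h1 = chF[nb_rx_ant + aarx][re];  // layer 1, antenna aarx
      acc_re += (int32_t)h0.r * h1.r + (int32_t)h0.i * h1.i; // Re{conj(h0) h1}
      acc_im += (int32_t)h0.r * h1.i - (int32_t)h0.i * h1.r; // Im{conj(h0) h1}
    }
    rho_re[re] = acc_re;
    rho_im[re] = acc_im;
  }
}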
void inner_rx_256qam (int *rxF,
int *ul_ch,
int16_t *llr,
int aarx,
void inner_rx_16qam_2layer (NR_DL_FRAME_PARMS *frame_parms,
NR_gNB_PUSCH *pusch_vars,
nfapi_nr_pusch_pdu_t *rel15_ul,
int **rxF,
int **ul_ch,
int16_t **llr,
int nb_layer,
int nb_rx_ant,
int soffset,
int length,
int symbol,
short nb_rb,
int dmrs_symbol_flag,
int output_shift)
{
#if !USE_128BIT
register simde__m256i xmmp0, xmmp1, xmmp2, xmmp3, xmmp4, xmmp5, xmmp6, xmmp7, xmmp8, xmmp9;
register simde__m256i complex_shuffle256 = simde_mm256_set_epi8(29,28,31,30,25,24,27,26,21,20,23,22,17,16,19,18,13,12,15,14,9,8,11,10,5,4,7,6,1,0,3,2);
register simde__m256i conj256 = simde_mm256_set_epi16(1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1);
register simde__m256i QAM_amp256 = simde_mm256_set1_epi16(QAM256_n1);
register simde__m256i QAM_amp256b = simde_mm256_set1_epi16(QAM256_n2);
register simde__m256i QAM_amp256c = simde_mm256_set1_epi16(QAM256_n3);
simde__m256i *rxF256 = (simde__m256i*)rxF;
simde__m256i *ulch256 = (simde__m256i*)ul_ch;
simde__m256i *llr256 = (simde__m256i *)llr;
for (int i = 0; i < ((length >> 3) + (( length & 7) > 0 ? 1 : 0)); i++)
int32_t rxFext[nb_rx_ant][nb_rb*12+8] __attribute__((aligned(32)));
int32_t chFext[nb_layer*nb_rx_ant][nb_rb*12+8] __attribute__((aligned(32)));
for (int aarx = 0; aarx < nb_rx_ant; aarx++)
{
xmmp0 = simde_mm256_madd_epi16(ulch256[i],rxF256[i]);
// xmmp0 contains real part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp1 = simde_mm256_shuffle_epi8(ulch256[i],complex_shuffle256);
xmmp1 = simde_mm256_sign_epi16(xmmp1,conj256);
xmmp1 = simde_mm256_madd_epi16(xmmp1,rxF256[i]);
// xmmp1 contains imag part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp0 = simde_mm256_srai_epi32(xmmp0,output_shift);
xmmp1 = simde_mm256_srai_epi32(xmmp1,output_shift);
xmmp2 = simde_mm256_unpacklo_epi32(xmmp0,xmmp1);
xmmp3 = simde_mm256_unpackhi_epi32(xmmp0,xmmp1);
xmmp4 = simde_mm256_packs_epi32(xmmp2,xmmp3);
// compute channel amplitude for LLR
xmmp0 = simde_mm256_madd_epi16(ulch256[i],ulch256[i]);
xmmp0 = simde_mm256_srai_epi32(xmmp0,output_shift);
xmmp0 = simde_mm256_packs_epi32(xmmp0,xmmp0); // |h|^2 packed/saturated to 16 bits
xmmp2 = simde_mm256_unpacklo_epi16(xmmp0,xmmp0);
xmmp1 = simde_mm256_mulhrs_epi16(xmmp2,QAM_amp256);
xmmp6 = simde_mm256_mulhrs_epi16(xmmp2,QAM_amp256b);
xmmp8 = simde_mm256_mulhrs_epi16(xmmp2,QAM_amp256c);
for (int aatx = 0; aatx < nb_layer; aatx++)
{
nr_ulsch_extract_rbs0((c16_t *)rxF[aarx],
pusch_vars->ul_ch_estimates[aatx * nb_rx_ant + aarx],
rxFext[aarx],
chFext[aatx * nb_rx_ant + aarx],
soffset+(symbol * frame_parms->ofdm_symbol_size),
pusch_vars->dmrs_symbol * frame_parms->ofdm_symbol_size,
aarx,
dmrs_symbol_flag,
rel15_ul,
frame_parms);
}
}
xmmp2 = simde_mm256_abs_epi16(xmmp4); // even 16-bit lanes of xmmp2 -> |y_R|, odd lanes -> |y_I|
xmmp2 = simde_mm256_subs_epi16(xmmp1,xmmp2); // even lanes -> a1*|h|^2 - |y_R|, odd lanes -> a1*|h|^2 - |y_I|
// xmmp2 contains 16 LLRs
// xmmp2 contains 16 LLRs
xmmp7 = simde_mm256_abs_epi16(xmmp2);
xmmp7 = simde_mm256_subs_epi16(xmmp6,xmmp7); // contains 16 LLRs
xmmp9 = simde_mm256_abs_epi16(xmmp7);
xmmp9 = simde_mm256_subs_epi16(xmmp8,xmmp9); // contains 16 LLRs
int32_t rho[nb_rx_ant][nb_layer*nb_layer][nb_rb*12+8] __attribute__((aligned(32)));
int32_t rxFext_comp[nb_layer][nb_rb*12+8] __attribute__((aligned(32)));
int32_t ul_ch_mag[nb_layer][length+8] __attribute__((aligned(32)));
for (int aatx = 0; aatx < nb_layer; aatx++)
{
for (int aarx = 0; aarx < nb_rx_ant; aarx++)
{
simde__m128i mmtmpD0, mmtmpD1, mmtmpD2, mmtmpD3;
for (int atx = 0; atx < nb_layer; atx++)
{
simde__m128i *rho128 = (simde__m128i *)rho[0][aatx*nb_layer+atx];
simde__m128i *ul_ch128 = (simde__m128i *)chFext[aatx * nb_rx_ant + aarx];
simde__m128i *ul_ch128_2 = (simde__m128i *)chFext[atx * nb_rx_ant + aarx];
for (int i = 0; i < nb_rb*3; i++)
{
// multiply by conjugated channel
mmtmpD0 = simde_mm_madd_epi16(ul_ch128[i], ul_ch128_2[i]);
// mmtmpD0 contains real part of 4 consecutive outputs (32-bit)
mmtmpD1 = simde_mm_shufflelo_epi16(ul_ch128[i], SIMDE_MM_SHUFFLE(2,3,0,1));
mmtmpD1 = simde_mm_shufflehi_epi16(mmtmpD1, SIMDE_MM_SHUFFLE(2,3,0,1));
mmtmpD1 = simde_mm_sign_epi16(mmtmpD1, *(simde__m128i*)&conjugate[0]);
mmtmpD1 = simde_mm_madd_epi16(mmtmpD1, ul_ch128_2[i]);
// mmtmpD1 contains imag part of 4 consecutive outputs (32-bit)
mmtmpD0 = simde_mm_srai_epi32(mmtmpD0, output_shift);
mmtmpD1 = simde_mm_srai_epi32(mmtmpD1, output_shift);
mmtmpD2 = simde_mm_unpacklo_epi32(mmtmpD0, mmtmpD1);
mmtmpD3 = simde_mm_unpackhi_epi32(mmtmpD0, mmtmpD1);
if (aarx == 0)
rho128[i] = simde_mm_packs_epi32(mmtmpD2, mmtmpD3);
else
rho128[i] = simde_mm_adds_epi16(rho128[i], simde_mm_packs_epi32(mmtmpD2, mmtmpD3));
}
}
// compensation
simde__m128i xmmp0, xmmp1, xmmp2, xmmp3, xmmp4;
register simde__m128i complex_shuffle128 = simde_mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2);
register simde__m128i conj128 = simde_mm_set_epi16(1, -1, 1, -1, 1, -1, 1, -1);
register simde__m128i QAM_amp128 = simde_mm_set1_epi16(QAM16_n1); // 2/sqrt(10)
simde__m128i *rxF128 = (simde__m128i*)rxFext[aarx];
simde__m128i *ulch128 = (simde__m128i*)chFext[aatx * nb_rx_ant + aarx];
simde__m128i *rxF_comp128 = (simde__m128i*)rxFext_comp[aatx];
simde__m128i *ul_ch_mag128 = (simde__m128i*)ul_ch_mag[aatx];
for (int i = 0; i < (length >> 2); i++)
{
xmmp0 = simde_mm_madd_epi16(ulch128[i], rxF128[i]);
// xmmp0 contains real part of 4 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp1 = simde_mm_shuffle_epi8(ulch128[i], complex_shuffle128);
xmmp1 = simde_mm_sign_epi16(xmmp1, conj128);
xmmp1 = simde_mm_madd_epi16(xmmp1, rxF128[i]);
// xmmp1 contains imag part of 4 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp0 = simde_mm_srai_epi32(xmmp0, output_shift);
xmmp1 = simde_mm_srai_epi32(xmmp1, output_shift);
xmmp2 = simde_mm_unpacklo_epi32(xmmp0, xmmp1);
xmmp3 = simde_mm_unpackhi_epi32(xmmp0, xmmp1);
xmmp4 = simde_mm_packs_epi32(xmmp2, xmmp3);
// xmmp4 A0 A1 A2 A3 A4 A5 A6 A7
// xmmp2 B0 B1 B2 B3 B4 B5 B6 B7
// xmmp7 C0 C1 C2 C3 C4 C5 C6 C7
// xmmp9 D0 D1 D2 D3 D4 D5 D6 D7
xmmp1 = simde_mm256_unpacklo_epi32(xmmp4,xmmp2); // A0 B0 A1 B1 A4 B4 A5 B5
xmmp3 = simde_mm256_unpackhi_epi32(xmmp4,xmmp2); // A2 B2 A3 B3 A6 B6 A7 B7
xmmp5 = simde_mm256_unpacklo_epi32(xmmp7,xmmp9); // C0 D0 C1 D1 C4 D4 C5 D5
xmmp6 = simde_mm256_unpackhi_epi32(xmmp7,xmmp9); // C2 D2 C3 D3 C6 D6 C7 D7
xmmp0 = simde_mm_madd_epi16(ulch128[i], ulch128[i]); // |h|^2
xmmp0 = simde_mm_srai_epi32(xmmp0, output_shift);
xmmp0 = simde_mm_packs_epi32(xmmp0, xmmp0);
xmmp1 = simde_mm_unpacklo_epi16(xmmp0, xmmp0);
xmmp1 = simde_mm_mulhrs_epi16(xmmp1, QAM_amp128);
xmmp2 = simde_mm256_unpacklo_epi64(xmmp1,xmmp5); // A0 B0 C0 D0 A4 B4 C4 D4
xmmp4 = simde_mm256_unpackhi_epi64(xmmp1,xmmp5); // A1 B1 C1 D1 A5 B5 C5 D5
xmmp1 = simde_mm256_unpacklo_epi64(xmmp3,xmmp6); // A2 B2 C2 D2 A6 B6 C6 D6
xmmp5 = simde_mm256_unpackhi_epi64(xmmp3,xmmp6); // A3 B3 C3 D3 A7 B7 C7 D7
if (aarx == 0)
{
llr256[0] = simde_mm256_permute2x128_si256(xmmp2, xmmp4, 0x20); // A0 B0 C0 D0 A1 B1 C1 D1
llr256[1] = simde_mm256_permute2x128_si256(xmmp1, xmmp5, 0x20); // A2 B2 C2 D2 A3 B3 C3 D3
llr256[2] = simde_mm256_permute2x128_si256(xmmp2, xmmp4, 0x31); // A4 B4 C4 D4 A5 B5 C5 D5
llr256[3] = simde_mm256_permute2x128_si256(xmmp1, xmmp5, 0x31); // A6 B6 C6 D6 A7 B7 C7 D7
llr256+=4;
*rxF_comp128 = xmmp4;
*ul_ch_mag128 = xmmp1;
}
else
{
llr256[0] = simde_mm256_adds_epi16(llr256[0],simde_mm256_permute2x128_si256(xmmp2, xmmp4, 0x20)); // A0 B0 C0 D0 A1 B1 C1 D1
llr256[1] = simde_mm256_adds_epi16(llr256[1],simde_mm256_permute2x128_si256(xmmp1, xmmp5, 0x20)); // A2 B2 C2 D2 A3 B3 C3 D3
llr256[2] = simde_mm256_adds_epi16(llr256[2],simde_mm256_permute2x128_si256(xmmp2, xmmp4, 0x31)); // A4 B4 C4 D4 A5 B5 C5 D5
llr256[3] = simde_mm256_adds_epi16(llr256[3],simde_mm256_permute2x128_si256(xmmp1, xmmp5, 0x31)); // A6 B6 C6 D6 A7 B7 C7 D7
llr256+=4;
*rxF_comp128 = simde_mm_adds_epi16(*rxF_comp128, xmmp4);
*ul_ch_mag128 = simde_mm_adds_epi16(*ul_ch_mag128, xmmp1);
}
rxF_comp128++;
ul_ch_mag128++;
}
simde__m128i *llr128 = (simde__m128i*)llr256;
if ((length&7) >= 4) { //there is a single 128-bit input element remaining
int nb_re128 = length>>2;
simde__m128i xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6;
simde__m128i complex_shuffle128 = simde_mm_set_epi8(13,12,15,14,9,8,11,10,5,4,7,6,1,0,3,2);
simde__m128i conj128 = simde_mm_set_epi16(1,-1,1,-1,1,-1,1,-1);
simde__m128i *rxF128 = (simde__m128i*)rxF;
simde__m128i *ulch128 = (simde__m128i*)ul_ch;
simde__m128i QAM_amp = simde_mm_set1_epi16(QAM256_n1); // 8/sqrt(170)
simde__m128i QAM_ampb = simde_mm_set1_epi16(QAM256_n2);
simde__m128i QAM_ampc = simde_mm_set1_epi16(QAM256_n3);
if (length & 3)
{
int i = (length>>1) - 1;
simde__m64* rxF_comp64 = (simde__m64*)rxF_comp128;
simde__m64* ul_ch_mag64 = (simde__m64*)ul_ch_mag128;
simde__m64 xmm0, xmm1, xmm2, xmm3, xmm4;
simde__m64 complex_shuffle64 = simde_mm_set_pi8(5,4,7,6,1,0,3,2);
simde__m64 conj64 = simde_mm_set_pi16(1, -1, 1, -1);
simde__m64 *rxF64 = (simde__m64*)rxFext[aarx];
simde__m64 *ulch64 = (simde__m64*)chFext[aatx * nb_rx_ant + aarx];
simde__m64 QAM_amp = simde_mm_set1_pi16(QAM16_n1);
xmm0 = simde_mm_madd_epi16(ulch128[nb_re128-1],rxF128[nb_re128-1]);
xmm0 = simde_mm_madd_pi16(ulch64[i], rxF64[i]);
// xmm0 contains real part of 4 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmm1 = simde_mm_shuffle_epi8(ulch128[nb_re128-1],complex_shuffle128);
xmm1 = simde_mm_sign_epi16(xmm1,conj128);
xmm1 = simde_mm_madd_epi16(xmm1,rxF128[nb_re128-1]);
xmm1 = simde_mm_shuffle_pi8(ulch64[i], complex_shuffle64);
xmm1 = simde_mm_sign_pi16(xmm1, conj64);
xmm1 = simde_mm_madd_pi16(xmm1, rxF64[i]);
// xmm1 contains imag part of 4 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmm0 = simde_mm_srai_epi32(xmm0,output_shift);
xmm1 = simde_mm_srai_epi32(xmm1,output_shift);
xmm2 = simde_mm_unpacklo_epi32(xmm0,xmm1);
xmm3 = simde_mm_unpackhi_epi32(xmm0,xmm1);
xmm4 = simde_mm_packs_epi32(xmm2,xmm3);
// compute channel amplitude for LLR
xmm0 = simde_mm_madd_epi16(ulch128[nb_re128-1],ulch128[nb_re128-1]);
xmm0 = simde_mm_srai_epi32(xmm0,output_shift);
xmm0 = simde_mm_srai_pi32(xmm0, output_shift);
xmm1 = simde_mm_srai_pi32(xmm1, output_shift);
xmm2 = simde_mm_unpacklo_pi32(xmm0, xmm1);
xmm3 = simde_mm_unpackhi_pi32(xmm0, xmm1);
xmm4 = simde_mm_packs_pi32(xmm2, xmm3);
// compute channel amplitude for LLR
xmm0 = simde_mm_madd_pi16(ulch64[i], ulch64[i]); // |h|^2
xmm0 = simde_mm_srai_pi32(xmm0, output_shift);
xmm0 = simde_mm_packs_pi32(xmm0, xmm0);
xmm2 = simde_mm_unpacklo_pi16(xmm0, xmm0);
xmm1 = simde_mm_mulhrs_pi16(xmm2, QAM_amp);
if (aarx == 0)
{
*rxF_comp64 = xmm4;
*ul_ch_mag64 = xmm1;
}
else
{
*rxF_comp64 = simde_mm_add_pi16(*rxF_comp64, xmm4);
*ul_ch_mag64 = simde_mm_add_pi16(*ul_ch_mag64, xmm1);
}
}
}
}
c16_t *rho0 = (c16_t *)rho[0][1];
c16_t *rho1 = (c16_t *)rho[0][2];
c16_t *llr_0 = (c16_t *)&llr[0][pusch_vars->llr_offset[symbol]];
c16_t *llr_1 = (c16_t *)&llr[1][pusch_vars->llr_offset[symbol]];
c16_t *ul_ch_mag0 = (c16_t *)ul_ch_mag[0];
c16_t *ul_ch_mag1 = (c16_t *)ul_ch_mag[1];
nr_ulsch_qam16_qam16((c16_t *)rxFext_comp[0], (c16_t *)rxFext_comp[1], ul_ch_mag0, ul_ch_mag1, llr_0, rho0, length);
nr_ulsch_qam16_qam16((c16_t *)rxFext_comp[1], (c16_t *)rxFext_comp[0], ul_ch_mag1, ul_ch_mag0, llr_1, rho1, length);
}
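/* Editor's sketch (hedged): single-stream scalar counterpart of the 16QAM
 * max-log LLRs that nr_ulsch_qam16_qam16() extends with the rho01 interference
 * term. y is the MRC-combined compensated RE and mag holds |h|^2 * 2/sqrt(10)
 * (the QAM16_n1 scaling built above); saturation is omitted for clarity. */
#include <stdlib.h>
static inline void qam16_llr_ref(c16_t y, c16_t mag, int16_t llr[4])
{
  llr[0] = y.r;                         // b0: sign of the real part
  llr[1] = y.i;                         // b1: sign of the imag part
  llr[2] = (int16_t)(mag.r - abs(y.r)); // b2: inner vs outer amplitude, real axis
  llr[3] = (int16_t)(mag.i - abs(y.i)); // b3: inner vs outer amplitude, imag axis
}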
void inner_rx_64qam_2layer (NR_DL_FRAME_PARMS *frame_parms,
NR_gNB_PUSCH *pusch_vars,
nfapi_nr_pusch_pdu_t *rel15_ul,
int **rxF,
int **ul_ch,
int16_t **llr,
int nb_layer,
int nb_rx_ant,
int soffset,
int length,
int symbol,
short nb_rb,
int dmrs_symbol_flag,
int output_shift)
{
int32_t rxFext[nb_rx_ant][nb_rb*12+8] __attribute__((aligned(32)));
int32_t chFext[nb_layer*nb_rx_ant][nb_rb*12+8] __attribute__((aligned(32)));
for (int aarx = 0; aarx < nb_rx_ant; aarx++)
{
for (int aatx = 0; aatx < nb_layer; aatx++)
{
nr_ulsch_extract_rbs0((c16_t *)rxF[aarx],
pusch_vars->ul_ch_estimates[aatx * nb_rx_ant + aarx],
rxFext[aarx],
chFext[aatx * nb_rx_ant + aarx],
soffset+(symbol * frame_parms->ofdm_symbol_size),
pusch_vars->dmrs_symbol * frame_parms->ofdm_symbol_size,
aarx,
dmrs_symbol_flag,
rel15_ul,
frame_parms);
}
}
int32_t rho[nb_rx_ant][nb_layer*nb_layer][nb_rb*12+8] __attribute__((aligned(32)));
int32_t rxFext_comp[nb_layer][nb_rb*12+8] __attribute__((aligned(32)));
int32_t ul_ch_mag[nb_layer][length+8] __attribute__((aligned(32)));
for (int aatx = 0; aatx < nb_layer; aatx++)
{
for (int aarx = 0; aarx < nb_rx_ant; aarx++)
{
simde__m128i mmtmpD0, mmtmpD1, mmtmpD2, mmtmpD3;
for (int atx = 0; atx < nb_layer; atx++)
{
simde__m128i *rho128 = (simde__m128i *)rho[0][aatx*nb_layer+atx];
simde__m128i *ul_ch128 = (simde__m128i *)chFext[aatx * nb_rx_ant + aarx];
simde__m128i *ul_ch128_2 = (simde__m128i *)chFext[atx * nb_rx_ant + aarx];
for (int i = 0; i < nb_rb*3; i++)
{
// multiply by conjugated channel
mmtmpD0 = simde_mm_madd_epi16(ul_ch128[i], ul_ch128_2[i]);
// mmtmpD0 contains real part of 4 consecutive outputs (32-bit)
mmtmpD1 = simde_mm_shufflelo_epi16(ul_ch128[i], SIMDE_MM_SHUFFLE(2,3,0,1));
mmtmpD1 = simde_mm_shufflehi_epi16(mmtmpD1, SIMDE_MM_SHUFFLE(2,3,0,1));
mmtmpD1 = simde_mm_sign_epi16(mmtmpD1, *(simde__m128i*)&conjugate[0]);
mmtmpD1 = simde_mm_madd_epi16(mmtmpD1, ul_ch128_2[i]);
// mmtmpD1 contains imag part of 4 consecutive outputs (32-bit)
mmtmpD0 = simde_mm_srai_epi32(mmtmpD0, output_shift);
mmtmpD1 = simde_mm_srai_epi32(mmtmpD1, output_shift);
mmtmpD2 = simde_mm_unpacklo_epi32(mmtmpD0, mmtmpD1);
mmtmpD3 = simde_mm_unpackhi_epi32(mmtmpD0, mmtmpD1);
if (aarx == 0)
rho128[i] = simde_mm_packs_epi32(mmtmpD2, mmtmpD3);
else
rho128[i] = simde_mm_adds_epi16(rho128[i], simde_mm_packs_epi32(mmtmpD2, mmtmpD3));
}
}
// compensation
simde__m128i xmmp0, xmmp1, xmmp2, xmmp3, xmmp4;
register simde__m128i complex_shuffle128 = simde_mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2);
register simde__m128i conj128 = simde_mm_set_epi16(1, -1, 1, -1, 1, -1, 1, -1);
register simde__m128i QAM_amp128 = simde_mm_set1_epi16(QAM64_n1); // 4/sqrt(42)
simde__m128i *rxF128 = (simde__m128i*)rxFext[aarx];
simde__m128i *ulch128 = (simde__m128i*)chFext[aatx * nb_rx_ant + aarx];
simde__m128i *rxF_comp128 = (simde__m128i*)rxFext_comp[aatx];
simde__m128i *ul_ch_mag128 = (simde__m128i*)ul_ch_mag[aatx];
for (int i = 0; i < (length >> 2); i++)
{
xmmp0 = simde_mm_madd_epi16(ulch128[i], rxF128[i]);
// xmmp0 contains real part of 4 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp1 = simde_mm_shuffle_epi8(ulch128[i], complex_shuffle128);
xmmp1 = simde_mm_sign_epi16(xmmp1, conj128);
xmmp1 = simde_mm_madd_epi16(xmmp1, rxF128[i]);
// xmmp1 contains imag part of 4 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp0 = simde_mm_srai_epi32(xmmp0, output_shift);
xmmp1 = simde_mm_srai_epi32(xmmp1, output_shift);
xmmp2 = simde_mm_unpacklo_epi32(xmmp0, xmmp1);
xmmp3 = simde_mm_unpackhi_epi32(xmmp0, xmmp1);
xmmp4 = simde_mm_packs_epi32(xmmp2, xmmp3);
xmmp0 = simde_mm_madd_epi16(ulch128[i], ulch128[i]); // |h|^2
xmmp0 = simde_mm_srai_epi32(xmmp0, output_shift);
xmmp0 = simde_mm_packs_epi32(xmmp0, xmmp0);
xmmp1 = simde_mm_unpacklo_epi16(xmmp0, xmmp0);
xmmp1 = simde_mm_mulhrs_epi16(xmmp1, QAM_amp128);
if (aarx == 0)
{
*rxF_comp128 = xmmp4;
*ul_ch_mag128 = xmmp1;
}
else
{
*rxF_comp128 = simde_mm_adds_epi16(*rxF_comp128, xmmp4);
*ul_ch_mag128 = simde_mm_adds_epi16(*ul_ch_mag128, xmmp1);
}
rxF_comp128++;
ul_ch_mag128++;
}
if (length & 3)
{
int i = (length>>1) - 1;
simde__m64* rxF_comp64 = (simde__m64*)rxF_comp128;
simde__m64* ul_ch_mag64 = (simde__m64*)ul_ch_mag128;
simde__m64 xmm0, xmm1, xmm2, xmm3, xmm4;
simde__m64 complex_shuffle64 = simde_mm_set_pi8(5,4,7,6,1,0,3,2);
simde__m64 conj64 = simde_mm_set_pi16(1, -1, 1, -1);
simde__m64 *rxF64 = (simde__m64*)rxFext[aarx];
simde__m64 *ulch64 = (simde__m64*)chFext[aatx * nb_rx_ant + aarx];
simde__m64 QAM_amp = simde_mm_set1_pi16(QAM64_n1);
xmm0 = simde_mm_madd_pi16(ulch64[i], rxF64[i]);
// xmm0 contains real part of 4 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmm1 = simde_mm_shuffle_pi8(ulch64[i], complex_shuffle64);
xmm1 = simde_mm_sign_pi16(xmm1, conj64);
xmm1 = simde_mm_madd_pi16(xmm1, rxF64[i]);
// xmm1 contains imag part of 4 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmm0 = simde_mm_srai_pi32(xmm0, output_shift);
xmm1 = simde_mm_srai_pi32(xmm1, output_shift);
xmm2 = simde_mm_unpacklo_pi32(xmm0, xmm1);
xmm3 = simde_mm_unpackhi_pi32(xmm0, xmm1);
xmm4 = simde_mm_packs_pi32(xmm2, xmm3);
// compute channel amplitude for LLR
xmm0 = simde_mm_madd_pi16(ulch64[i], ulch64[i]); // |h|^2
xmm0 = simde_mm_srai_pi32(xmm0, output_shift);
xmm0 = simde_mm_packs_pi32(xmm0, xmm0);
xmm2 = simde_mm_unpacklo_pi16(xmm0, xmm0);
xmm1 = simde_mm_mulhrs_pi16(xmm2, QAM_amp);
if (aarx == 0)
{
*rxF_comp64 = xmm4;
*ul_ch_mag64 = xmm1;
}
else
{
*rxF_comp64 = simde_mm_add_pi16(*rxF_comp64, xmm4);
*ul_ch_mag64 = simde_mm_add_pi16(*ul_ch_mag64, xmm1);
}
}
}
}
c16_t *rho0 = (c16_t *)rho[0][1];
c16_t *rho1 = (c16_t *)rho[0][2];
c16_t *llr_0 = (c16_t *)&llr[0][pusch_vars->llr_offset[symbol]];
c16_t *llr_1 = (c16_t *)&llr[1][pusch_vars->llr_offset[symbol]];
c16_t *ul_ch_mag0 = (c16_t *)ul_ch_mag[0];
c16_t *ul_ch_mag1 = (c16_t *)ul_ch_mag[1];
nr_ulsch_qam64_qam64((c16_t *)rxFext_comp[0], (c16_t *)rxFext_comp[1], ul_ch_mag0, ul_ch_mag1, llr_0, rho0, length);
nr_ulsch_qam64_qam64((c16_t *)rxFext_comp[1], (c16_t *)rxFext_comp[0], ul_ch_mag1, ul_ch_mag0, llr_1, rho1, length);
}
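/* Editor's sketch (hedged): single-stream scalar 64QAM max-log LLRs for one
 * compensated RE. mag holds |h|^2 * 4/sqrt(42) (QAM64_n1, as built above);
 * magb, |h|^2 * 2/sqrt(42), is not precomputed in the 2-layer path and is
 * shown here only to complete the picture. nr_ulsch_qam64_qam64() additionally
 * folds in the cross-layer rho01 term; saturation is omitted. */
#include <stdlib.h>
static inline void qam64_llr_ref(c16_t y, c16_t mag, c16_t magb, int16_t llr[6])
{
  llr[0] = y.r;
  llr[1] = y.i;
  llr[2] = (int16_t)(mag.r - abs(y.r));
  llr[3] = (int16_t)(mag.i - abs(y.i));
  llr[4] = (int16_t)(magb.r - abs(llr[2]));
  llr[5] = (int16_t)(magb.i - abs(llr[3]));
}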
void inner_rx_qpsk (int *rxF,
int *ul_ch,
int16_t *llr,
int aarx,
int length,
int output_shift)
{
#ifndef USE_128BIT
register simde__m256i xmmp0, xmmp1, xmmp2, xmmp3, xmmp4;
register simde__m256i complex_shuffle256 = simde_mm256_set_epi8(29,28,31,30,25,24,27,26,21,20,23,22,17,16,19,18,13,12,15,14,9,8,11,10,5,4,7,6,1,0,3,2);
register simde__m256i conj256 = simde_mm256_set_epi16(1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1);
simde__m256i *rxF256 = (simde__m256i*)rxF;
simde__m256i *ulch256 = (simde__m256i*)ul_ch;
// need to use simde__m64 because llr output is not necessarily aligned to 256 bits, but it is always aligned to 64 bits
simde__m64 *llr64 = (simde__m64 *)llr;
for (int i=0; i<((length>>3)+((length&7)>0?1:0)); i++) {
xmmp0 = simde_mm256_madd_epi16(ulch256[i], rxF256[i]);
// xmmp0 contains real part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp1 = simde_mm256_shuffle_epi8(ulch256[i], complex_shuffle256);
xmmp1 = simde_mm256_sign_epi16(xmmp1, conj256);
xmmp1 = simde_mm256_madd_epi16(xmmp1, rxF256[i]);
// xmmp1 contains imag part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp0 = simde_mm256_srai_epi32(xmmp0, output_shift);
xmmp1 = simde_mm256_srai_epi32(xmmp1, output_shift);
xmmp2 = simde_mm256_unpacklo_epi32(xmmp0, xmmp1);
xmmp3 = simde_mm256_unpackhi_epi32(xmmp0, xmmp1);
xmmp4 = simde_mm256_packs_epi32(xmmp2,xmmp3);
if (aarx == 0)
{
*llr64 = (simde__m64)simde_mm256_extract_epi64(xmmp4,0); llr64++;
*llr64 = (simde__m64)simde_mm256_extract_epi64(xmmp4,1); llr64++;
*llr64 = (simde__m64)simde_mm256_extract_epi64(xmmp4,2); llr64++;
*llr64 = (simde__m64)simde_mm256_extract_epi64(xmmp4,3); llr64++;
}
else
{
*llr64 = simde_mm_adds_pi16(*llr64,(simde__m64)(simde_mm256_extract_epi64(xmmp4,0))); llr64++;
*llr64 = simde_mm_adds_pi16(*llr64,(simde__m64)(simde_mm256_extract_epi64(xmmp4,1))); llr64++;
*llr64 = simde_mm_adds_pi16(*llr64,(simde__m64)(simde_mm256_extract_epi64(xmmp4,2))); llr64++;
*llr64 = simde_mm_adds_pi16(*llr64,(simde__m64)(simde_mm256_extract_epi64(xmmp4,3))); llr64++;
}
}
#else
simde__m128i xmmp0, xmmp1, xmmp2, xmmp3, xmmp4;
register simde__m128i complex_shuffle128 = simde_mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2);
register simde__m128i conj128 = simde_mm_set_epi16(1, -1, 1, -1, 1, -1, 1, -1);
simde__m128i *rxF128 = (simde__m128i*)rxF;
simde__m128i *ulch128 = (simde__m128i*)ul_ch;
simde__m128i *llr128 = (simde__m128i*)llr;
for (int i = 0; i < (length >> 2); i++) {
xmmp0 = simde_mm_madd_epi16(ulch128[i], rxF128[i]);
// xmmp0 contains real part of 4 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp1 = simde_mm_shuffle_epi8(ulch128[i], complex_shuffle128);
xmmp1 = simde_mm_sign_epi16(xmmp1, conj128);
xmmp1 = simde_mm_madd_epi16(xmmp1, rxF128[i]);
// xmmp1 contains imag part of 4 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp0 = simde_mm_srai_epi32(xmmp0, output_shift);
xmmp1 = simde_mm_srai_epi32(xmmp1, output_shift);
xmmp2 = simde_mm_unpacklo_epi32(xmmp0, xmmp1);
xmmp3 = simde_mm_unpackhi_epi32(xmmp0, xmmp1);
xmmp4 = simde_mm_packs_epi32(xmmp2, xmmp3);
if (aarx == 0)
*llr128 = xmmp4;
else
*llr128 = simde_mm_add_epi16(*llr128, xmmp4);
llr128++;
}
if (length & 3)
{
int i = (length>>1) - 1;
simde__m64* llr64 = (simde__m64*)llr128;
simde__m64 xmm0, xmm1, xmm2, xmm3, xmm4;
simde__m64 complex_shuffle64 = simde_mm_set_pi8(5, 4, 7, 6, 1, 0, 3, 2);
simde__m64 conj64 = simde_mm_set_pi16(1, -1, 1, -1);
simde__m64 *rxF64 = (simde__m64*)rxF;
simde__m64 *ulch64 = (simde__m64*)ul_ch;
xmm0 = simde_mm_madd_pi16(ulch64[i], rxF64[i]);
// xmm0 contains real part of 4 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmm1 = simde_mm_shuffle_pi8(ulch64[i], complex_shuffle64);
xmm1 = simde_mm_sign_pi16(xmm1, conj64);
xmm1 = simde_mm_madd_pi16(xmm1, rxF64[i]);
// xmm1 contains imag part of 4 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmm0 = simde_mm_srai_pi32(xmm0, output_shift);
xmm1 = simde_mm_srai_pi32(xmm1, output_shift);
xmm2 = simde_mm_unpacklo_pi32(xmm0, xmm1);
xmm3 = simde_mm_unpackhi_pi32(xmm0, xmm1);
xmm4 = simde_mm_packs_pi32(xmm2, xmm3);
if (aarx == 0)
*llr64 = xmm4;
else
*llr64 = simde_mm_add_pi16(*llr64, xmm4);
}
#endif
}
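/* Editor's note (hedged): for QPSK the max-log LLRs are just the MRC-combined
 * matched-filter outputs, which is why the kernel above writes conj(h)*y
 * (shifted) straight into the llr buffer for the first rx antenna and
 * saturating-adds for the others. Scalar equivalent for one RE, combining all
 * antennas in a single pass: */
static inline void qpsk_llr_ref(const c16_t *y, const c16_t *h, int nb_rx_ant,
                                int output_shift, int16_t llr[2])
{
  int32_t re = 0, im = 0;
  for (int aarx = 0; aarx < nb_rx_ant; aarx++) {
    re += ((int32_t)h[aarx].r * y[aarx].r + (int32_t)h[aarx].i * y[aarx].i) >> output_shift;
    im += ((int32_t)h[aarx].r * y[aarx].i - (int32_t)h[aarx].i * y[aarx].r) >> output_shift;
  }
  llr[0] = (int16_t)re; // LLR of the first QPSK bit (real axis)
  llr[1] = (int16_t)im; // LLR of the second QPSK bit (imag axis)
}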
void inner_rx_256qam (int *rxF,
int *ul_ch,
int16_t *llr,
int aarx,
int length,
int output_shift)
{
#ifndef USE_128BIT
register simde__m256i xmmp0, xmmp1, xmmp2, xmmp3, xmmp4, xmmp5, xmmp6, xmmp7, xmmp8, xmmp9;
register simde__m256i complex_shuffle256 = simde_mm256_set_epi8(29,28,31,30,25,24,27,26,21,20,23,22,17,16,19,18,13,12,15,14,9,8,11,10,5,4,7,6,1,0,3,2);
register simde__m256i conj256 = simde_mm256_set_epi16(1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1);
register simde__m256i QAM_amp256 = simde_mm256_set1_epi16(QAM256_n1);
register simde__m256i QAM_amp256b = simde_mm256_set1_epi16(QAM256_n2);
register simde__m256i QAM_amp256c = simde_mm256_set1_epi16(QAM256_n3);
simde__m256i *rxF256 = (simde__m256i*)rxF;
simde__m256i *ulch256 = (simde__m256i*)ul_ch;
simde__m256i *llr256 = (simde__m256i *)llr;
for (int i = 0; i < ((length >> 3) + (( length & 7) > 0 ? 1 : 0)); i++)
{
xmmp0 = simde_mm256_madd_epi16(ulch256[i],rxF256[i]);
// xmmp0 contains real part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp1 = simde_mm256_shuffle_epi8(ulch256[i],complex_shuffle256);
xmmp1 = simde_mm256_sign_epi16(xmmp1,conj256);
xmmp1 = simde_mm256_madd_epi16(xmmp1,rxF256[i]);
// xmmp1 contains imag part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp0 = simde_mm256_srai_epi32(xmmp0,output_shift);
xmmp1 = simde_mm256_srai_epi32(xmmp1,output_shift);
xmmp2 = simde_mm256_unpacklo_epi32(xmmp0,xmmp1);
xmmp3 = simde_mm256_unpackhi_epi32(xmmp0,xmmp1);
xmmp4 = simde_mm256_packs_epi32(xmmp2,xmmp3);
// compute channel amplitude for LLR
xmmp0 = simde_mm256_madd_epi16(ulch256[i],ulch256[i]);
xmmp0 = simde_mm256_srai_epi32(xmmp0,output_shift);
xmmp0 = simde_mm256_packs_epi32(xmmp0,xmmp0); // |h|^2 packed/saturated to 16 bits
xmmp2 = simde_mm256_unpacklo_epi16(xmmp0,xmmp0);
xmmp1 = simde_mm256_mulhrs_epi16(xmmp2,QAM_amp256);
xmmp6 = simde_mm256_mulhrs_epi16(xmmp2,QAM_amp256b);
xmmp8 = simde_mm256_mulhrs_epi16(xmmp2,QAM_amp256c);
xmmp2 = simde_mm256_abs_epi16(xmmp4); // even 16-bit lanes of xmmp2 -> |y_R|, odd lanes -> |y_I|
xmmp2 = simde_mm256_subs_epi16(xmmp1,xmmp2); // even lanes -> a1*|h|^2 - |y_R|, odd lanes -> a1*|h|^2 - |y_I|
// xmmp2 contains 16 LLRs
xmmp7 = simde_mm256_abs_epi16(xmmp2);
xmmp7 = simde_mm256_subs_epi16(xmmp6,xmmp7); // contains 16 LLRs
xmmp9 = simde_mm256_abs_epi16(xmmp7);
xmmp9 = simde_mm256_subs_epi16(xmmp8,xmmp9); // contains 16 LLRs
// xmmp4 A0 A1 A2 A3 A4 A5 A6 A7
// xmmp2 B0 B1 B2 B3 B4 B5 B6 B7
// xmmp7 C0 C1 C2 C3 C4 C5 C6 C7
// xmmp9 D0 D1 D2 D3 D4 D5 D6 D7
xmmp1 = simde_mm256_unpacklo_epi32(xmmp4,xmmp2); // A0 B0 A1 B1 A4 B4 A5 B5
xmmp3 = simde_mm256_unpackhi_epi32(xmmp4,xmmp2); // A2 B2 A3 B3 A6 B6 A7 B7
xmmp5 = simde_mm256_unpacklo_epi32(xmmp7,xmmp9); // C0 D0 C1 D1 C4 D4 C5 D5
xmmp6 = simde_mm256_unpackhi_epi32(xmmp7,xmmp9); // C2 D2 C3 D3 C6 D6 C7 D7
xmmp2 = simde_mm256_unpacklo_epi64(xmmp1,xmmp5); // A0 B0 C0 D0 A4 B4 C4 D4
xmmp4 = simde_mm256_unpackhi_epi64(xmmp1,xmmp5); // A1 B1 C1 D1 A5 B5 C5 D5
xmmp1 = simde_mm256_unpacklo_epi64(xmmp3,xmmp6); // A2 B2 C2 D2 A6 B6 C6 D6
xmmp5 = simde_mm256_unpackhi_epi64(xmmp3,xmmp6); // A3 B3 C3 D3 A7 B7 C7 D7
if (aarx == 0)
{
llr256[0] = simde_mm256_permute2x128_si256(xmmp2, xmmp4, 0x20); // A0 B0 C0 D0 A1 B1 C1 D1
llr256[1] = simde_mm256_permute2x128_si256(xmmp1, xmmp5, 0x20); // A2 B2 C2 D2 A3 B3 C3 D3
llr256[2] = simde_mm256_permute2x128_si256(xmmp2, xmmp4, 0x31); // A4 B4 C4 D4 A5 B5 C5 D5
llr256[3] = simde_mm256_permute2x128_si256(xmmp1, xmmp5, 0x31); // A6 B6 C6 D6 A7 B7 C7 D7
llr256+=4;
}
else
{
llr256[0] = simde_mm256_adds_epi16(llr256[0],simde_mm256_permute2x128_si256(xmmp2, xmmp4, 0x20)); // A0 B0 C0 D0 A1 B1 C1 D1
llr256[1] = simde_mm256_adds_epi16(llr256[1],simde_mm256_permute2x128_si256(xmmp1, xmmp5, 0x20)); // A2 B2 C2 D2 A3 B3 C3 D3
llr256[2] = simde_mm256_adds_epi16(llr256[2],simde_mm256_permute2x128_si256(xmmp2, xmmp4, 0x31)); // A4 B4 C4 D4 A5 B5 C5 D5
llr256[3] = simde_mm256_adds_epi16(llr256[3],simde_mm256_permute2x128_si256(xmmp1, xmmp5, 0x31)); // A6 B6 C6 D6 A7 B7 C7 D7
llr256+=4;
}
}
simde__m128i *llr128 = (simde__m128i*)llr256;
if ((length&7) >= 4) { //there is a single 128-bit input element remaining
int nb_re128 = length>>2;
simde__m128i xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6;
simde__m128i complex_shuffle128 = simde_mm_set_epi8(13,12,15,14,9,8,11,10,5,4,7,6,1,0,3,2);
simde__m128i conj128 = simde_mm_set_epi16(1,-1,1,-1,1,-1,1,-1);
simde__m128i *rxF128 = (simde__m128i*)rxF;
simde__m128i *ulch128 = (simde__m128i*)ul_ch;
simde__m128i QAM_amp = simde_mm_set1_epi16(QAM256_n1); // 8/sqrt(170)
simde__m128i QAM_ampb = simde_mm_set1_epi16(QAM256_n2);
simde__m128i QAM_ampc = simde_mm_set1_epi16(QAM256_n3);
xmm0 = simde_mm_madd_epi16(ulch128[nb_re128-1],rxF128[nb_re128-1]);
// xmm0 contains real part of 4 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmm1 = simde_mm_shuffle_epi8(ulch128[nb_re128-1],complex_shuffle128);
xmm1 = simde_mm_sign_epi16(xmm1,conj128);
xmm1 = simde_mm_madd_epi16(xmm1,rxF128[nb_re128-1]);
// xmm1 contains imag part of 4 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmm0 = simde_mm_srai_epi32(xmm0,output_shift);
xmm1 = simde_mm_srai_epi32(xmm1,output_shift);
xmm2 = simde_mm_unpacklo_epi32(xmm0,xmm1);
xmm3 = simde_mm_unpackhi_epi32(xmm0,xmm1);
xmm4 = simde_mm_packs_epi32(xmm2,xmm3);
// compute channel amplitude for LLR
xmm0 = simde_mm_madd_epi16(ulch128[nb_re128-1],ulch128[nb_re128-1]);
xmm0 = simde_mm_srai_epi32(xmm0,output_shift);
xmm0 = simde_mm_packs_epi32(xmm0,xmm0); // |h|^2 packed to 16 bits
xmm2 = simde_mm_unpacklo_epi16(xmm0,xmm0);
xmm1 = simde_mm_mulhrs_epi16(xmm2,QAM_amp);
......@@ -1703,7 +2160,7 @@ void inner_rx_256qam (int *rxF,
void inner_rx_64qam(int * restrict rxF, int * restrict ul_ch, int16_t *restrict llr, int aarx, int length, int output_shift)
{
#if !USE_128BIT
#ifndef USE_128BIT
register simde__m256i xmmp0, xmmp1, xmmp2, xmmp3, xmmp4, xmmp6, xmmp7;
register simde__m256i complex_shuffle256 = simde_mm256_set_epi8(29, 28, 31, 30, 25, 24, 27, 26, 21, 20, 23, 22, 17, 16, 19, 18, 13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2);
register simde__m256i conj256 = simde_mm256_set_epi16(1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1);
......@@ -1836,9 +2293,9 @@ void inner_rx_64qam(int * restrict rxF, int * restrict ul_ch, int16_t *restrict
if (length & 3)
{
int i = (length>>1) - 1;
simde__m64 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6;
simde__m64 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5;
simde__m64 complex_shuffle64 = simde_mm_set_pi8(5,4,7,6,1,0,3,2);
simde__m64 conj64 = simde_mm_set_pi16(-1,1,-1,1);
simde__m64 conj64 = simde_mm_set_pi16(1, -1, 1, -1);
simde__m64 *rxF64 = (simde__m64*)rxF;
simde__m64 *ulch64 = (simde__m64*)ul_ch;
simde__m64 QAM_amp = simde_mm_set1_pi16(QAM64_n1);
......@@ -1886,7 +2343,7 @@ void inner_rx_64qam(int * restrict rxF, int * restrict ul_ch, int16_t *restrict
}
void inner_rx_16qam(int * rxF, int * ul_ch, int16_t * llr, int aarx, int length, int output_shift) {
#if !USE_128BIT
#ifndef USE_128BIT
register simde__m256i xmmp0,xmmp1,xmmp2,xmmp3,xmmp4,xmmp5;
register simde__m256i complex_shuffle256 = simde_mm256_set_epi8(29,28,31,30,25,24,27,26,21,20,23,22,17,16,19,18,13,12,15,14,9,8,11,10,5,4,7,6,1,0,3,2);
register simde__m256i conj256 = simde_mm256_set_epi16(1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1);
......@@ -1957,7 +2414,7 @@ void inner_rx_16qam( int * rxF, int * ul_ch, int16_t * llr, int aarx, int length
}
}
#else
register simde__m128i xmmp0, xmmp1, xmmp2, xmmp3, xmmp4, xmmp5;
simde__m128i xmmp0, xmmp1, xmmp2, xmmp3, xmmp4, xmmp5;
register simde__m128i complex_shuffle128 = simde_mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2);
register simde__m128i conj128 = simde_mm_set_epi16(1, -1, 1, -1, 1, -1, 1, -1);
......@@ -2012,9 +2469,9 @@ void inner_rx_16qam( int * rxF, int * ul_ch, int16_t * llr, int aarx, int length
if (length & 3)
{
int i = (length>>1) - 1;
simde__m64 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6;
simde__m64 xmm0, xmm1, xmm2, xmm3, xmm4;
simde__m64 complex_shuffle64 = simde_mm_set_pi8(5,4,7,6,1,0,3,2);
simde__m64 conj64 = simde_mm_set_pi16(-1,1,-1,1);
simde__m64 conj64 = simde_mm_set_pi16(1, -1, 1, -1);
simde__m64 *rxF64 = (simde__m64*)rxF;
simde__m64 *ulch64 = (simde__m64*)ul_ch;
simde__m64 QAM_amp = simde_mm_set1_pi16(QAM16_n1);
......@@ -2069,7 +2526,7 @@ void nr_pusch_symbol_processing_noprecoding(void *arg)
nfapi_nr_pusch_pdu_t *rel15_ul = rdata->rel15_ul;
int ulsch_id = rdata->ulsch_id;
int slot = rdata->slot;
int16_t *llr = rdata->llr;
NR_gNB_PUSCH *pusch_vars = &gNB->pusch_vars[ulsch_id];
int16_t *s = rdata->s;
for (int symbol = rdata->startSymbol; symbol < rdata->startSymbol+rdata->numSymbols; symbol++)
{
......@@ -2089,7 +2546,9 @@ void nr_pusch_symbol_processing_noprecoding(void *arg)
LOG_I(PHY,"symbol %d: nb_re_pusch %d, DMRS symbol used for Chest :%d \n", symbol, nb_re_pusch, gNB->pusch_vars[ulsch_id].dmrs_symbol);
if (nb_re_pusch == 0) continue;
if (rel15_ul->nrOfLayers == 1)
{
int16_t *llr = &rdata->llr[0][pusch_vars->llr_offset[symbol]];
void (*inner_rx)(int *,int *,int16_t *,int,int,int);
if (rel15_ul->qam_mod_order == 2) inner_rx = inner_rx_qpsk;
else if (rel15_ul->qam_mod_order == 4) inner_rx = inner_rx_16qam;
......@@ -2101,7 +2560,7 @@ void nr_pusch_symbol_processing_noprecoding(void *arg)
int soffset = (slot&3)*frame_parms->symbols_per_slot*frame_parms->ofdm_symbol_size;
int32_t rxFext[nb_re_pusch+8] __attribute__((aligned(32)));
int32_t chFext[nb_re_pusch+8] __attribute__((aligned(32)));
int16_t llr16[(nb_re_pusch*rel15_ul->qam_mod_order)+16] __attribute__((aligned(32)));
// int16_t llr16[(nb_re_pusch*rel15_ul->qam_mod_order)+16] __attribute__((aligned(32)));
for (int aa=0;aa<frame_parms->nb_antennas_rx;aa++) {
nr_ulsch_extract_rbs0(gNB->common_vars.rxdataF[aa],
gNB->pusch_vars[ulsch_id].ul_ch_estimates[aa],
......@@ -2114,157 +2573,60 @@ void nr_pusch_symbol_processing_noprecoding(void *arg)
rel15_ul,
frame_parms);
// demodulation
inner_rx(rxFext, chFext, llr16, aa, nb_re_pusch, gNB->pusch_vars[ulsch_id].log2_maxh);
inner_rx(rxFext, chFext, llr, aa, nb_re_pusch, gNB->pusch_vars[ulsch_id].log2_maxh);
}
// unscrambling
simde__m64 *llr64 = (simde__m64 *) llr;
for (int i=0;i<(nb_re_pusch*rel15_ul->qam_mod_order)>>2;i++) {
llr64[i] = simde_mm_mullo_pi16(((simde__m64 *)llr16)[i],((simde__m64 *)s)[i]);
}
s+=(nb_re_pusch*rel15_ul->qam_mod_order);
llr+=(nb_re_pusch*rel15_ul->qam_mod_order);
}
// int64_t end = time_now_us();
// printf("Elapsed time = %ld tstamp %ld id %lu \n", end - now, end, pthread_self());
}
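/* Editor's note (hedged): the unscrambling step above (removed for the
 * multi-layer path in this commit) multiplies each LLR by the matching entry
 * of s, a +/-1 sequence derived from the Gold scrambling sequence;
 * simde_mm_mullo_pi16 handles four 16-bit lanes per iteration. Scalar
 * equivalent: */
static inline void unscramble_llr_ref(int16_t *llr, const int16_t *s, int n)
{
  for (int i = 0; i < n; i++)
    llr[i] = (int16_t)(llr[i] * s[i]); // s[i] is +1 or -1
}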
/*
void nr_pusch_symbol_processing(void *arg) {
puschSymbolProc_t *rdata=(puschSymbolProc_t*)arg;
PHY_VARS_gNB *gNB=rdata->gNB;
NR_DL_FRAME_PARMS *frame_parms=rdata->frame_parms;
nfapi_nr_pusch_pdu_t *rel15_ul=rdata->rel15_ul;
int ulsch_id=rdata->ulsch_id;
int slot=rdata->slot;
int symbol=rdata->symbol;
int dmrs_symbol_flag = (rel15_ul->ul_dmrs_symb_pos >> symbol) & 0x01;
int nb_re_pusch = gNB->pusch_vars[ulsch_id]->ul_valid_re_per_slot[symbol];
if (dmrs_symbol_flag == 1) {
if ((rel15_ul->ul_dmrs_symb_pos >> ((symbol + 1) % frame_parms->symbols_per_slot)) & 0x01)
AssertFatal(1==0,"Double DMRS configuration is not yet supported\n");
gNB->pusch_vars[ulsch_id]->dmrs_symbol = symbol;
}
LOG_D(PHY,"symbol %d: nb_re_pusch %d, DMRS symbol used for Chest :%d \n", symbol, nb_re_pusch, gNB->pusch_vars[ulsch_id]->dmrs_symbol);
#ifdef __AVX2__
int off = ((rel15_ul->rb_size&1) == 1)? 4:0;
#else
int off = 0;
#endif
//----------------------------------------------------------
//--------------------- RBs extraction ---------------------
//----------------------------------------------------------
if (nb_re_pusch > 0) {
if (gNB->pusch_vars[ulsch_id]->extraction_done[symbol]!=1) {
start_meas(&gNB->ulsch_rbs_extraction_stats);
nr_ulsch_extract_rbs(gNB->common_vars.rxdataF,
gNB->pusch_vars[ulsch_id],
slot,
symbol,
dmrs_symbol_flag,
rel15_ul,
frame_parms);
stop_meas(&gNB->ulsch_rbs_extraction_stats);
}
//----------------------------------------------------------
//--------------------- Channel Scaling --------------------
//----------------------------------------------------------
nr_ulsch_scale_channel(gNB->pusch_vars[ulsch_id]->ul_ch_estimates_ext,
frame_parms,
gNB->ulsch[ulsch_id],
symbol,
dmrs_symbol_flag,
nb_re_pusch,
rel15_ul->nrOfLayers,
rel15_ul->rb_size);
//----------------------------------------------------------
//--------------------- Channel Compensation ---------------
//----------------------------------------------------------
start_meas(&gNB->ulsch_channel_compensation_stats);
LOG_D(PHY,"Doing channel compensations log2_maxh %d\n",gNB->pusch_vars[ulsch_id]->log2_maxh);
nr_ulsch_channel_compensation(gNB->pusch_vars[ulsch_id]->rxdataF_ext,
gNB->pusch_vars[ulsch_id]->ul_ch_estimates_ext,
gNB->pusch_vars[ulsch_id]->ul_ch_mag0,
gNB->pusch_vars[ulsch_id]->ul_ch_magb0,
gNB->pusch_vars[ulsch_id]->rxdataF_comp,
(rel15_ul->nrOfLayers>1) ? gNB->pusch_vars[ulsch_id]->rho : NULL,
frame_parms,
symbol,
nb_re_pusch,
dmrs_symbol_flag,
rel15_ul->dmrs_config_type,
rel15_ul->qam_mod_order,
rel15_ul->nrOfLayers,
rel15_ul->rb_size,
gNB->pusch_vars[ulsch_id]->log2_maxh);
stop_meas(&gNB->ulsch_channel_compensation_stats);
start_meas(&gNB->ulsch_mrc_stats);
nr_ulsch_detection_mrc(frame_parms,
gNB->pusch_vars[ulsch_id]->rxdataF_comp,
gNB->pusch_vars[ulsch_id]->ul_ch_mag0,
gNB->pusch_vars[ulsch_id]->ul_ch_magb0,
(rel15_ul->nrOfLayers>1) ? gNB->pusch_vars[ulsch_id]->rho : NULL,
rel15_ul->nrOfLayers,
symbol,
rel15_ul->rb_size,
nb_re_pusch);
stop_meas(&gNB->ulsch_mrc_stats);
// transform precoding = 0 means enabled
if (rel15_ul->transform_precoding == 0) {
#ifdef __AVX2__
// For odd number of resource blocks need byte alignment to multiple of 8
int nb_re_pusch2 = nb_re_pusch + (nb_re_pusch&7);
#else
int nb_re_pusch2 = nb_re_pusch;
#endif
// perform IDFT operation on the compensated rxdata if transform precoding is enabled
nr_idft(&gNB->pusch_vars[ulsch_id]->rxdataF_comp[0][symbol * nb_re_pusch2], nb_re_pusch);
LOG_D(PHY,"Transform precoding being done on data- symbol: %d, nb_re_pusch: %d\n", symbol, nb_re_pusch);
}
start_meas(&gNB->ulsch_llr_stats);
nr_ulsch_compute_llr(&gNB->pusch_vars[ulsch_id]->rxdataF_comp[0][symbol * (off + rel15_ul->rb_size * NR_NB_SC_PER_RB)],
gNB->pusch_vars[ulsch_id]->ul_ch_mag0,
gNB->pusch_vars[ulsch_id]->ul_ch_magb0,
&gNB->pusch_vars[ulsch_id]->llr[gNB->pusch_vars[ulsch_id]->llr_offset[symbol]],
rel15_ul->rb_size,
gNB->pusch_vars[ulsch_id]->ul_valid_re_per_slot[symbol],
symbol,
rel15_ul->qam_mod_order);
stop_meas(&gNB->ulsch_llr_stats);
//----------------------------------------------------------
//--------------------- PTRS Processing --------------------
//----------------------------------------------------------
// In case PTRS is enabled then LLR will be calculated after PTRS symbols are processed
//otherwise LLR are calculated for each symbol based upon DMRS channel estimates only.
if (rel15_ul->pdu_bit_map & PUSCH_PDU_BITMAP_PUSCH_PTRS) {
start_meas(&gNB->ulsch_ptrs_processing_stats);
nr_pusch_ptrs_processing(gNB,
frame_parms,
rel15_ul,
ulsch_id,
slot,
symbol,
nb_re_pusch);
stop_meas(&gNB->ulsch_ptrs_processing_stats);
// unscrambling
// simde__m64 *llr64 = (simde__m64 *) llr;
// for (int i=0;i<(nb_re_pusch*rel15_ul->qam_mod_order)>>2;i++) {
// llr64[i] = simde_mm_mullo_pi16(((simde__m64 *)llr16)[i],((simde__m64 *)s)[i]);
// }
// s+=(nb_re_pusch*rel15_ul->qam_mod_order);
// llr+=(nb_re_pusch*rel15_ul->qam_mod_order);
}
else
{
int soffset = (slot&3)*frame_parms->symbols_per_slot*frame_parms->ofdm_symbol_size;
void (*inner_rx)(NR_DL_FRAME_PARMS *,
NR_gNB_PUSCH *,
nfapi_nr_pusch_pdu_t *,
int32_t **,
int32_t **,
int16_t **,
int32_t,
int32_t,
int32_t,
int32_t,
int32_t,
int16_t,
int32_t,
int32_t);
if (rel15_ul->qam_mod_order == 2) inner_rx = inner_rx_qpsk_2layer;
else if (rel15_ul->qam_mod_order == 4) inner_rx = inner_rx_16qam_2layer;
else if (rel15_ul->qam_mod_order == 6) inner_rx = inner_rx_64qam_2layer;
else AssertFatal(1==0,"rel15_ul->qam_mod_order %d, pusch_pdu->dmrs_config_type %d\n",
rel15_ul->qam_mod_order,rel15_ul->dmrs_config_type);
// Subtract total PTRS RE's in the symbol from PUSCH RE's
gNB->pusch_vars[ulsch_id]->ul_valid_re_per_slot[symbol] -= gNB->pusch_vars[ulsch_id]->ptrs_re_per_slot;
inner_rx(frame_parms,
pusch_vars,
rel15_ul,
(int32_t**)gNB->common_vars.rxdataF,
gNB->pusch_vars[ulsch_id].ul_ch_estimates,
rdata->llr,
rel15_ul->nrOfLayers,
frame_parms->nb_antennas_rx,
soffset,
nb_re_pusch, // length
symbol, // symbol index
rel15_ul->rb_size, // ofdm size
dmrs_symbol_flag,
gNB->pusch_vars[ulsch_id].log2_maxh);
}
}
// int64_t end = time_now_us();
// printf("Elapsed time = %ld tstamp %ld id %lu \n", end - now, end, pthread_self());
}
*/
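/* Editor's sketch (hedged): the 2-layer zero-forcing equalizer below builds
 * the 2x2 Gram matrix A = H^H H per RE (nr_det_HhH() computes det(A)) and
 * applies x_hat = A^{-1} H^H y. Floating-point reference for one RE with two
 * rx antennas: */
#include <complex.h>
static void zf_2x2_ref(const double complex H[2][2], // H[aarx][layer]
                       const double complex y[2], double complex x_hat[2])
{
  // A = H^H H (2x2 Hermitian) and z = H^H y
  double complex a00 = conj(H[0][0]) * H[0][0] + conj(H[1][0]) * H[1][0];
  double complex a01 = conj(H[0][0]) * H[0][1] + conj(H[1][0]) * H[1][1];
  double complex a11 = conj(H[0][1]) * H[0][1] + conj(H[1][1]) * H[1][1];
  double complex z0  = conj(H[0][0]) * y[0] + conj(H[1][0]) * y[1];
  double complex z1  = conj(H[0][1]) * y[0] + conj(H[1][1]) * y[1];
  double complex det = a00 * a11 - a01 * conj(a01); // real and positive for full-rank H
  x_hat[0] = (a11 * z0 - a01 * z1) / det;
  x_hat[1] = (a00 * z1 - conj(a01) * z0) / det;
}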
/* Zero Forcing Rx function: nr_det_HhH()
*
*
......@@ -2959,12 +3321,12 @@ uint8_t nr_ulsch_zero_forcing_rx_2layers(NR_DL_FRAME_PARMS *frame_parms,
}
/* Main Function */
int nr_rx_pusch(PHY_VARS_gNB *gNB,
int nr_rx_pusch (PHY_VARS_gNB *gNB,
uint8_t ulsch_id,
uint32_t frame,
uint8_t slot,
unsigned char harq_pid)
{
{
uint8_t aarx, aatx;
uint32_t nb_re_pusch, bwp_start_subcarrier;
......@@ -2987,22 +3349,19 @@ int nr_rx_pusch(PHY_VARS_gNB *gNB,
rel15_ul->nrOfLayers,0,rel15_ul->bwp_start,0,rel15_ul->start_symbol_index,rel15_ul->nr_of_symbols,
rel15_ul->num_dmrs_cdm_grps_no_data,rel15_ul->ul_dmrs_symb_pos,rel15_ul->dmrs_ports);
//----------------------------------------------------------
//--------------------- Channel estimation ---------------------
//----------------- Channel estimation ---------------------
//----------------------------------------------------------
start_meas(&gNB->ulsch_channel_estimation_stats);
int max_ch = 0;
for (uint8_t symbol = rel15_ul->start_symbol_index; symbol < (rel15_ul->start_symbol_index + rel15_ul->nr_of_symbols); symbol++)
{
uint8_t dmrs_symbol_flag = (rel15_ul->ul_dmrs_symb_pos >> symbol) & 0x01;
LOG_D(PHY, "symbol %d, dmrs_symbol_flag :%d\n", symbol, dmrs_symbol_flag);
if (dmrs_symbol_flag == 1) {
if (dmrs_symbol_flag == 1)
{
if (pusch_vars->dmrs_symbol == INVALID_VALUE)
pusch_vars->dmrs_symbol = symbol;
for (int nl=0; nl<rel15_ul->nrOfLayers; nl++)
{
nr_pusch_channel_estimation(gNB,
slot,
get_dmrs_port(nl,rel15_ul->dmrs_ports),
......@@ -3011,26 +3370,20 @@ int nr_rx_pusch(PHY_VARS_gNB *gNB,
bwp_start_subcarrier,
rel15_ul,
&max_ch);
}
nr_gnb_measurements(gNB, ulsch, pusch_vars, symbol,rel15_ul->nrOfLayers);
for (aarx = 0; aarx < frame_parms->nb_antennas_rx; aarx++)
{
if (symbol == rel15_ul->start_symbol_index) {
if (symbol == rel15_ul->start_symbol_index)
{
pusch_vars->ulsch_power[aarx] = 0;
pusch_vars->ulsch_noise_power[aarx] = 0;
}
for (aatx = 0; aatx < rel15_ul->nrOfLayers; aatx++) {
pusch_vars->ulsch_power[aarx] += signal_energy_nodc(
&pusch_vars->ul_ch_estimates[aatx*gNB->frame_parms.nb_antennas_rx+aarx][symbol * frame_parms->ofdm_symbol_size],
for (aatx = 0; aatx < rel15_ul->nrOfLayers; aatx++)
pusch_vars->ulsch_power[aarx] += signal_energy_nodc(&pusch_vars->ul_ch_estimates[aatx*gNB->frame_parms.nb_antennas_rx+aarx][symbol * frame_parms->ofdm_symbol_size],
rel15_ul->rb_size * 12);
}
for (int rb = 0; rb < rel15_ul->rb_size; rb++) {
pusch_vars->ulsch_noise_power[aarx] +=
gNB->measurements.n0_subband_power[aarx][rel15_ul->bwp_start + rel15_ul->rb_start + rb] /
rel15_ul->rb_size;
}
for (int rb = 0; rb < rel15_ul->rb_size; rb++)
pusch_vars->ulsch_noise_power[aarx] += gNB->measurements.n0_subband_power[aarx][rel15_ul->bwp_start + rel15_ul->rb_start + rb] / rel15_ul->rb_size;
LOG_D(PHY,"aa %d, bwp_start%d, rb_start %d, rb_size %d: ulsch_power %d, ulsch_noise_power %d\n",aarx,
rel15_ul->bwp_start,rel15_ul->rb_start,rel15_ul->rb_size,
pusch_vars->ulsch_power[aarx],
......@@ -3045,32 +3398,31 @@ int nr_rx_pusch(PHY_VARS_gNB *gNB,
uint8_t shift_ch_ext = rel15_ul->nrOfLayers > 1 ? log2_approx(max_ch >> 11) : 0;
int ad_shift = 0;
if (rel15_ul->nrOfLayers == 1) {
if (rel15_ul->nrOfLayers == 1)
ad_shift = 1 + log2_approx(frame_parms->nb_antennas_rx >> 2);
} else {
else
ad_shift = -3; // For 2-layers, we are already doing a bit shift in the nr_ulsch_zero_forcing_rx_2layers() function, so we can use more bits
}
for(uint8_t symbol = rel15_ul->start_symbol_index; symbol < (rel15_ul->start_symbol_index + rel15_ul->nr_of_symbols); symbol++) {
for(uint8_t symbol = rel15_ul->start_symbol_index; symbol < (rel15_ul->start_symbol_index + rel15_ul->nr_of_symbols); symbol++)
{
uint8_t dmrs_symbol_flag = (rel15_ul->ul_dmrs_symb_pos >> symbol) & 0x01;
if (dmrs_symbol_flag == 1) {
if (dmrs_symbol_flag == 1)
{
if ((rel15_ul->ul_dmrs_symb_pos >> ((symbol + 1) % frame_parms->symbols_per_slot)) & 0x01)
AssertFatal(1==0,"Double DMRS configuration is not yet supported\n");
pusch_vars->dmrs_symbol = symbol;
if (rel15_ul->dmrs_config_type == 0) {
if (rel15_ul->dmrs_config_type == 0)
{
// if num_dmrs_cdm_grps_no_data is 1, only even REs carry no data
// if num_dmrs_cdm_grps_no_data is 2, both even and odd REs carry no data
nb_re_pusch = rel15_ul->rb_size *(12 - (rel15_ul->num_dmrs_cdm_grps_no_data*6));
}
else {
else
nb_re_pusch = rel15_ul->rb_size *(12 - (rel15_ul->num_dmrs_cdm_grps_no_data*4));
}
}
else {
else
nb_re_pusch = rel15_ul->rb_size * NR_NB_SC_PER_RB;
}
pusch_vars->ul_valid_re_per_slot[symbol] = nb_re_pusch;
LOG_D(PHY,"symbol %d: nb_re_pusch %d, DMRS symbol used for Chest :%d \n", symbol, nb_re_pusch, pusch_vars->dmrs_symbol);
......@@ -3078,7 +3430,8 @@ int nr_rx_pusch(PHY_VARS_gNB *gNB,
//----------------------------------------------------------
//--------------------- RBs extraction ---------------------
//----------------------------------------------------------
if (nb_re_pusch > 0) {
if (nb_re_pusch > 0)
{
start_meas(&gNB->ulsch_rbs_extraction_stats);
nr_ulsch_extract_rbs(gNB->common_vars.rxdataF,
pusch_vars,
......@@ -3102,7 +3455,8 @@ int nr_rx_pusch(PHY_VARS_gNB *gNB,
rel15_ul->rb_size,
shift_ch_ext);
if (pusch_vars->cl_done==0)
{
nr_ulsch_channel_level(pusch_vars->ul_ch_estimates_ext,
frame_parms,
avg,
......@@ -3111,11 +3465,9 @@ int nr_rx_pusch(PHY_VARS_gNB *gNB,
rel15_ul->nrOfLayers,
rel15_ul->rb_size);
avgs = 0;
for (aatx = 0; aatx < rel15_ul->nrOfLayers; aatx++)
for (aarx = 0; aarx < frame_parms->nb_antennas_rx; aarx++)
avgs = cmax(avgs, avg[aatx*frame_parms->nb_antennas_rx+aarx]);
pusch_vars->log2_maxh = (log2_approx(avgs)/2)+2;
pusch_vars->cl_done = 1;
}
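/* Worked example (illustrative): if the largest per-layer/per-antenna
 * average energy is avgs = 4096, then log2_approx(4096) = 12 and
 * log2_maxh = 12/2 + 2 = 8, i.e. the scaling assumes the compensated
 * samples fit in about 8 magnitude bits before the LLR stage. */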
......@@ -3154,8 +3506,8 @@ int nr_rx_pusch(PHY_VARS_gNB *gNB,
symbol,
rel15_ul->rb_size,
nb_re_pusch);
// Apply zero forcing for 2 Tx layers
if (rel15_ul->nrOfLayers == 2)
nr_ulsch_zero_forcing_rx_2layers(frame_parms,
pusch_vars->rxdataF_comp,
pusch_vars->ul_ch_mag0,
......@@ -3169,7 +3521,8 @@ int nr_rx_pusch(PHY_VARS_gNB *gNB,
nb_re_pusch);
stop_meas(&gNB->ulsch_mrc_stats);
if (rel15_ul->transform_precoding == transformPrecoder_enabled)
{
// For an odd number of resource blocks, pad the symbol to a multiple of 8 REs for alignment
int nb_re_pusch2 = nb_re_pusch + (nb_re_pusch&7);
......@@ -3181,8 +3534,10 @@ int nr_rx_pusch(PHY_VARS_gNB *gNB,
//--------------------- PTRS Processing --------------------
//----------------------------------------------------------
/* In case PTRS is enabled, LLRs are calculated after the PTRS symbols are processed; *
* otherwise LLRs are calculated for each symbol based upon DMRS channel estimates only. *
*/
if (rel15_ul->pdu_bit_map & PUSCH_PDU_BITMAP_PUSCH_PTRS)
{
start_meas(&gNB->ulsch_ptrs_processing_stats);
nr_pusch_ptrs_processing(gNB,
frame_parms,
......@@ -3201,7 +3556,8 @@ int nr_rx_pusch(PHY_VARS_gNB *gNB,
/*-------------------- LLRs computation -------------------------------------------------------------*/
/*-----------------------------------------------------------------------------------------------------*/
start_meas(&gNB->ulsch_llr_stats);
for (aatx=0; aatx < rel15_ul->nrOfLayers; aatx++)
{
nr_ulsch_compute_llr(&pusch_vars->rxdataF_comp[aatx*frame_parms->nb_antennas_rx][symbol * (off + rel15_ul->rb_size * NR_NB_SC_PER_RB)],
pusch_vars->ul_ch_mag0[aatx*frame_parms->nb_antennas_rx],
pusch_vars->ul_ch_magb0[aatx*frame_parms->nb_antennas_rx],
......@@ -3216,12 +3572,8 @@ int nr_rx_pusch(PHY_VARS_gNB *gNB,
rxdataF_ext_offset += pusch_vars->ul_valid_re_per_slot[symbol];
}
} // symbol loop
return 0;
}
int nr_rx_pusch_tp(PHY_VARS_gNB *gNB,
uint8_t ulsch_id,
......@@ -3250,19 +3602,18 @@ int nr_rx_pusch_tp(PHY_VARS_gNB *gNB,
LOG_D(PHY,"pusch %d.%d : ul_dmrs_symb_pos %x\n",frame,slot,rel15_ul->ul_dmrs_symb_pos);
//----------------------------------------------------------
//------------------- Channel estimation -------------------
//----------------------------------------------------------
start_meas(&gNB->ulsch_channel_estimation_stats);
int max_ch = 0;
for (uint8_t symbol = rel15_ul->start_symbol_index; symbol < (rel15_ul->start_symbol_index + rel15_ul->nr_of_symbols); symbol++)
{
uint8_t dmrs_symbol_flag = (rel15_ul->ul_dmrs_symb_pos >> symbol) & 0x01;
LOG_D(PHY, "symbol %d, dmrs_symbol_flag :%d\n", symbol, dmrs_symbol_flag);
if (dmrs_symbol_flag == 1)
{
if (pusch_vars->dmrs_symbol == INVALID_VALUE)
pusch_vars->dmrs_symbol = symbol;
for (int nl=0; nl<rel15_ul->nrOfLayers; nl++)
nr_pusch_channel_estimation(gNB,
slot,
......@@ -3273,32 +3624,38 @@ int nr_rx_pusch_tp(PHY_VARS_gNB *gNB,
rel15_ul,
&max_ch);
// measure the SNR from the channel estimation
nr_gnb_measurements(gNB, &gNB->ulsch[ulsch_id], pusch_vars, symbol, rel15_ul->nrOfLayers);
for (aarx = 0; aarx < frame_parms->nb_antennas_rx; aarx++) {
if (symbol == rel15_ul->start_symbol_index) {
nr_gnb_measurements(gNB,
&gNB->ulsch[ulsch_id],
pusch_vars,
symbol,
rel15_ul->nrOfLayers);
for (aarx = 0; aarx < frame_parms->nb_antennas_rx; aarx++)
{
if (symbol == rel15_ul->start_symbol_index)
{
pusch_vars->ulsch_power[aarx] = 0;
pusch_vars->ulsch_noise_power[aarx] = 0;
}
pusch_vars->ulsch_power[aarx] += signal_energy_nodc(&pusch_vars->ul_ch_estimates[aarx][symbol * frame_parms->ofdm_symbol_size],
rel15_ul->rb_size * 12);
for (int rb = 0; rb < rel15_ul->rb_size; rb++)
pusch_vars->ulsch_noise_power[aarx] += gNB->measurements.n0_subband_power[aarx][rel15_ul->bwp_start + rel15_ul->rb_start + rb] / rel15_ul->rb_size;
}
}
}
// averaging time domain channel estimates
if (gNB->chest_time == 1)
{
nr_chest_time_domain_avg(frame_parms,
pusch_vars->ul_ch_estimates,
rel15_ul->nr_of_symbols,
rel15_ul->start_symbol_index,
rel15_ul->ul_dmrs_symb_pos,
rel15_ul->rb_size);
pusch_vars->dmrs_symbol = get_next_dmrs_symbol_in_slot(rel15_ul->ul_dmrs_symb_pos,
rel15_ul->start_symbol_index,
rel15_ul->nr_of_symbols);
}
stop_meas(&gNB->ulsch_channel_estimation_stats);
......@@ -3306,14 +3663,15 @@ int nr_rx_pusch_tp(PHY_VARS_gNB *gNB,
start_meas(&gNB->rx_pusch_init_stats);
// Scrambling initialization
int number_dmrs_symbols = 0;
for (int l = rel15_ul->start_symbol_index; l < rel15_ul->start_symbol_index + rel15_ul->nr_of_symbols; l++)
number_dmrs_symbols += ((rel15_ul->ul_dmrs_symb_pos)>>l)&0x01;
number_dmrs_symbols += ((rel15_ul->ul_dmrs_symb_pos)>>l) & 0x01;
int nb_re_dmrs;
if (rel15_ul->dmrs_config_type == pusch_dmrs_type1)
nb_re_dmrs = 6*rel15_ul->num_dmrs_cdm_grps_no_data;
else
nb_re_dmrs = 4*rel15_ul->num_dmrs_cdm_grps_no_data;
// get how many bits are in the slot //
int G = nr_get_G(rel15_ul->rb_size,
rel15_ul->nr_of_symbols,
......@@ -3324,26 +3682,22 @@ int nr_rx_pusch_tp(PHY_VARS_gNB *gNB,
// initialize scrambling sequence //
int16_t s[G+96] __attribute__((aligned(32)));
nr_codeword_unscrambling_init(s, G, 0, rel15_ul->data_scrambling_id, rel15_ul->rnti);
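/* Editor's note (illustrative, assuming the usual definition of nr_get_G()
 * as coded bits = data REs * Qm * layers): for rb_size = 10, 13 PUSCH
 * symbols, one type-1 DMRS symbol with one CDM group (nb_re_dmrs = 6), QPSK
 * and 1 layer: G = (12*13 - 6*1) * 10 * 2 * 1 = 3000 bits. The 96-entry
 * headroom on s[] presumably lets the SIMD unscrambling read past G safely. */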
void (*nr_pusch_symbol_processing_ptr)(void*) = &nr_pusch_symbol_processing_noprecoding;
// void (*nr_pusch_symbol_processing_ptr)(void*) = &nr_pusch_symbol_processing;
// first the computation of channel levels
int nb_re_pusch = 0, meas_symbol = -1;
for(meas_symbol = rel15_ul->start_symbol_index;
meas_symbol < (rel15_ul->start_symbol_index + rel15_ul->nr_of_symbols);
meas_symbol++)
if ((nb_re_pusch = get_nb_re_pusch(frame_parms,rel15_ul,meas_symbol)) > 0)
break;
AssertFatal(nb_re_pusch > 0 && meas_symbol >= 0, "nb_re_pusch %d must be > 0 and meas_symbol %d must be >= 0 here\n", nb_re_pusch, meas_symbol);
start_meas(&gNB->ulsch_rbs_extraction_stats);
// extract the first DMRS symbol for the channel level computation
// move the data REs of the OFDM symbol to the start of the array
nr_ulsch_extract_rbs(gNB->common_vars.rxdataF,
pusch_vars,
......@@ -3354,8 +3708,22 @@ int nr_rx_pusch_tp(PHY_VARS_gNB *gNB,
frame_parms);
stop_meas(&gNB->ulsch_rbs_extraction_stats);
int avgs = 0;
int avg[frame_parms->nb_antennas_rx*rel15_ul->nrOfLayers];
uint8_t shift_ch_ext = rel15_ul->nrOfLayers > 1 ? log2_approx(max_ch >> 11) : 0;
//----------------------------------------------------------
//--------------------- Channel Scaling --------------------
//----------------------------------------------------------
nr_ulsch_scale_channel(pusch_vars->ul_ch_estimates_ext,
frame_parms,
&gNB->ulsch[ulsch_id],
meas_symbol,
(rel15_ul->ul_dmrs_symb_pos >> meas_symbol) & 0x01,
nb_re_pusch,
rel15_ul->nrOfLayers,
rel15_ul->rb_size,
shift_ch_ext);
nr_ulsch_channel_level(pusch_vars->ul_ch_estimates_ext,
frame_parms,
......@@ -3365,23 +3733,18 @@ int nr_rx_pusch_tp(PHY_VARS_gNB *gNB,
rel15_ul->nrOfLayers,
rel15_ul->rb_size);
avgs = 0;
for (int aatx = 0; aatx < rel15_ul->nrOfLayers; aatx++)
for (int aarx = 0; aarx < frame_parms->nb_antennas_rx; aarx++)
avgs = cmax(avgs, avg[aatx*frame_parms->nb_antennas_rx+aarx]);
pusch_vars->log2_maxh = (log2_approx(avgs)>>1)+2;
pusch_vars->cl_done = 1;
pusch_vars->extraction_done[meas_symbol] = 1;
// int64_t start = time_now_us();
// printf("Tasks started %ld \n", start );
stop_meas(&gNB->rx_pusch_init_stats);
start_meas(&gNB->rx_pusch_symbol_processing_stats);
int numSymbols = gNB->num_pusch_symbols_per_thread;
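/* Editor's note (illustrative): numSymbols batches the symbol loop below,
 * e.g. num_pusch_symbols_per_thread = 2 over a 12-symbol PUSCH produces 6
 * independent jobs, each covering two consecutive symbols. */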
#ifdef TASK_MANAGER
puschSymbolProc_t arr[rel15_ul->nr_of_symbols];
......@@ -3393,29 +3756,28 @@ int nr_rx_pusch_tp(PHY_VARS_gNB *gNB,
#ifdef OMP_TP
omp_set_num_threads(4);
#pragma omp parallel
{
#pragma omp single
{
#endif
for(uint8_t symbol = rel15_ul->start_symbol_index;
symbol < (rel15_ul->start_symbol_index + rel15_ul->nr_of_symbols);
symbol += numSymbols)
{
int total_res = 0;
for (int s = 0; s < numSymbols; s++)
{
pusch_vars->ul_valid_re_per_slot[symbol+s] = get_nb_re_pusch(frame_parms,rel15_ul,symbol+s);
pusch_vars->llr_offset[symbol+s] = ((symbol+s) == rel15_ul->start_symbol_index) ?
0 :
pusch_vars->llr_offset[symbol+s-1] + pusch_vars->ul_valid_re_per_slot[symbol+s-1] * rel15_ul->qam_mod_order;
total_res += pusch_vars->ul_valid_re_per_slot[symbol+s];
}
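/* Worked example (illustrative): with qam_mod_order = 4 and
 * ul_valid_re_per_slot = {120, 120, 60} for the first three PUSCH symbols,
 * llr_offset accumulates to {0, 480, 960}, so each symbol's LLRs start right
 * after those of the previous symbol. */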
if (total_res > 0)
{
#ifdef TASK_MANAGER
puschSymbolProc_t *rdata = &arr[idx_arr];
idx_arr++;
......@@ -3428,6 +3790,7 @@ int nr_rx_pusch_tp(PHY_VARS_gNB *gNB,
notifiedFIFO_elt_t *req = newNotifiedFIFO_elt(sizeof(puschSymbolProc_t), id.p, gNB->respPuschSymb, nr_pusch_symbol_processing_ptr); // create a job for Tpool
puschSymbolProc_t *rdata = (puschSymbolProc_t*)NotifiedFifoData(req); // data for the job
#endif
rdata->gNB = gNB;
rdata->frame_parms=frame_parms;
rdata->rel15_ul = rel15_ul;
......@@ -3435,7 +3798,8 @@ int nr_rx_pusch_tp(PHY_VARS_gNB *gNB,
rdata->startSymbol = symbol;
rdata->numSymbols = numSymbols;
rdata->ulsch_id=ulsch_id;
rdata->llr = &pusch_vars->llr[pusch_vars->llr_offset[symbol]];
// rdata->llr = &pusch_vars->llr[pusch_vars->llr_offset[symbol]];
rdata->llr = pusch_vars->llr_layers;
rdata->s = &s[pusch_vars->llr_offset[symbol]];
#ifdef TASK_MANAGER
......@@ -3457,27 +3821,157 @@ nr_pusch_symbol_processing_noprecoding(rdata);
}
#endif
// printf("Waiting %ld \n", time_now_us());
#ifdef TASK_MANAGER
stop_spin_task_manager(&gNB->man);
wait_all_spin_task_manager(&gNB->man);
#elif OMP_TP
#pragma omp taskwait
#else
while (gNB->nbSymb > 0)
{
notifiedFIFO_elt_t *req=pullTpool(gNB->respPuschSymb, &gNB->threadPool);
gNB->nbSymb--;
delNotifiedFIFO_elt(req);
}
#endif
// int64_t const finish = time_now_us();
// printf("Tasks finished %ld delay %ld \n", finish , finish-start );
stop_meas(&gNB->rx_pusch_symbol_processing_stats);
return 0;
}
/*
void nr_pusch_symbol_processing(void *arg) {
puschSymbolProc_t *rdata=(puschSymbolProc_t*)arg;
PHY_VARS_gNB *gNB=rdata->gNB;
NR_DL_FRAME_PARMS *frame_parms=rdata->frame_parms;
nfapi_nr_pusch_pdu_t *rel15_ul=rdata->rel15_ul;
int ulsch_id=rdata->ulsch_id;
int slot=rdata->slot;
int symbol=rdata->symbol;
int dmrs_symbol_flag = (rel15_ul->ul_dmrs_symb_pos >> symbol) & 0x01;
int nb_re_pusch = gNB->pusch_vars[ulsch_id]->ul_valid_re_per_slot[symbol];
if (dmrs_symbol_flag == 1) {
if ((rel15_ul->ul_dmrs_symb_pos >> ((symbol + 1) % frame_parms->symbols_per_slot)) & 0x01)
AssertFatal(1==0,"Double DMRS configuration is not yet supported\n");
gNB->pusch_vars[ulsch_id]->dmrs_symbol = symbol;
}
LOG_D(PHY,"symbol %d: nb_re_pusch %d, DMRS symbl used for Chest :%d \n", symbol, nb_re_pusch, gNB->pusch_vars[ulsch_id]->dmrs_symbol);
#ifdef __AVX2__
int off = ((rel15_ul->rb_size&1) == 1)? 4:0;
#else
int off = 0;
#endif
//----------------------------------------------------------
//--------------------- RBs extraction ---------------------
//----------------------------------------------------------
if (nb_re_pusch > 0) {
if (gNB->pusch_vars[ulsch_id]->extraction_done[symbol]!=1) {
start_meas(&gNB->ulsch_rbs_extraction_stats);
nr_ulsch_extract_rbs(gNB->common_vars.rxdataF,
gNB->pusch_vars[ulsch_id],
slot,
symbol,
dmrs_symbol_flag,
rel15_ul,
frame_parms);
stop_meas(&gNB->ulsch_rbs_extraction_stats);
}
//----------------------------------------------------------
//--------------------- Channel Scaling --------------------
//----------------------------------------------------------
nr_ulsch_scale_channel(gNB->pusch_vars[ulsch_id]->ul_ch_estimates_ext,
frame_parms,
gNB->ulsch[ulsch_id],
symbol,
dmrs_symbol_flag,
nb_re_pusch,
rel15_ul->nrOfLayers,
rel15_ul->rb_size);
//----------------------------------------------------------
//--------------------- Channel Compensation ---------------
//----------------------------------------------------------
start_meas(&gNB->ulsch_channel_compensation_stats);
LOG_D(PHY,"Doing channel compensations log2_maxh %d\n",gNB->pusch_vars[ulsch_id]->log2_maxh);
nr_ulsch_channel_compensation(gNB->pusch_vars[ulsch_id]->rxdataF_ext,
gNB->pusch_vars[ulsch_id]->ul_ch_estimates_ext,
gNB->pusch_vars[ulsch_id]->ul_ch_mag0,
gNB->pusch_vars[ulsch_id]->ul_ch_magb0,
gNB->pusch_vars[ulsch_id]->rxdataF_comp,
(rel15_ul->nrOfLayers>1) ? gNB->pusch_vars[ulsch_id]->rho : NULL,
frame_parms,
symbol,
nb_re_pusch,
dmrs_symbol_flag,
rel15_ul->dmrs_config_type,
rel15_ul->qam_mod_order,
rel15_ul->nrOfLayers,
rel15_ul->rb_size,
gNB->pusch_vars[ulsch_id]->log2_maxh);
stop_meas(&gNB->ulsch_channel_compensation_stats);
start_meas(&gNB->ulsch_mrc_stats);
nr_ulsch_detection_mrc(frame_parms,
gNB->pusch_vars[ulsch_id]->rxdataF_comp,
gNB->pusch_vars[ulsch_id]->ul_ch_mag0,
gNB->pusch_vars[ulsch_id]->ul_ch_magb0,
(rel15_ul->nrOfLayers>1) ? gNB->pusch_vars[ulsch_id]->rho : NULL,
rel15_ul->nrOfLayers,
symbol,
rel15_ul->rb_size,
nb_re_pusch);
stop_meas(&gNB->ulsch_mrc_stats);
// transform precoding = 0 means enabled
if (rel15_ul->transform_precoding == 0) {
#ifdef __AVX2__
// For odd number of resource blocks need byte alignment to multiple of 8
int nb_re_pusch2 = nb_re_pusch + (nb_re_pusch&7);
#else
int nb_re_pusch2 = nb_re_pusch;
#endif
// perform IDFT operation on the compensated rxdata if transform precoding is enabled
nr_idft(&gNB->pusch_vars[ulsch_id]->rxdataF_comp[0][symbol * nb_re_pusch2], nb_re_pusch);
LOG_D(PHY,"Transform precoding being done on data- symbol: %d, nb_re_pusch: %d\n", symbol, nb_re_pusch);
}
start_meas(&gNB->ulsch_llr_stats);
nr_ulsch_compute_llr(&gNB->pusch_vars[ulsch_id]->rxdataF_comp[0][symbol * (off + rel15_ul->rb_size * NR_NB_SC_PER_RB)],
gNB->pusch_vars[ulsch_id]->ul_ch_mag0,
gNB->pusch_vars[ulsch_id]->ul_ch_magb0,
&gNB->pusch_vars[ulsch_id]->llr[gNB->pusch_vars[ulsch_id]->llr_offset[symbol]],
rel15_ul->rb_size,
gNB->pusch_vars[ulsch_id]->ul_valid_re_per_slot[symbol],
symbol,
rel15_ul->qam_mod_order);
stop_meas(&gNB->ulsch_llr_stats);
//----------------------------------------------------------
//--------------------- PTRS Processing --------------------
//----------------------------------------------------------
// In case PTRS is enabled then LLR will be calculated after PTRS symbols are processed
//otherwise LLR are calculated for each symbol based upon DMRS channel estimates only.
if (rel15_ul->pdu_bit_map & PUSCH_PDU_BITMAP_PUSCH_PTRS) {
start_meas(&gNB->ulsch_ptrs_processing_stats);
nr_pusch_ptrs_processing(gNB,
frame_parms,
rel15_ul,
ulsch_id,
slot,
symbol,
nb_re_pusch);
stop_meas(&gNB->ulsch_ptrs_processing_stats);
// Subtract total PTRS RE's in the symbol from PUSCH RE's
gNB->pusch_vars[ulsch_id]->ul_valid_re_per_slot[symbol] -= gNB->pusch_vars[ulsch_id]->ptrs_re_per_slot;
}
}
}
*/
/*
* Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The OpenAirInterface Software Alliance licenses this file to You under
* the OAI Public License, Version 1.1 (the "License"); you may not use this file
* except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.openairinterface.org/?page_id=698
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*-------------------------------------------------------------------------------
* For more information about the OpenAirInterface (OAI) Software Alliance:
* contact@openairinterface.org
*/
#include <simde/x86/avx2.h>
void simde_mm128_separate_real_imag_parts(simde__m128i *out_re, simde__m128i *out_im, simde__m128i in0, simde__m128i in1)
{
// Put in0 = [Re(0,1) Re(2,3) Im(0,1) Im(2,3)]
in0 = simde_mm_shufflelo_epi16(in0, 0xd8); //_MM_SHUFFLE(0,2,1,3));
in0 = simde_mm_shufflehi_epi16(in0, 0xd8); //_MM_SHUFFLE(0,2,1,3));
in0 = simde_mm_shuffle_epi32(in0, 0xd8); //_MM_SHUFFLE(0,2,1,3));
// Put in1 = [Re(4,5) Re(6,7) Im(4,5) Im(6,7)]
in1 = simde_mm_shufflelo_epi16(in1, 0xd8); //_MM_SHUFFLE(0,2,1,3));
in1 = simde_mm_shufflehi_epi16(in1, 0xd8); //_MM_SHUFFLE(0,2,1,3));
in1 = simde_mm_shuffle_epi32(in1, 0xd8); //_MM_SHUFFLE(0,2,1,3));
*out_re = simde_mm_unpacklo_epi64(in0, in1);
*out_im = simde_mm_unpackhi_epi64(in0, in1);
}
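/* Usage sketch (illustrative; this hypothetical helper is not part of the
 * patch): deinterleave eight int16_t I/Q pairs into separate real and
 * imaginary vectors using the routine above. */
static inline void example_separate_8_samples(const int16_t in[16], int16_t re[8], int16_t im[8])
{
  simde__m128i a = simde_mm_loadu_si128((const simde__m128i *)&in[0]); // samples 0..3, interleaved re,im
  simde__m128i b = simde_mm_loadu_si128((const simde__m128i *)&in[8]); // samples 4..7
  simde__m128i vre, vim;
  simde_mm128_separate_real_imag_parts(&vre, &vim, a, b);
  simde_mm_storeu_si128((simde__m128i *)re, vre); // Re(0..7) as eight int16_t lanes
  simde_mm_storeu_si128((simde__m128i *)im, vim); // Im(0..7)
}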
void simde_mm256_separate_real_imag_parts(simde__m256i *out_re, simde__m256i *out_im, simde__m256i in0, simde__m256i in1)
{
// Put in0 = [Re(0,1,2,3) Im(0,1,2,3) Re(4,5,6,7) Im(4,5,6,7)]
in0 = simde_mm256_shufflelo_epi16(in0, 0xd8); //_MM_SHUFFLE(0,2,1,3));
in0 = simde_mm256_shufflehi_epi16(in0, 0xd8); //_MM_SHUFFLE(0,2,1,3));
in0 = simde_mm256_shuffle_epi32(in0, 0xd8); //_MM_SHUFFLE(0,2,1,3));
// Put in1 = [Re(8,9,10,11) Im(8,9,10,11) Re(12,13,14,15) Im(12,13,14,15)]
in1 = simde_mm256_shufflelo_epi16(in1, 0xd8); //_MM_SHUFFLE(0,2,1,3));
in1 = simde_mm256_shufflehi_epi16(in1, 0xd8); //_MM_SHUFFLE(0,2,1,3));
in1 = simde_mm256_shuffle_epi32(in1, 0xd8); //_MM_SHUFFLE(0,2,1,3));
// Put tmp0 =[Re(0,1,2,3) Re(8,9,10,11) Re(4,5,6,7) Re(12,13,14,15)]
simde__m256i tmp0 = simde_mm256_unpacklo_epi64(in0, in1);
// Put tmp1 = [Im(0,1,2,3) Im(8,9,10,11) Im(4,5,6,7) Im(12,13,14,15)]
simde__m256i tmp1 = simde_mm256_unpackhi_epi64(in0, in1);
*out_re = simde_mm256_permute4x64_epi64(tmp0, 0xd8);
*out_im = simde_mm256_permute4x64_epi64(tmp1, 0xd8);
}
......@@ -723,6 +723,8 @@ c32_t dot_product(const c16_t *x,
double interp(double x, double *xs, double *ys, int count);
void simde_mm128_separate_real_imag_parts(simde__m128i *out_re, simde__m128i *out_im, simde__m128i in0, simde__m128i in1);
void simde_mm256_separate_real_imag_parts(simde__m256i *out_re, simde__m256i *out_im, simde__m256i in0, simde__m256i in1);
#ifdef __cplusplus
}
......
......@@ -793,7 +793,7 @@ typedef struct puschSymbolProc_s {
int slot;
int startSymbol;
int numSymbols;
int16_t **llr;
int16_t *s;
} puschSymbolProc_t;
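/* Editor's note (illustrative): with int16_t **llr, a symbol-processing job
 * receives the per-layer LLR buffers (e.g. pusch_vars->llr_layers) and writes
 * rdata->llr[layer][offset]; the layers are only demapped into a single
 * stream later, in nr_ulsch_procedures(). */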
......
......@@ -441,7 +441,7 @@ void nr_ulsch_procedures(PHY_VARS_gNB *gNB, int frame_rx, int slot_rx, int ULSCH
pusch_pdu->nrOfLayers,
G);
// if (gNB->use_pusch_tp == 0) {
nr_ulsch_layer_demapping(gNB->pusch_vars[ULSCH_id].llr,
pusch_pdu->nrOfLayers,
pusch_pdu->qam_mod_order,
......@@ -454,7 +454,7 @@ void nr_ulsch_procedures(PHY_VARS_gNB *gNB, int frame_rx, int slot_rx, int ULSCH
start_meas(&gNB->ulsch_unscrambling_stats);
nr_ulsch_unscrambling(gNB->pusch_vars[ULSCH_id].llr, G, pusch_pdu->data_scrambling_id, pusch_pdu->rnti);
stop_meas(&gNB->ulsch_unscrambling_stats);
// }
//----------------------------------------------------------
//--------------------- ULSCH decoding ---------------------
//----------------------------------------------------------
......