Commit 4c74cc66 authored by laurent

Improve nr_ul_channel_estimation()

parent cb3108f4
......@@ -48,6 +48,10 @@
// main log variables
// Fixme: find a better place to be sure it is called
void read_cpu_hardware (void) __attribute__ ((constructor));
void read_cpu_hardware (void) {__builtin_cpu_init(); }
log_mem_cnt_t log_mem_d[2];
int log_mem_flag=0;
int log_mem_multi=1;
......
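A note on the new constructor above: GCC-style __attribute__((constructor)) functions run before main(), so __builtin_cpu_init() has populated the CPU feature model before any later __builtin_cpu_supports() call, presumably including the "avx2" test this commit adds to rotate_cpx_vector(). A minimal, self-contained sketch (not the OAI code; names are illustrative):

#include <stdio.h>

static void init_cpu(void) __attribute__((constructor));
static void init_cpu(void) { __builtin_cpu_init(); }

int main(void) {
  /* safe to query features here: init_cpu() already ran before main() */
  printf("avx2 supported: %d\n", __builtin_cpu_supports("avx2"));
  return 0;
}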
......@@ -366,6 +366,7 @@ typedef struct {
#define MATLAB_CSHORT_BRACKET3 15
int32_t write_file_matlab(const char *fname, const char *vname, void *data, int length, int dec, unsigned int format, int multiVec);
#define write_output(a, b, c, d, e, f) write_file_matlab(a, b, c, d, e, f, 0)
/*----------------macro definitions for reading log configuration from the config module */
#define CONFIG_STRING_LOG_PREFIX "log_config"
......
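The change above adds a trailing multiVec argument to write_file_matlab() while keeping write_output() as a macro, so the many existing 6-argument call sites compile unchanged with multiVec forced to 0 (what multiVec selects — presumably whether several vectors share one .m dump file — is an assumption, not stated in this hunk). A compilable illustration of the call-site compatibility, with a stub body standing in for the real log.c implementation:

#include <stdint.h>

/* stub for illustration only; the real implementation lives in log.c */
int32_t write_file_matlab(const char *fname, const char *vname, void *data,
                          int length, int dec, unsigned int format, int multiVec) {
  (void)fname; (void)vname; (void)data; (void)length; (void)dec; (void)format;
  return multiVec;
}
#define write_output(a, b, c, d, e, f) write_file_matlab(a, b, c, d, e, f, 0)

int main(void) {
  int32_t chest[4] = {0};
  /* legacy 6-argument call, silently expands with multiVec = 0 */
  return write_output("ul_ch0.m", "ulch0", chest, 4, 1, 1);
}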
......@@ -475,9 +475,7 @@ int phy_init_lte_eNB(PHY_VARS_eNB *eNB,
srs_vars[srs_id].srs = (int32_t *) malloc16_clear (2 * fp->ofdm_symbol_size * sizeof (int32_t));
}
LOG_I(PHY,"PRACH allocation\n");
// PRACH
prach_vars->prachF = (int16_t *) malloc16_clear (1024 * 2 * sizeof (int16_t));
// assume maximum of 64 RX antennas for PRACH receiver
prach_vars->prach_ifft[0] = (int32_t **) malloc16_clear (64 * sizeof (int32_t *));
......@@ -485,8 +483,6 @@ int phy_init_lte_eNB(PHY_VARS_eNB *eNB,
prach_vars->prach_ifft[0][i] = (int32_t *) malloc16_clear (1024 * 2 * sizeof (int32_t));
prach_vars->rxsigF[0] = (int16_t **) malloc16_clear (64 * sizeof (int16_t *));
// PRACH BR
prach_vars_br->prachF = (int16_t *)malloc16_clear( 1024*2*sizeof(int32_t) );
// assume maximum of 64 RX antennas for PRACH receiver
for (int ce_level = 0; ce_level < 4; ce_level++) {
......@@ -576,8 +572,6 @@ void phy_free_lte_eNB(PHY_VARS_eNB *eNB) {
for (UE_id=0; UE_id<NUMBER_OF_SRS_MAX; UE_id++) free_and_zero(srs_vars[UE_id].srs);
free_and_zero(prach_vars->prachF);
for (i = 0; i < 64; i++) free_and_zero(prach_vars->prach_ifft[0][i]);
free_and_zero(prach_vars->prach_ifft[0]);
......@@ -589,7 +583,6 @@ void phy_free_lte_eNB(PHY_VARS_eNB *eNB) {
free_and_zero(prach_vars->rxsigF[ce_level]);
}
free_and_zero(prach_vars_br->prachF);
free_and_zero(prach_vars->rxsigF[0]);
for (int ULSCH_id=0; ULSCH_id<NUMBER_OF_ULSCH_MAX; ULSCH_id++) {
......
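These removed allocations pair with a later hunk in this commit where rx_prach0() declares its own on-stack, 32-byte-aligned scratch buffer (c16_t prachF[1024]), so the long-lived heap buffers and their free_and_zero() calls are no longer needed. A sketch of the pattern, assuming c16_t is OAI's { int16_t r, i; } pair:

#include <stdint.h>

typedef struct { int16_t r; int16_t i; } c16_t;   /* assumed layout of OAI's c16_t */

void prach_scratch_example(void) {
  /* per-call scratch: zero-initialized, aligned for SIMD, nothing to free */
  c16_t prachF[1024] __attribute__((aligned(32))) = {0};
  (void)prachF;   /* ...correlation with the root sequence would go here... */
}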
......@@ -106,9 +106,9 @@ int32_t lte_ul_channel_estimation(LTE_DL_FRAME_PARMS *frame_parms,
#ifdef DEBUG_CH
if (Ns==0)
LOG_M("drs_seq0.m","drsseq0",ul_ref_sigs_rx[u][v][Msc_RS_idx],2*Msc_RS,2,1);
LOG_M("drs_seq0.m","drsseq0",ul_ref_sigs_rx[u][v][Msc_RS_idx],2*Msc_RS,2,0);
else
LOG_M("drs_seq1.m","drsseq1",ul_ref_sigs_rx[u][v][Msc_RS_idx],2*Msc_RS,2,1);
LOG_M("drs_seq1.m","drsseq1",ul_ref_sigs_rx[u][v][Msc_RS_idx],2*Msc_RS,2,0);
#endif
......@@ -311,7 +311,7 @@ int32_t lte_ul_channel_estimation(LTE_DL_FRAME_PARMS *frame_parms,
// alpha = (int16_t) (((int32_t) SCALE * (int32_t) (pilot_pos2-k))/(pilot_pos2-pilot_pos1));
// beta = (int16_t) (((int32_t) SCALE * (int32_t) (k-pilot_pos1))/(pilot_pos2-pilot_pos1));
#ifdef DEBUG_CH
LOG_D(PHY,"lte_ul_channel_estimation: k=%d, alpha = %d, beta = %d\n",k,alpha,beta);
LOG_D(PHY,"lte_ul_channel_estimation: k=%d\n",k);
#endif
//symbol_offset_subframe = frame_parms->N_RB_UL*12*k;
......@@ -436,9 +436,9 @@ int32_t lte_ul_channel_estimation_RRU(LTE_DL_FRAME_PARMS *frame_parms,
#ifdef DEBUG_CH
if (l==pilot_pos1)
write_output("drs_seq0.m","drsseq0",ul_ref_sigs_rx[u][v][Msc_RS_idx],Msc_RS,1,1);
write_output("drs_seq0.m","drsseq0",ul_ref_sigs_rx[u][v][Msc_RS_idx],Msc_RS,1,0);
else
write_output("drs_seq1.m","drsseq1",ul_ref_sigs_rx[u][v][Msc_RS_idx],Msc_RS,1,1);
write_output("drs_seq1.m","drsseq1",ul_ref_sigs_rx[u][v][Msc_RS_idx],Msc_RS,1,0);
#endif
symbol_offset = frame_parms->N_RB_UL*12*l;
......
......@@ -74,6 +74,7 @@ void generate_phich(LTE_DL_FRAME_PARMS *frame_parms,
uint32_t subframe_offset=((frame_parms->Ncp==0)?14:12)*frame_parms->ofdm_symbol_size*subframe;
memset(d,0,24*sizeof(int16_t));
const int nushift=frame_parms->nushift;
if (frame_parms->nb_antenna_ports_eNB==1)
gain_lin_QPSK = (int16_t)(((int32_t)amp*ONE_OVER_SQRT2_Q15)>>15);
......@@ -245,7 +246,7 @@ void generate_phich(LTE_DL_FRAME_PARMS *frame_parms,
y1_16[7] = -y0_16[5];
for (i=0,j=0,m=0; i<6; i++,j+=2) {
if ((i!=(frame_parms->nushift))&&(i!=(frame_parms->nushift+3))) {
if ((i!=(nushift))&&(i!=((nushift+3)%6))) {
y0[j] += y0_16[m];
y1[j] += y1_16[m++];
y0[j+1] += y0_16[m];
......@@ -287,7 +288,7 @@ void generate_phich(LTE_DL_FRAME_PARMS *frame_parms,
y1_16[7] = -y0_16[5];
for (i=0,j=0,m=0; i<6; i++,j+=2) {
if ((i!=(frame_parms->nushift))&&(i!=(frame_parms->nushift+3))) {
if ((i!=(nushift))&&(i!=((nushift+3)%6))) {
y0[j] += y0_16[m];
y1[j] += y1_16[m++];
y0[j+1] += y0_16[m];
......@@ -329,7 +330,7 @@ void generate_phich(LTE_DL_FRAME_PARMS *frame_parms,
y1_16[7] = -y0_16[5];
for (i=0,j=0,m=0; i<6; i++,j+=2) {
if ((i!=(frame_parms->nushift))&&(i!=(frame_parms->nushift+3))) {
if ((i!=(nushift))&&(i!=((nushift+3)%6))) {
y0[j] += y0_16[m];
y1[j] += y1_16[m++];
y0[j+1] += y0_16[m];
......@@ -360,7 +361,7 @@ void generate_phich(LTE_DL_FRAME_PARMS *frame_parms,
y0_16[7] = d[7]*gain_lin_QPSK;
for (i=0,j=0,m=0; i<6; i++,j+=2) {
if ((i!=(frame_parms->nushift))&&(i!=(frame_parms->nushift+3))) {
if ((i!=(nushift))&&(i!=((nushift+3)%6))) {
y0[j] += y0_16[m++];
y0[j+1] += y0_16[m++];
}
......@@ -385,7 +386,7 @@ void generate_phich(LTE_DL_FRAME_PARMS *frame_parms,
y0_16[7] = d[15]*gain_lin_QPSK;
for (i=0,j=0,m=0; i<6; i++,j+=2) {
if ((i!=(frame_parms->nushift))&&(i!=(frame_parms->nushift+3))) {
if ((i!=(nushift))&&(i!=((nushift+3)%6))) {
y0[j] += y0_16[m++];
y0[j+1] += y0_16[m++];
}
......@@ -410,7 +411,7 @@ void generate_phich(LTE_DL_FRAME_PARMS *frame_parms,
y0_16[7] = d[23]*gain_lin_QPSK;
for (i=0,j=0,m=0; i<6; i++,j+=2) {
if ((i!=(frame_parms->nushift))&&(i!=(frame_parms->nushift+3))) {
if ((i!=(nushift))&&(i!=((nushift+3)%6))) {
y0[j] += y0_16[m++];
y0[j+1] += y0_16[m++];
}
......@@ -533,7 +534,7 @@ void generate_phich(LTE_DL_FRAME_PARMS *frame_parms,
y1_16[7] = -y0_16[5];
for (i=0,j=0,m=0; i<6; i++,j+=2) {
if ((i!=(frame_parms->nushift))&&(i!=(frame_parms->nushift+3))) {
if ((i!=(nushift))&&(i!=((nushift+3)%6))) {
y0[j] += y0_16[m];
y1[j] += y1_16[m++];
y0[j+1] += y0_16[m];
......@@ -644,7 +645,7 @@ void generate_phich(LTE_DL_FRAME_PARMS *frame_parms,
y0_16[7] = d[7]*gain_lin_QPSK;
for (i=0,j=0,m=0; i<6; i++,j+=2) {
if ((i!=(frame_parms->nushift))&&(i!=(frame_parms->nushift+3))) {
if ((i!=(nushift))&&(i!=((nushift+3)%6))) {
y0[j] += y0_16[m++];
y0[j+1] += y0_16[m++];
}
......
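As I read this change, the loops above skip the two resource elements per group of six subcarriers that carry cell-specific reference signals; since nushift is in 0..5, the second RS position is (nushift+3) mod 6, and without the modulo any nushift >= 3 would compare i against an index that never occurs, letting PHICH symbols land on an RS position. A tiny standalone check of the index arithmetic (illustration only):

#include <stdio.h>

int main(void) {
  for (int nushift = 0; nushift < 6; nushift++)
    printf("nushift=%d -> skipped REs: %d and %d\n",
           nushift, nushift, (nushift + 3) % 6);
  return 0;
}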
......@@ -62,9 +62,7 @@ void rx_prach0(PHY_VARS_eNB *eNB,
uint8_t Ncs_config;
uint8_t restricted_set;
uint8_t n_ra_prb;
int16_t *prachF=NULL;
int16_t **rxsigF=NULL;
int16_t *prach2;
uint8_t preamble_index;
uint16_t NCS,NCS2;
uint16_t preamble_offset=0,preamble_offset_old;
......@@ -77,12 +75,10 @@ void rx_prach0(PHY_VARS_eNB *eNB,
uint8_t not_found;
int k=0;
uint16_t u;
int16_t *Xu=0;
uint16_t offset;
c16_t *Xu=0;
int16_t Ncp;
uint16_t first_nonzero_root_idx=0;
uint8_t new_dft=0;
uint8_t aa;
int32_t lev;
int16_t levdB;
int fft_size=0,log2_ifft_size;
......@@ -130,14 +126,11 @@ void rx_prach0(PHY_VARS_eNB *eNB,
tdd_mapindex,Nf);
}
int16_t *prach[nb_rx];
uint8_t prach_fmt = get_prach_fmt(prach_ConfigIndex,frame_type);
uint16_t N_ZC = (prach_fmt <4)?839:139;
if (eNB) {
if (br_flag == 1) {
prach_ifftp = eNB->prach_vars_br.prach_ifft[ce_level];
prachF = eNB->prach_vars_br.prachF;
rxsigF = eNB->prach_vars_br.rxsigF[ce_level];
if (LOG_DEBUGFLAG(PRACH)) {
......@@ -151,7 +144,6 @@ void rx_prach0(PHY_VARS_eNB *eNB,
}
} else {
prach_ifftp = eNB->prach_vars.prach_ifft[0];
prachF = eNB->prach_vars.prachF;
rxsigF = eNB->prach_vars.rxsigF[0];
//if (LOG_DEBUGFLAG(PRACH)) {
......@@ -179,7 +171,8 @@ void rx_prach0(PHY_VARS_eNB *eNB,
AssertFatal(ru!=NULL,"ru is null\n");
int8_t dBEn0=0;
for (aa=0; aa<nb_rx; aa++) {
int16_t *prach[nb_rx];
for (int aa=0; aa<nb_rx; aa++) {
if (ru->if_south == LOCAL_RF || ru->function == NGFI_RAU_IF5) { // set the time-domain signal if we have to use it in this node
// DJP - indexing below in subframe zero takes us off the beginning of the array???
prach[aa] = (int16_t *)&ru->common.rxdata[aa][(subframe*fp->samples_per_tti)-ru->N_TA_offset];
......@@ -291,9 +284,9 @@ void rx_prach0(PHY_VARS_eNB *eNB,
LOG_D(PHY,"rx_prach: Doing FFT for N_RB_UL %d nb_rx:%d Ncp:%d\n",fp->N_RB_UL, nb_rx, Ncp);
}
for (aa=0; aa<nb_rx; aa++) {
for (int aa=0; aa<nb_rx; aa++) {
AssertFatal(prach[aa]!=NULL,"prach[%d] is null\n",aa);
prach2 = prach[aa] + (Ncp<<1);
int16_t *prach2 = prach[aa] + (Ncp<<1); // <<1 because the buffer is int16_t while Ncp counts complex (c16) samples
// do DFT
switch (fp->N_RB_UL) {
......@@ -564,33 +557,34 @@ void rx_prach0(PHY_VARS_eNB *eNB,
new_dft = 0;
if (br_flag == 1) {
Xu=(int16_t *)eNB->X_u_br[ce_level][preamble_offset-first_nonzero_root_idx];
Xu=(c16_t *)eNB->X_u_br[ce_level][preamble_offset-first_nonzero_root_idx];
prach_ifft = prach_ifftp[prach_ifft_cnt++];
if (eNB->prach_vars_br.repetition_number[ce_level]==1) memset(prach_ifft,0,((N_ZC==839)?2048:256)*sizeof(int32_t));
} else {
Xu=(int16_t *)eNB->X_u[preamble_offset-first_nonzero_root_idx];
Xu=(c16_t *)eNB->X_u[preamble_offset-first_nonzero_root_idx];
prach_ifft = prach_ifftp[0];
memset(prach_ifft,0,((N_ZC==839) ? 2048 : 256)*sizeof(int32_t));
}
c16_t prachF[1024] __attribute__((aligned(32)))={0};
memset(prachF, 0, sizeof(int16_t)*2*1024 );
if (LOG_DUMPFLAG(PRACH)) {
if (prach[0]!= NULL) LOG_M("prach_rx0.m","prach_rx0",prach[0],6144+792,1,1);
LOG_M("prach_rx1.m","prach_rx1",prach[1],6144+792,1,1);
LOG_M("prach_rxF0.m","prach_rxF0",rxsigF[0],12288,1,1);
LOG_M("prach_rxF1.m","prach_rxF1",rxsigF[1],12288,1,1);
if (LOG_DUMPFLAG(PRACH))
for (int z=0; z<nb_rx; z++)
if( prach[z] ) {
char tmp[128];
sprintf(tmp,"prach_rx%d.m", z);
LOG_M(tmp,tmp,prach[z],6144+792,1,1);
sprintf(tmp,"prach_rxF%d.m", z);
LOG_M(tmp,tmp,rxsigF[z],12288,1,1);
}
for (aa=0; aa<nb_rx; aa++) {
for (int aa=0; aa<nb_rx; aa++) {
// Do componentwise product with Xu* on each antenna
k=0;
for (offset=0; offset<(N_ZC<<1); offset+=2) {
prachF[offset] = (int16_t)(((int32_t)Xu[offset]*rxsigF[aa][k] + (int32_t)Xu[offset+1]*rxsigF[aa][k+1])>>15);
prachF[offset+1] = (int16_t)(((int32_t)Xu[offset]*rxsigF[aa][k+1] - (int32_t)Xu[offset+1]*rxsigF[aa][k])>>15);
for (int offset=0; offset<N_ZC; offset++) {
prachF[offset].r = (int16_t)(((int32_t)Xu[offset].r*rxsigF[aa][k] + (int32_t)Xu[offset].i*rxsigF[aa][k+1])>>15);
prachF[offset].i = (int16_t)(((int32_t)Xu[offset].r*rxsigF[aa][k+1] - (int32_t)Xu[offset].i*rxsigF[aa][k])>>15);
k+=2;
if (k==(12*2*fp->ofdm_symbol_size))
......@@ -600,13 +594,13 @@ void rx_prach0(PHY_VARS_eNB *eNB,
// Now do IFFT of size 1024 (N_ZC=839) or 256 (N_ZC=139)
if (N_ZC == 839) {
log2_ifft_size = 10;
idft(IDFT_1024,prachF,prach_ifft_tmp,1);
idft(IDFT_1024,(int16_t*)prachF,prach_ifft_tmp,1);
// compute energy and accumulate over receive antennas and repetitions for BR
for (i=0; i<2048; i++)
prach_ifft[i] += (prach_ifft_tmp[i<<1]*prach_ifft_tmp[i<<1] + prach_ifft_tmp[1+(i<<1)]*prach_ifft_tmp[1+(i<<1)])>>9;
} else {
idft(IDFT_256,prachF,prach_ifft_tmp,1);
idft(IDFT_256,(int16_t*)prachF,prach_ifft_tmp,1);
log2_ifft_size = 8;
// compute energy and accumulate over receive antennas and repetitions for BR
......@@ -674,14 +668,12 @@ void rx_prach0(PHY_VARS_eNB *eNB,
if (br_flag == 0) {
LOG_M("rxsigF.m","prach_rxF",&rxsigF[0][0],12288,1,1);
LOG_M("prach_rxF_comp0.m","prach_rxF_comp0",prachF,1024,1,1);
LOG_M("Xu.m","xu",Xu,N_ZC,1,1);
LOG_M("prach_ifft0.m","prach_t0",prach_ifft,1024,1,1);
LOG_M("SF2_3.m","sf2_3",&ru->common.rxdata[0][2*fp->samples_per_tti],2*fp->samples_per_tti,1,1);
} else {
LOG_E(PHY,"Dumping prach (br_flag %d), k = %d (n_ra_prb %d)\n",br_flag,k,n_ra_prb);
LOG_M("rxsigF_br.m","prach_rxF_br",&rxsigF[0][0],12288,1,1);
LOG_M("prach_rxF_comp0_br.m","prach_rxF_comp0_br",prachF,1024,1,1);
LOG_M("Xu_br.m","xu_br",Xu,N_ZC,1,1);
LOG_M("prach_ifft0_br.m","prach_t0_br",prach_ifft,1024,1,1);
}
......
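The correlation in rx_prach0() is now written per complex sample over c16_t instead of per interleaved int16_t pair: each received frequency sample is multiplied by the conjugate of the root-sequence sample Xu, with a >>15 to stay in Q15. A standalone sketch of that per-sample step, again assuming c16_t is { int16_t r, i; }:

#include <stdint.h>

typedef struct { int16_t r; int16_t i; } c16_t;   /* assumed layout */

/* out = conj(xu) * rx, scaled back to Q15 by the >>15 */
static inline c16_t mul_conj_q15(c16_t xu, c16_t rx) {
  c16_t out;
  out.r = (int16_t)(((int32_t)xu.r * rx.r + (int32_t)xu.i * rx.i) >> 15);
  out.i = (int16_t)(((int32_t)xu.r * rx.i - (int32_t)xu.i * rx.r) >> 15);
  return out;
}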
/*
* Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The OpenAirInterface Software Alliance licenses this file to You under
* the OAI Public License, Version 1.1 (the "License"); you may not use this file
* except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.openairinterface.org/?page_id=698
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*-------------------------------------------------------------------------------
* For more information about the OpenAirInterface (OAI) Software Alliance:
* contact@openairinterface.org
*/
#include "PHY/types.h"
#include "PHY/defs.h"
#include "PHY/extern.h"
void bit8_rxdemux(int length,int offset)
{
int i;
short *rx1_ptr = (short *)&PHY_vars->rx_vars[1].RX_DMA_BUFFER[offset];
short *rx0_ptr = (short *)&PHY_vars->rx_vars[0].RX_DMA_BUFFER[offset];
char *rx0_ptr2 = (char *)(&PHY_vars->rx_vars[0].RX_DMA_BUFFER[offset]);
// short tmp;
short r0,i0,r1,i1;
// printf("demuxing: %d,%d\n",length,offset);
// printf("%x %x\n",PHY_vars->chsch_data[0].CHSCH_f_sync[0], PHY_vars->chsch_data[0].CHSCH_f_sync[1]);
for (i=0; i<length; i++) {
r0= (short)(rx0_ptr2[i<<2]); // Re 0
i0= (short)(rx0_ptr2[(i<<2)+1]); // Im 0
r1= (short)(rx0_ptr2[(i<<2)+2]); // Re 1
i1= (short)(rx0_ptr2[(i<<2)+3]); // Im 1
rx0_ptr[(i<<1)] = r0;
rx0_ptr[(i<<1)+1] =i0;
rx1_ptr[i<<1] =r1;
rx1_ptr[(i<<1)+1] =i1;
}
// printf("%x %x\n",PHY_vars->chsch_data[0].CHSCH_f_sync[0], PHY_vars->chsch_data[0].CHSCH_f_sync[1]);
}
/*
* Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The OpenAirInterface Software Alliance licenses this file to You under
* the OAI Public License, Version 1.1 (the "License"); you may not use this file
* except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.openairinterface.org/?page_id=698
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*-------------------------------------------------------------------------------
* For more information about the OpenAirInterface (OAI) Software Alliance:
* contact@openairinterface.org
*/
#include "PHY/types.h"
#include "PHY/defs.h"
#include "PHY/extern.h"
void bit8_txmux(int length,int offset)
{
int i;
short *dest,*dest2;
for (i=0; i<length; i++) {
dest = (short *)&PHY_vars->tx_vars[0].TX_DMA_BUFFER[i+offset];
dest2 = (short *)&PHY_vars->tx_vars[1].TX_DMA_BUFFER[i+offset];
((char *)dest)[0] = (char)(dest[0]>>BIT8_TX_SHIFT);
((char *)dest)[1] = (char)(dest[1]>>BIT8_TX_SHIFT);
((char *)dest)[2] = (char)(dest2[0]>>BIT8_TX_SHIFT);
((char *)dest)[3] = (char)(dest2[1]>>BIT8_TX_SHIFT);
}
}
/*
* Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The OpenAirInterface Software Alliance licenses this file to You under
* the OAI Public License, Version 1.1 (the "License"); you may not use this file
* except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.openairinterface.org/?page_id=698
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*-------------------------------------------------------------------------------
* For more information about the OpenAirInterface (OAI) Software Alliance:
* contact@openairinterface.org
*/
#include "defs.h"
static __m128i alpha_128 __attribute__ ((aligned(16)));
static __m128i shift __attribute__ ((aligned(16)));
int add_cpx_vector(short *x,
short *alpha,
short *y,
unsigned int N)
{
unsigned int i; // loop counter
__m128i *x_128;
__m128i *y_128;
x_128 = (__m128i *)&x[0];
y_128 = (__m128i *)&y[0];
alpha_128 = _mm_set1_epi32(*((int*)alpha));
// we compute 4 cpx multiply for each loop
for(i=0; i<(N>>3); i++) {
y_128[0] = _mm_adds_epi16(alpha_128, x_128[0]);
y_128[1] = _mm_adds_epi16(alpha_128, x_128[1]);
y_128[2] = _mm_adds_epi16(alpha_128, x_128[2]);
y_128[3] = _mm_adds_epi16(alpha_128, x_128[3]);
x_128+=4;
y_128 +=4;
}
return (0);
}
int add_vector32_scalar(short *x,
int alpha,
short *y,
unsigned int N)
{
unsigned int i; // loop counter
__m128i *x_128;
__m128i *y_128;
x_128 = (__m128i *)&x[0];
y_128 = (__m128i *)&y[0];
alpha_128 = _mm_setr_epi32(alpha,0,alpha,0);
// we compute 4 cpx multiply for each loop
for(i=0; i<(N>>3); i++) {
y_128[0] = _mm_add_epi32(alpha_128, x_128[0]);
y_128[1] = _mm_add_epi32(alpha_128, x_128[1]);
y_128[2] = _mm_add_epi32(alpha_128, x_128[2]);
y_128[3] = _mm_add_epi32(alpha_128, x_128[3]);
x_128+=4;
y_128 +=4;
}
return (0);
}
int add_real_vector64_scalar(short *x,
long long int a,
short *y,
unsigned int N)
{
unsigned int i; // loop counter
__m128i *x_128;
__m128i *y_128;
x_128 = (__m128i *)&x[0];
y_128 = (__m128i *)&y[0];
alpha_128 = _mm_set1_epi64((__m64) a);
// we compute 4 cpx multiply for each loop
for(i=0; i<(N>>3); i++) {
y_128[0] = _mm_add_epi64(alpha_128, x_128[0]);
y_128[1] = _mm_add_epi64(alpha_128, x_128[1]);
y_128[2] = _mm_add_epi64(alpha_128, x_128[2]);
y_128[3] = _mm_add_epi64(alpha_128, x_128[3]);
x_128+=4;
y_128+=4;
}
return(0);
}
#ifdef MAIN
#include <stdio.h>
main ()
{
short input[256] __attribute__((aligned(16)));
short output[256] __attribute__((aligned(16)));
int i;
c16_t alpha;
Zero_Buffer(output,256*2);
input[0] = 100;
input[1] = 200;
input[2] = 100;
input[3] = 200;
input[4] = 1234;
input[5] = -1234;
input[6] = 1234;
input[7] = -1234;
input[8] = 100;
input[9] = 200;
input[10] = 100;
input[11] = 200;
input[12] = 1000;
input[13] = 2000;
input[14] = 1000;
input[15] = 2000;
alpha.r = 10;
alpha.i = -10;
add_cpx_vector(input,(short*) &alpha,input,8);
for (i=0; i<16; i+=2)
printf("output[%d] = %d + %d i\n",i,input[i],input[i+1]);
}
#endif //MAIN
......@@ -23,153 +23,6 @@
#include "tools_defs.h"
int add_vector16(short *x,
short *y,
short *z,
unsigned int N)
{
unsigned int i; // loop counter
__m128i *x_128;
__m128i *y_128;
__m128i *z_128;
x_128 = (__m128i *)&x[0];
y_128 = (__m128i *)&y[0];
z_128 = (__m128i *)&z[0];
for(i=0; i<(N>>5); i++) {
/*
printf("i = %d\n",i);
print_shorts(x_128[0],"x[0]=");
print_shorts(y_128[0],"y[0]=");
*/
z_128[0] = _mm_adds_epi16(x_128[0],y_128[0]);
/*
print_shorts(z_128[0],"z[0]=");
print_shorts(x_128[1],"x[1]=");
print_shorts(y_128[1],"y[1]=");
*/
z_128[1] = _mm_adds_epi16(x_128[1],y_128[1]);
/*
print_shorts(z_128[1],"z[1]=");
print_shorts(x_128[2],"x[2]=");
print_shorts(y_128[2],"y[2]=");
*/
z_128[2] = _mm_adds_epi16(x_128[2],y_128[2]);
/*
print_shorts(z_128[2],"z[2]=");
print_shorts(x_128[3],"x[3]=");
print_shorts(y_128[3],"y[3]=");
*/
z_128[3] = _mm_adds_epi16(x_128[3],y_128[3]);
/*
print_shorts(z_128[3],"z[3]=");
*/
x_128 +=4;
y_128 +=4;
z_128 +=4;
}
_mm_empty();
_m_empty();
return(0);
}
/*
print_shorts64(__m64 x,char *s) {
printf("%s ",s);
printf("%d %d %d %d\n",((short *)&x)[0],((short *)&x)[1],((short *)&x)[2],((short *)&x)[3]);
}
*/
int add_vector16_64(short *x,
short *y,
short *z,
unsigned int N)
{
unsigned int i; // loop counter
__m64 *x_64;
__m64 *y_64;
__m64 *z_64;
x_64 = (__m64 *)&x[0];
y_64 = (__m64 *)&y[0];
z_64 = (__m64 *)&z[0];
for(i=0; i<(N>>2); i++) {
/*
printf("i = %d\n",i);
print_shorts64(x_64[i],"x[i]=");
print_shorts64(y_64[i],"y[i]=");
*/
z_64[i] = _mm_adds_pi16(x_64[i],y_64[i]);
/*
print_shorts64(z_64[i],"z[i]=");
*/
}
_mm_empty();
_m_empty();
return(0);
}
int add_cpx_vector32(short *x,
short *y,
short *z,
unsigned int N)
{
unsigned int i; // loop counter
__m128i *x_128;
__m128i *y_128;
__m128i *z_128;
x_128 = (__m128i *)&x[0];
y_128 = (__m128i *)&y[0];
z_128 = (__m128i *)&z[0];
for(i=0; i<(N>>3); i++) {
z_128[0] = _mm_add_epi32(x_128[0],y_128[0]);
z_128[1] = _mm_add_epi32(x_128[1],y_128[1]);
z_128[2] = _mm_add_epi32(x_128[2],y_128[2]);
z_128[3] = _mm_add_epi32(x_128[3],y_128[3]);
x_128+=4;
y_128 +=4;
z_128 +=4;
}
_mm_empty();
_m_empty();
return(0);
}
int32_t sub_cpx_vector16(int16_t *x,
int16_t *y,
int16_t *z,
......@@ -201,73 +54,6 @@ int32_t sub_cpx_vector16(int16_t *x,
int add_real_vector64(short *x,
short* y,
short *z,
unsigned int N)
{
unsigned int i; // loop counter
__m128i *x_128;
__m128i *y_128;
__m128i *z_128;
x_128 = (__m128i *)&x[0];
y_128 = (__m128i *)&y[0];
z_128 = (__m128i *)&z[0];
for(i=0; i<(N>>3); i++) {
z_128[0] = _mm_add_epi64(x_128[0], y_128[0]);
z_128[1] = _mm_add_epi64(x_128[1], y_128[1]);
z_128[2] = _mm_add_epi64(x_128[2], y_128[2]);
z_128[3] = _mm_add_epi64(x_128[3], y_128[3]);
x_128+=4;
y_128+=4;
z_128+=4;
}
_mm_empty();
_m_empty();
return(0);
}
int sub_real_vector64(short *x,
short* y,
short *z,
unsigned int N)
{
unsigned int i; // loop counter
__m128i *x_128;
__m128i *y_128;
__m128i *z_128;
x_128 = (__m128i *)&x[0];
y_128 = (__m128i *)&y[0];
z_128 = (__m128i *)&z[0];
for(i=0; i<(N>>3); i++) {
z_128[0] = _mm_sub_epi64(x_128[0], y_128[0]);
z_128[1] = _mm_sub_epi64(x_128[1], y_128[1]);
z_128[2] = _mm_sub_epi64(x_128[2], y_128[2]);
z_128[3] = _mm_sub_epi64(x_128[3], y_128[3]);
x_128+=4;
y_128+=4;
z_128+=4;
}
_mm_empty();
_m_empty();
return(0);
}
#ifdef MAIN
#include <stdio.h>
......@@ -280,7 +66,7 @@ main ()
int i;
c16_t alpha;
Zero_Buffer(output,256*2);
memset(output,0,256*2);
input[0] = 100;
input[1] = 200;
......
......@@ -22,28 +22,6 @@
#include "PHY/sse_intrin.h"
#include "tools_defs.h"
#if defined(__x86_64__) || defined(__i386__)
#define simd_q15_t __m128i
#define simdshort_q15_t __m64
#define shiftright_int16(a,shift) _mm_srai_epi16(a,shift)
#define set1_int16(a) _mm_set1_epi16(a)
#define mulhi_int16(a,b) _mm_slli_epi16(_mm_mulhi_epi16(a,b),1)
#define mulhi_s1_int16(a,b) _mm_slli_epi16(_mm_mulhi_epi16(a,b),2)
#define adds_int16(a,b) _mm_adds_epi16(a,b)
#define mullo_int16(a,b) _mm_mullo_epi16(a,b)
#elif defined(__arm__)
#define simd_q15_t int16x8_t
#define simdshort_q15_t int16x4_t
#define shiftright_int16(a,shift) vshrq_n_s16(a,shift)
#define set1_int16(a) vdupq_n_s16(a)
#define mulhi_int16(a,b) vqdmulhq_s16(a,b)
#define mulhi_s1_int16(a,b) vshlq_n_s16(vqdmulhq_s16(a,b),1)
#define adds_int16(a,b) vqaddq_s16(a,b)
#define mullo_int16(a,b) vmulq_s16(a,b)
#define _mm_empty()
#define _m_empty()
#endif
void multadd_complex_vector_real_scalar(int16_t *x,
int16_t alpha,
......@@ -111,38 +89,49 @@ void multadd_real_vector_complex_scalar(int16_t *x,
}
}
void multadd_real_four_symbols_vector_complex_scalar(int16_t *x,
int16_t *alpha,
int16_t *y)
{
// do 8 multiplications at a time
simd_q15_t alpha_r_128,alpha_i_128,yr,yi,*x_128=(simd_q15_t*)x;
simd_q15_t y_128;
y_128 = _mm_loadu_si128((simd_q15_t*)y);
alpha_r_128 = set1_int16(alpha[0]);
alpha_i_128 = set1_int16(alpha[1]);
yr = mulhi_s1_int16(alpha_r_128,x_128[0]);
yi = mulhi_s1_int16(alpha_i_128,x_128[0]);
y_128 = _mm_adds_epi16(y_128,_mm_unpacklo_epi16(yr,yi));
y_128 = _mm_adds_epi16(y_128,_mm_unpackhi_epi16(yr,yi));
_mm_storeu_si128((simd_q15_t*)y, y_128);
_mm_empty();
_m_empty();
}
#ifdef __AVX2__
void rotate_cpx_vector(c16_t *x,
c16_t *alpha,
c16_t *y,
uint32_t N,
uint16_t output_shift)
{
// multiply a complex vector with a complex value (alpha)
// stores result in y
// N is the number of complex numbers
// output_shift reduces the result of the multiplication by this number of bits
//AssertFatal(N%8==0, "To be developed");
#ifdef __AVX2__
if ( (intptr_t)x%32 == 0 && (intptr_t)y%32 == 0 && __builtin_cpu_supports("avx2")) {
// both input and output must be 32-byte aligned for the aligned 256-bit loads/stores below
const c16_t for_re={alpha->r, -alpha->i};
__m256i const alpha_for_real = _mm256_set1_epi32(*(uint32_t*)&for_re);
const c16_t for_im={alpha->i, alpha->r};
__m256i const alpha_for_im= _mm256_set1_epi32(*(uint32_t*)&for_im);
__m256i const perm_mask =
_mm256_set_epi8(31,30,23,22,29,28,21,20,27,26,19,18,25,24,17,16,
15,14,7,6,13,12,5,4,11,10,3,2,9,8,1,0);
__m256i* xd= (__m256i*)x;
const __m256i *end=xd+N/8;
for( __m256i* yd = (__m256i *)y; xd<end ; yd++, xd++) {
const __m256i xre = _mm256_srai_epi32(_mm256_madd_epi16(*xd,alpha_for_real),
output_shift);
const __m256i xim = _mm256_srai_epi32(_mm256_madd_epi16(*xd,alpha_for_im),
output_shift);
// a bit faster than unpacklo+unpackhi+packs
const __m256i tmp=_mm256_packs_epi32(xre,xim);
*yd=_mm256_shuffle_epi8(tmp,perm_mask);
}
c16_t* alpha16=(c16_t*) alpha, *yLast;
yLast=((c16_t*)y)+(N/8)*8;
for (c16_t* xTail=(c16_t*) end;
xTail<((c16_t*) x)+N;
xTail++, yLast++) {
*yLast=c16mulShift(*xTail,*alpha16,output_shift);
}
} else {
#endif
// Multiply elementwise two complex vectors of N elements
// x - input 1 in the format |Re0 Im0 |,......,|Re(N-1) Im(N-1)|
// We assume x1 with a dynamic of 15 bit maximum
......@@ -229,99 +218,9 @@ void rotate_cpx_vector(c16_t *x,
xd+=4;
y_128+=1;
}
_mm_empty();
_m_empty();
return;
}
#endif
/*
int mult_vector32_scalar(int16_t *x1,
int x2,
int16_t *y,
uint32_t N)
{
// Multiply elementwise two real vectors of N elements
// x1 - input 1 in the format |Re(0) xxx Re(1) xxx|,......,|Re(N-2) xxx Re(N-1) xxx|
// We assume x1 with a dynamic of 31 bit maximum
//
// x1 - input 2
//
// y - output in the format |Re0 (64bit) |,......,|Re(N-1) (64bit)|
//
// N - the size of the vectors (this function does N cpx mpy). WARNING: N>=4
//
uint32_t i; // loop counter
__m128i *x1_128;
__m128i x2_128;
__m128i *y_128;
x1_128 = (__m128i *)&x1[0];
x2_128 = _mm_setr_epi32(x2,0,x2,0);
y_128 = (__m128i *)&y[0];
// we compute 4 cpx multiply for each loop
for(i=0; i<(N>>3); i++) {
y_128[0] = _mm_mul_epu32(x1_128[0],x2_128);
y_128[1] = _mm_mul_epu32(x1_128[1],x2_128);
y_128[2] = _mm_mul_epu32(x1_128[2],x2_128);
y_128[3] = _mm_mul_epu32(x1_128[3],x2_128);
x1_128+=4;
y_128 +=4;
}
_mm_empty();
_m_empty();
return(0);
}
*/
int complex_conjugate(int16_t *x1,
int16_t *y,
uint32_t N)
{
uint32_t i; // loop counter
simd_q15_t *x1_128;
simd_q15_t *y_128;
int16_t x2[8] __attribute__((aligned(16))) = {1,-1,1,-1,1,-1,1,-1};
simd_q15_t *x2_128 = (simd_q15_t*)&x2[0];
x1_128 = (simd_q15_t *)&x1[0];
y_128 = (simd_q15_t *)&y[0];
// we compute 4 cpx multiply for each loop
for(i=0; i<(N>>3); i++) {
y_128[0] = mullo_int16(x1_128[0],*x2_128);
y_128[1] = mullo_int16(x1_128[1],*x2_128);
y_128[2] = mullo_int16(x1_128[2],*x2_128);
y_128[3] = mullo_int16(x1_128[3],*x2_128);
x1_128+=4;
y_128 +=4;
}
_mm_empty();
_m_empty();
return(0);
}
#ifdef MAIN
#define L 8
......
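The AVX2 path in rotate_cpx_vector() above handles 8 complex samples per iteration and finishes the remainder with c16mulShift(). A hedged scalar sketch of what that per-element rotation is presumed to compute (the real helper may additionally saturate; this version simply truncates):

#include <stdint.h>

typedef struct { int16_t r; int16_t i; } c16_t;   /* assumed layout */

static inline c16_t rotate_one(c16_t x, c16_t alpha, int output_shift) {
  c16_t y;
  y.r = (int16_t)(((int32_t)x.r * alpha.r - (int32_t)x.i * alpha.i) >> output_shift);
  y.i = (int16_t)(((int32_t)x.r * alpha.i + (int32_t)x.i * alpha.r) >> output_shift);
  return y;
}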
/*
* Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The OpenAirInterface Software Alliance licenses this file to You under
* the OAI Public License, Version 1.1 (the "License"); you may not use this file
* except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.openairinterface.org/?page_id=698
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*-------------------------------------------------------------------------------
* For more information about the OpenAirInterface (OAI) Software Alliance:
* contact@openairinterface.org
*/
#include "PHY/sse_intrin.h"
void Zero_Buffer(void *buf,unsigned int length)
{
// zeroes the mmx_t buffer 'buf' starting from buf[0] to buf[length-1] in bytes
int i;
register __m64 mm0;
__m64 *mbuf = (__m64 *)buf;
// length>>=3; // put length in quadwords
mm0 = _m_pxor(mm0,mm0); // clear the register
for(i=0; i<length>>3; i++) // for each i
mbuf[i] = mm0; // put 0 in buf[i]
_mm_empty();
}
void mmxcopy(void *dest,void *src,int size)
{
// copy size bytes from src to dest
register int i;
register __m64 mm0;
__m64 *mmsrc = (__m64 *)src, *mmdest= (__m64 *)dest;
for (i=0; i<size>>3; i++) {
mm0 = mmsrc[i];
mmdest[i] = mm0;
}
_mm_empty();
}
void Zero_Buffer_nommx(void *buf,unsigned int length)
{
int i;
for (i=0; i<length>>2; i++)
((int *)buf)[i] = 0;
}
......@@ -2575,7 +2575,6 @@ const static int16_t tw64c[96] __attribute__((aligned(32))) = {
#define simd_q15_t __m128i
#define simdshort_q15_t __m64
#define shiftright_int16(a,shift) _mm_srai_epi16(a,shift)
#define set1_int16(a) _mm_set1_epi16(a);
#define mulhi_int16(a,b) _mm_mulhrs_epi16 (a,b)
#ifdef __AVX2__
#define simd256_q15_t __m256i
......
......@@ -181,9 +181,6 @@ typedef struct {
typedef struct {
/// \brief ?.
/// first index: ? [0..1023] (hard coded)
int16_t *prachF;
/// \brief ?.
/// first index: ce_level [0..3]
/// second index: rx antenna [0..63] (hard coded) \note Hard coded array size indexed by \c nb_antennas_rx.
......
......@@ -49,7 +49,7 @@ extern int oai_nfapi_rach_ind(nfapi_rach_indication_t *rach_ind);
void prach_procedures(PHY_VARS_eNB *eNB,
int br_flag ) {
uint16_t max_preamble[4],max_preamble_energy[4],max_preamble_delay[4],avg_preamble_energy[4];
uint16_t max_preamble[4]={0},max_preamble_energy[4]={0},max_preamble_delay[4]={0},avg_preamble_energy[4]={0};
uint16_t i;
int frame,subframe;
......
......@@ -145,7 +145,7 @@ class gtpEndPoints {
~gtpEndPoints() {
// automatically close all sockets on quit
for (const auto p : instances)
for (const auto &p : instances)
close(p.first);
}
};
......