/*
 * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The OpenAirInterface Software Alliance licenses this file to You under
 * the OAI Public License, Version 1.1  (the "License"); you may not use this file
 * except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.openairinterface.org/?page_id=698
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *-------------------------------------------------------------------------------
 * For more information about the OpenAirInterface (OAI) Software Alliance:
 *      contact@openairinterface.org
 */

/*! \file PHY/LTE_TRANSPORT/ulsch_demodulation.c
* \brief Top-level routines for demodulating the PUSCH physical channel from 36.211 V8.6 2009-03
* \author R. Knopp
* \date 2011
* \version 0.1
* \company Eurecom
* \email: knopp@eurecom.fr, florian.kaltenberger@eurecom.fr, ankit.bhamri@eurecom.fr
* \note
* \warning
*/

#include "PHY/defs_eNB.h"
#include "PHY/phy_extern.h"
#include "transport_eNB.h"
#include "PHY/sse_intrin.h"
#include "transport_common_proto.h"
#include "PHY/LTE_ESTIMATION/lte_estimation.h"
#include "PHY/MODULATION/modulation_eNB.h"

#include "T.h"

//extern char* namepointer_chMag ;
//eren
//extern int **ulchmag_eren;
//eren

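// Dither vectors: jitter[]/jitterc[] are added to the packed matched-filter outputs in
// ulsch_channel_compensation() and ulsch_detection_mrc() to compensate for the bias on the
// DC subcarrier that the saturating pack operations would otherwise cause after the IDFT.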
static short jitter[8]  __attribute__ ((aligned(16))) = {1,0,0,1,0,1,1,0};
static short jitterc[8] __attribute__ ((aligned(16))) = {0,1,1,0,1,0,0,1};

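// Undo the SC-FDMA transform precoding on one subframe of equalized PUSCH data (z points into
// rxdataF_comp): the Msc_PUSCH subcarriers of each data symbol are gathered, an inverse DFT is
// applied across them, and the result is written back in place. The z0..z11 pointers below step
// over the DRS symbols (and, for normal prefix, mark the last symbol, which may carry SRS).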
void lte_idft(LTE_DL_FRAME_PARMS *frame_parms,uint32_t *z, uint16_t Msc_PUSCH) {
#if defined(__x86_64__) || defined(__i386__)
  __m128i idft_in128[3][1200],idft_out128[3][1200];
  __m128i norm128;
#elif defined(__arm__)
  int16x8_t idft_in128[3][1200],idft_out128[3][1200];
  int16x8_t norm128;
#endif
  int16_t *idft_in0=(int16_t *)idft_in128[0],*idft_out0=(int16_t *)idft_out128[0];
  int16_t *idft_in1=(int16_t *)idft_in128[1],*idft_out1=(int16_t *)idft_out128[1];
  int16_t *idft_in2=(int16_t *)idft_in128[2],*idft_out2=(int16_t *)idft_out128[2];
  uint32_t *z0,*z1,*z2,*z3,*z4,*z5,*z6,*z7,*z8,*z9,*z10=NULL,*z11=NULL;
  int i,ip;
  LOG_T(PHY,"Doing lte_idft for Msc_PUSCH %d\n",Msc_PUSCH);

  if (frame_parms->Ncp == 0) { // Normal prefix
    z0 = z;
    z1 = z0+(frame_parms->N_RB_DL*12);
    z2 = z1+(frame_parms->N_RB_DL*12);
    //pilot
    z3 = z2+(2*frame_parms->N_RB_DL*12);
    z4 = z3+(frame_parms->N_RB_DL*12);
    z5 = z4+(frame_parms->N_RB_DL*12);
    z6 = z5+(frame_parms->N_RB_DL*12);
    z7 = z6+(frame_parms->N_RB_DL*12);
    z8 = z7+(frame_parms->N_RB_DL*12);
    //pilot
    z9 = z8+(2*frame_parms->N_RB_DL*12);
    z10 = z9+(frame_parms->N_RB_DL*12);
    // srs
    z11 = z10+(frame_parms->N_RB_DL*12);
  } else { // extended prefix
    z0 = z;
    z1 = z0+(frame_parms->N_RB_DL*12);
    //pilot
    z2 = z1+(2*frame_parms->N_RB_DL*12);
    z3 = z2+(frame_parms->N_RB_DL*12);
    z4 = z3+(frame_parms->N_RB_DL*12);
    z5 = z4+(frame_parms->N_RB_DL*12);
    z6 = z5+(frame_parms->N_RB_DL*12);
    //pilot
    z7 = z6+(2*frame_parms->N_RB_DL*12);
    z8 = z7+(frame_parms->N_RB_DL*12);
    // srs
    z9 = z8+(frame_parms->N_RB_DL*12);
  }
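  // The inverse DFT is built from the forward DFT kernels via idft(x) = conj(dft(conj(x)))/N:
  // conjugate the input, run a forward DFT of the matching size, then conjugate the output
  // (normalization is handled inside dft(), with an explicit scaling only for the 12-point case).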

  // conjugate input
  for (i=0; i<(Msc_PUSCH>>2); i++) {
#if defined(__x86_64__)||defined(__i386__)
    * &(((__m128i *)z0)[i])=_mm_sign_epi16( *&(((__m128i *)z0)[i]),*(__m128i *)&conjugate2[0]);
    * &(((__m128i *)z1)[i])=_mm_sign_epi16( *&(((__m128i *)z1)[i]),*(__m128i *)&conjugate2[0]);
    * &(((__m128i *)z2)[i])=_mm_sign_epi16( *&(((__m128i *)z2)[i]),*(__m128i *)&conjugate2[0]);
    * &(((__m128i *)z3)[i])=_mm_sign_epi16( *&(((__m128i *)z3)[i]),*(__m128i *)&conjugate2[0]);
    * &(((__m128i *)z4)[i])=_mm_sign_epi16( *&(((__m128i *)z4)[i]),*(__m128i *)&conjugate2[0]);
    * &(((__m128i *)z5)[i])=_mm_sign_epi16( *&(((__m128i *)z5)[i]),*(__m128i *)&conjugate2[0]);
    * &(((__m128i *)z6)[i])=_mm_sign_epi16( *&(((__m128i *)z6)[i]),*(__m128i *)&conjugate2[0]);
    * &(((__m128i *)z7)[i])=_mm_sign_epi16( *&(((__m128i *)z7)[i]),*(__m128i *)&conjugate2[0]);
    * &(((__m128i *)z8)[i])=_mm_sign_epi16( *&(((__m128i *)z8)[i]),*(__m128i *)&conjugate2[0]);
    * &(((__m128i *)z9)[i])=_mm_sign_epi16( *&(((__m128i *)z9)[i]),*(__m128i *)&conjugate2[0]);

    if (frame_parms->Ncp==NORMAL) {
      * &(((__m128i *)z10)[i])=_mm_sign_epi16( *&(((__m128i *)z10)[i]),*(__m128i *)&conjugate2[0]);
      * &(((__m128i *)z11)[i])=_mm_sign_epi16( *&(((__m128i *)z11)[i]),*(__m128i *)&conjugate2[0]);
    }

#elif defined(__arm__)
    * &(((int16x8_t *)z0)[i])=vmulq_s16( *&(((int16x8_t *)z0)[i]),*(int16x8_t *)&conjugate2[0]);
    * &(((int16x8_t *)z1)[i])=vmulq_s16( *&(((int16x8_t *)z1)[i]),*(int16x8_t *)&conjugate2[0]);
    * &(((int16x8_t *)z2)[i])=vmulq_s16( *&(((int16x8_t *)z2)[i]),*(int16x8_t *)&conjugate2[0]);
    * &(((int16x8_t *)z3)[i])=vmulq_s16( *&(((int16x8_t *)z3)[i]),*(int16x8_t *)&conjugate2[0]);
    * &(((int16x8_t *)z4)[i])=vmulq_s16( *&(((int16x8_t *)z4)[i]),*(int16x8_t *)&conjugate2[0]);
    * &(((int16x8_t *)z5)[i])=vmulq_s16( *&(((int16x8_t *)z5)[i]),*(int16x8_t *)&conjugate2[0]);
    * &(((int16x8_t *)z6)[i])=vmulq_s16( *&(((int16x8_t *)z6)[i]),*(int16x8_t *)&conjugate2[0]);
    * &(((int16x8_t *)z7)[i])=vmulq_s16( *&(((int16x8_t *)z7)[i]),*(int16x8_t *)&conjugate2[0]);
    * &(((int16x8_t *)z8)[i])=vmulq_s16( *&(((int16x8_t *)z8)[i]),*(int16x8_t *)&conjugate2[0]);
    * &(((int16x8_t *)z9)[i])=vmulq_s16( *&(((int16x8_t *)z9)[i]),*(int16x8_t *)&conjugate2[0]);

    if (frame_parms->Ncp==NORMAL) {
      * &(((int16x8_t *)z10)[i])=vmulq_s16( *&(((int16x8_t *)z10)[i]),*(int16x8_t *)&conjugate2[0]);
      * &(((int16x8_t *)z11)[i])=vmulq_s16( *&(((int16x8_t *)z11)[i]),*(int16x8_t *)&conjugate2[0]);
    }

#endif
  }

  for (i=0,ip=0; i<Msc_PUSCH; i++,ip+=4) {
    ((uint32_t *)idft_in0)[ip+0] =  z0[i];
    ((uint32_t *)idft_in0)[ip+1] =  z1[i];
    ((uint32_t *)idft_in0)[ip+2] =  z2[i];
    ((uint32_t *)idft_in0)[ip+3] =  z3[i];
    ((uint32_t *)idft_in1)[ip+0] =  z4[i];
    ((uint32_t *)idft_in1)[ip+1] =  z5[i];
    ((uint32_t *)idft_in1)[ip+2] =  z6[i];
    ((uint32_t *)idft_in1)[ip+3] =  z7[i];
    ((uint32_t *)idft_in2)[ip+0] =  z8[i];
    ((uint32_t *)idft_in2)[ip+1] =  z9[i];

    if (frame_parms->Ncp==0) {
      ((uint32_t *)idft_in2)[ip+2] =  z10[i];
      ((uint32_t *)idft_in2)[ip+3] =  z11[i];
    }
  }

  switch (Msc_PUSCH) {
    case 12:
      dft(DFT_12,(int16_t *)idft_in0,(int16_t *)idft_out0,0);
      dft(DFT_12,(int16_t *)idft_in1,(int16_t *)idft_out1,0);
      dft(DFT_12,(int16_t *)idft_in2,(int16_t *)idft_out2,0);
#if defined(__x86_64__)||defined(__i386__)
      norm128 = _mm_set1_epi16(9459);
#elif defined(__arm__)
      norm128 = vdupq_n_s16(9459);
#endif

      for (i=0; i<12; i++) {
#if defined(__x86_64__)||defined(__i386__)
        ((__m128i *)idft_out0)[i] = _mm_slli_epi16(_mm_mulhi_epi16(((__m128i *)idft_out0)[i],norm128),1);
        ((__m128i *)idft_out1)[i] = _mm_slli_epi16(_mm_mulhi_epi16(((__m128i *)idft_out1)[i],norm128),1);
        ((__m128i *)idft_out2)[i] = _mm_slli_epi16(_mm_mulhi_epi16(((__m128i *)idft_out2)[i],norm128),1);
#elif defined(__arm__)
        ((int16x8_t *)idft_out0)[i] = vqdmulhq_s16(((int16x8_t *)idft_out0)[i],norm128);
        ((int16x8_t *)idft_out1)[i] = vqdmulhq_s16(((int16x8_t *)idft_out1)[i],norm128);
        ((int16x8_t *)idft_out2)[i] = vqdmulhq_s16(((int16x8_t *)idft_out2)[i],norm128);
#endif
      }

      break;

    case 24:
      dft(DFT_24,idft_in0,idft_out0,1);
      dft(DFT_24,idft_in1,idft_out1,1);
      dft(DFT_24,idft_in2,idft_out2,1);
      break;

    case 36:
      dft(DFT_36,idft_in0,idft_out0,1);
      dft(DFT_36,idft_in1,idft_out1,1);
      dft(DFT_36,idft_in2,idft_out2,1);
      break;

    case 48:
      dft(DFT_48,idft_in0,idft_out0,1);
      dft(DFT_48,idft_in1,idft_out1,1);
      dft(DFT_48,idft_in2,idft_out2,1);
      break;

    case 60:
      dft(DFT_60,idft_in0,idft_out0,1);
      dft(DFT_60,idft_in1,idft_out1,1);
      dft(DFT_60,idft_in2,idft_out2,1);
      break;

    case 72:
      dft(DFT_72,idft_in0,idft_out0,1);
      dft(DFT_72,idft_in1,idft_out1,1);
      dft(DFT_72,idft_in2,idft_out2,1);
      break;

    case 96:
      dft(DFT_96,idft_in0,idft_out0,1);
      dft(DFT_96,idft_in1,idft_out1,1);
      dft(DFT_96,idft_in2,idft_out2,1);
      break;

    case 108:
      dft(DFT_108,idft_in0,idft_out0,1);
      dft(DFT_108,idft_in1,idft_out1,1);
      dft(DFT_108,idft_in2,idft_out2,1);
      break;

    case 120:
      dft(DFT_120,idft_in0,idft_out0,1);
      dft(DFT_120,idft_in1,idft_out1,1);
      dft(DFT_120,idft_in2,idft_out2,1);
      break;

    case 144:
      dft(DFT_144,idft_in0,idft_out0,1);
      dft(DFT_144,idft_in1,idft_out1,1);
      dft(DFT_144,idft_in2,idft_out2,1);
      break;

    case 180:
      dft(DFT_180,idft_in0,idft_out0,1);
      dft(DFT_180,idft_in1,idft_out1,1);
      dft(DFT_180,idft_in2,idft_out2,1);
      break;

    case 192:
      dft(DFT_192,idft_in0,idft_out0,1);
      dft(DFT_192,idft_in1,idft_out1,1);
      dft(DFT_192,idft_in2,idft_out2,1);
      break;

    case 216:
      dft(DFT_216,idft_in0,idft_out0,1);
      dft(DFT_216,idft_in1,idft_out1,1);
      dft(DFT_216,idft_in2,idft_out2,1);
      break;

    case 240:
      dft(DFT_240,idft_in0,idft_out0,1);
      dft(DFT_240,idft_in1,idft_out1,1);
      dft(DFT_240,idft_in2,idft_out2,1);
      break;

    case 288:
      dft(DFT_288,idft_in0,idft_out0,1);
      dft(DFT_288,idft_in1,idft_out1,1);
      dft(DFT_288,idft_in2,idft_out2,1);
      break;

    case 300:
      dft(DFT_300,idft_in0,idft_out0,1);
      dft(DFT_300,idft_in1,idft_out1,1);
      dft(DFT_300,idft_in2,idft_out2,1);
      break;

    case 324:
      dft(DFT_324,(int16_t *)idft_in0,(int16_t *)idft_out0,1);
      dft(DFT_324,(int16_t *)idft_in1,(int16_t *)idft_out1,1);
      dft(DFT_324,(int16_t *)idft_in2,(int16_t *)idft_out2,1);
      break;

    case 360:
      dft(DFT_360,(int16_t *)idft_in0,(int16_t *)idft_out0,1);
      dft(DFT_360,(int16_t *)idft_in1,(int16_t *)idft_out1,1);
      dft(DFT_360,(int16_t *)idft_in2,(int16_t *)idft_out2,1);
      break;

    case 384:
      dft(DFT_384,(int16_t *)idft_in0,(int16_t *)idft_out0,1);
      dft(DFT_384,(int16_t *)idft_in1,(int16_t *)idft_out1,1);
      dft(DFT_384,(int16_t *)idft_in2,(int16_t *)idft_out2,1);
      break;

    case 432:
      dft(DFT_432,(int16_t *)idft_in0,(int16_t *)idft_out0,1);
      dft(DFT_432,(int16_t *)idft_in1,(int16_t *)idft_out1,1);
      dft(DFT_432,(int16_t *)idft_in2,(int16_t *)idft_out2,1);
      break;

    case 480:
      dft(DFT_480,(int16_t *)idft_in0,(int16_t *)idft_out0,1);
      dft(DFT_480,(int16_t *)idft_in1,(int16_t *)idft_out1,1);
      dft(DFT_480,(int16_t *)idft_in2,(int16_t *)idft_out2,1);
      break;

    case 540:
      dft(DFT_540,(int16_t *)idft_in0,(int16_t *)idft_out0,1);
      dft(DFT_540,(int16_t *)idft_in1,(int16_t *)idft_out1,1);
      dft(DFT_540,(int16_t *)idft_in2,(int16_t *)idft_out2,1);
      break;

    case 576:
      dft(DFT_576,(int16_t *)idft_in0,(int16_t *)idft_out0,1);
      dft(DFT_576,(int16_t *)idft_in1,(int16_t *)idft_out1,1);
      dft(DFT_576,(int16_t *)idft_in2,(int16_t *)idft_out2,1);
      break;

    case 600:
      dft(DFT_600,(int16_t *)idft_in0,(int16_t *)idft_out0,1);
      dft(DFT_600,(int16_t *)idft_in1,(int16_t *)idft_out1,1);
      dft(DFT_600,(int16_t *)idft_in2,(int16_t *)idft_out2,1);
      break;

    case 648:
      dft(DFT_648,(int16_t *)idft_in0,(int16_t *)idft_out0,1);
      dft(DFT_648,(int16_t *)idft_in1,(int16_t *)idft_out1,1);
      dft(DFT_648,(int16_t *)idft_in2,(int16_t *)idft_out2,1);
      break;

    case 720:
      dft(DFT_720,(int16_t *)idft_in0,(int16_t *)idft_out0,1);
      dft(DFT_720,(int16_t *)idft_in1,(int16_t *)idft_out1,1);
      dft(DFT_720,(int16_t *)idft_in2,(int16_t *)idft_out2,1);
      break;

    case 768:
      dft(DFT_768,(int16_t *)idft_in0,(int16_t *)idft_out0,1);
      dft(DFT_768,(int16_t *)idft_in1,(int16_t *)idft_out1,1);
      dft(DFT_768,(int16_t *)idft_in2,(int16_t *)idft_out2,1);
      break;

    case 864:
      dft(DFT_864,(int16_t *)idft_in0,(int16_t *)idft_out0,1);
      dft(DFT_864,(int16_t *)idft_in1,(int16_t *)idft_out1,1);
      dft(DFT_864,(int16_t *)idft_in2,(int16_t *)idft_out2,1);
      break;

    case 900:
      dft(DFT_900,(int16_t *)idft_in0,(int16_t *)idft_out0,1);
      dft(DFT_900,(int16_t *)idft_in1,(int16_t *)idft_out1,1);
      dft(DFT_900,(int16_t *)idft_in2,(int16_t *)idft_out2,1);
      break;

    case 960:
      dft(DFT_960,(int16_t *)idft_in0,(int16_t *)idft_out0,1);
      dft(DFT_960,(int16_t *)idft_in1,(int16_t *)idft_out1,1);
      dft(DFT_960,(int16_t *)idft_in2,(int16_t *)idft_out2,1);
      break;

    case 972:
      dft(DFT_972,(int16_t *)idft_in0,(int16_t *)idft_out0,1);
      dft(DFT_972,(int16_t *)idft_in1,(int16_t *)idft_out1,1);
      dft(DFT_972,(int16_t *)idft_in2,(int16_t *)idft_out2,1);
      break;

    case 1080:
      dft(DFT_1080,(int16_t *)idft_in0,(int16_t *)idft_out0,1);
      dft(DFT_1080,(int16_t *)idft_in1,(int16_t *)idft_out1,1);
      dft(DFT_1080,(int16_t *)idft_in2,(int16_t *)idft_out2,1);
      break;

    case 1152:
      dft(DFT_1152,(int16_t *)idft_in0,(int16_t *)idft_out0,1);
      dft(DFT_1152,(int16_t *)idft_in1,(int16_t *)idft_out1,1);
      dft(DFT_1152,(int16_t *)idft_in2,(int16_t *)idft_out2,1);
      break;

    case 1200:
      dft(DFT_1200,idft_in0,idft_out0,1);
      dft(DFT_1200,idft_in1,idft_out1,1);
      dft(DFT_1200,idft_in2,idft_out2,1);
      break;

    default:
      // should not be reached
      LOG_E( PHY, "Unsupported Msc_PUSCH value of %"PRIu16"\n", Msc_PUSCH );
      return;
  }

  for (i=0,ip=0; i<Msc_PUSCH; i++,ip+=4) {
    z0[i]     = ((uint32_t *)idft_out0)[ip];

    if(LOG_DEBUGFLAG(DEBUG_ULSCH)) {
      LOG_I(PHY,"out0 (%d,%d),(%d,%d),(%d,%d),(%d,%d)\n",
            ((int16_t *)&idft_out0[ip])[0],((int16_t *)&idft_out0[ip])[1],
            ((int16_t *)&idft_out0[ip+1])[0],((int16_t *)&idft_out0[ip+1])[1],
            ((int16_t *)&idft_out0[ip+2])[0],((int16_t *)&idft_out0[ip+2])[1],
            ((int16_t *)&idft_out0[ip+3])[0],((int16_t *)&idft_out0[ip+3])[1]);
    }

    z1[i]     = ((uint32_t *)idft_out0)[ip+1];
    z2[i]     = ((uint32_t *)idft_out0)[ip+2];
    z3[i]     = ((uint32_t *)idft_out0)[ip+3];
    z4[i]     = ((uint32_t *)idft_out1)[ip+0];
    z5[i]     = ((uint32_t *)idft_out1)[ip+1];
    z6[i]     = ((uint32_t *)idft_out1)[ip+2];
    z7[i]     = ((uint32_t *)idft_out1)[ip+3];
    z8[i]     = ((uint32_t *)idft_out2)[ip];
    z9[i]     = ((uint32_t *)idft_out2)[ip+1];

    if (frame_parms->Ncp==0) {
      z10[i]    = ((uint32_t *)idft_out2)[ip+2];
      z11[i]    = ((uint32_t *)idft_out2)[ip+3];
    }
  }

  // conjugate output
  for (i=0; i<(Msc_PUSCH>>2); i++) {
#if defined(__x86_64__) || defined(__i386__)
    ((__m128i *)z0)[i]=_mm_sign_epi16(((__m128i *)z0)[i],*(__m128i *)&conjugate2[0]);
    ((__m128i *)z1)[i]=_mm_sign_epi16(((__m128i *)z1)[i],*(__m128i *)&conjugate2[0]);
    ((__m128i *)z2)[i]=_mm_sign_epi16(((__m128i *)z2)[i],*(__m128i *)&conjugate2[0]);
    ((__m128i *)z3)[i]=_mm_sign_epi16(((__m128i *)z3)[i],*(__m128i *)&conjugate2[0]);
    ((__m128i *)z4)[i]=_mm_sign_epi16(((__m128i *)z4)[i],*(__m128i *)&conjugate2[0]);
    ((__m128i *)z5)[i]=_mm_sign_epi16(((__m128i *)z5)[i],*(__m128i *)&conjugate2[0]);
    ((__m128i *)z6)[i]=_mm_sign_epi16(((__m128i *)z6)[i],*(__m128i *)&conjugate2[0]);
    ((__m128i *)z7)[i]=_mm_sign_epi16(((__m128i *)z7)[i],*(__m128i *)&conjugate2[0]);
    ((__m128i *)z8)[i]=_mm_sign_epi16(((__m128i *)z8)[i],*(__m128i *)&conjugate2[0]);
    ((__m128i *)z9)[i]=_mm_sign_epi16(((__m128i *)z9)[i],*(__m128i *)&conjugate2[0]);

    if (frame_parms->Ncp==NORMAL) {
      ((__m128i *)z10)[i]=_mm_sign_epi16(((__m128i *)z10)[i],*(__m128i *)&conjugate2[0]);
      ((__m128i *)z11)[i]=_mm_sign_epi16(((__m128i *)z11)[i],*(__m128i *)&conjugate2[0]);
    }

#elif defined(__arm__)
    * &(((int16x8_t *)z0)[i])=vmulq_s16( *&(((int16x8_t *)z0)[i]),*(int16x8_t *)&conjugate2[0]);
    * &(((int16x8_t *)z1)[i])=vmulq_s16( *&(((int16x8_t *)z1)[i]),*(int16x8_t *)&conjugate2[0]);
    * &(((int16x8_t *)z2)[i])=vmulq_s16( *&(((int16x8_t *)z2)[i]),*(int16x8_t *)&conjugate2[0]);
    * &(((int16x8_t *)z3)[i])=vmulq_s16( *&(((int16x8_t *)z3)[i]),*(int16x8_t *)&conjugate2[0]);
    * &(((int16x8_t *)z4)[i])=vmulq_s16( *&(((int16x8_t *)z4)[i]),*(int16x8_t *)&conjugate2[0]);
    * &(((int16x8_t *)z5)[i])=vmulq_s16( *&(((int16x8_t *)z5)[i]),*(int16x8_t *)&conjugate2[0]);
    * &(((int16x8_t *)z6)[i])=vmulq_s16( *&(((int16x8_t *)z6)[i]),*(int16x8_t *)&conjugate2[0]);
    * &(((int16x8_t *)z7)[i])=vmulq_s16( *&(((int16x8_t *)z7)[i]),*(int16x8_t *)&conjugate2[0]);
    * &(((int16x8_t *)z8)[i])=vmulq_s16( *&(((int16x8_t *)z8)[i]),*(int16x8_t *)&conjugate2[0]);
    * &(((int16x8_t *)z9)[i])=vmulq_s16( *&(((int16x8_t *)z9)[i]),*(int16x8_t *)&conjugate2[0]);

    if (frame_parms->Ncp==NORMAL) {
      * &(((int16x8_t *)z10)[i])=vmulq_s16( *&(((int16x8_t *)z10)[i]),*(int16x8_t *)&conjugate2[0]);
      * &(((int16x8_t *)z11)[i])=vmulq_s16( *&(((int16x8_t *)z11)[i]),*(int16x8_t *)&conjugate2[0]);
    }

#endif
  }

#if defined(__x86_64__) || defined(__i386__)
  _mm_empty();
  _m_empty();
#endif
}






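// QPSK LLRs: the compensated symbols in rxdataF_comp are already proportional to the two bit
// LLRs of each RE, so they are copied directly into the LLR buffer (llrp is advanced across calls).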
int32_t ulsch_qpsk_llr(LTE_DL_FRAME_PARMS *frame_parms,
                       int32_t **rxdataF_comp,
                       int16_t *ulsch_llr,
                       uint8_t symbol,
                       uint16_t nb_rb,
                       int16_t **llrp) {
#if defined(__x86_64__) || defined(__i386__)
  __m128i *rxF=(__m128i *)&rxdataF_comp[0][(symbol*frame_parms->N_RB_DL*12)];
  __m128i **llrp128 = (__m128i **)llrp;
#elif defined(__arm__)
  int16x8_t *rxF= (int16x8_t *)&rxdataF_comp[0][(symbol*frame_parms->N_RB_DL*12)];
  int16x8_t **llrp128 = (int16x8_t **)llrp;
#endif
  int i;

  for (i=0; i<(nb_rb*3); i++) {
    *(*llrp128) = *rxF;
    rxF++;
    (*llrp128)++;
  }

#if defined(__x86_64__) || defined(__i386__)
  _mm_empty();
  _m_empty();
#endif
  return(0);
}

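// 16QAM LLRs: per RE, the first bit pair is the compensated symbol itself and the second pair is
// ch_mag - |y|, where ul_ch_mag holds the scaled channel magnitude; the four LLRs are interleaved
// per RE into the output buffer.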
void ulsch_16qam_llr(LTE_DL_FRAME_PARMS *frame_parms,
                     int32_t **rxdataF_comp,
                     int16_t *ulsch_llr,
                     int32_t **ul_ch_mag,
                     uint8_t symbol,
                     uint16_t nb_rb,
                     int16_t **llrp) {
  int i;
#if defined(__x86_64__) || defined(__i386__)
  __m128i *rxF=(__m128i *)&rxdataF_comp[0][(symbol*frame_parms->N_RB_DL*12)];
  __m128i *ch_mag;
  __m128i mmtmpU0;
  __m128i **llrp128=(__m128i **)llrp;
  ch_mag =(__m128i *)&ul_ch_mag[0][(symbol*frame_parms->N_RB_DL*12)];
#elif defined(__arm__)
  int16x8_t *rxF=(int16x8_t *)&rxdataF_comp[0][(symbol*frame_parms->N_RB_DL*12)];
  int16x8_t *ch_mag;
  int16x8_t xmm0;
  int16_t **llrp16=llrp;
  ch_mag =(int16x8_t *)&ul_ch_mag[0][(symbol*frame_parms->N_RB_DL*12)];
#endif

  for (i=0; i<(nb_rb*3); i++) {
#if defined(__x86_64__) || defined(__i386__)
    mmtmpU0 = _mm_abs_epi16(rxF[i]);
    //    print_shorts("tmp0",&tmp0);
    mmtmpU0 = _mm_subs_epi16(ch_mag[i],mmtmpU0);
    (*llrp128)[0] = _mm_unpacklo_epi32(rxF[i],mmtmpU0);
    (*llrp128)[1] = _mm_unpackhi_epi32(rxF[i],mmtmpU0);
    (*llrp128)+=2;
#elif defined(__arm__)
    xmm0 = vabsq_s16(rxF[i]);
    xmm0 = vqsubq_s16(ch_mag[i],xmm0);
    (*llrp16)[0] = vgetq_lane_s16(rxF[i],0);
    (*llrp16)[1] = vgetq_lane_s16(xmm0,0);
    (*llrp16)[2] = vgetq_lane_s16(rxF[i],1);
    (*llrp16)[3] = vgetq_lane_s16(xmm0,1);
    (*llrp16)[4] = vgetq_lane_s16(rxF[i],2);
    (*llrp16)[5] = vgetq_lane_s16(xmm0,2);
    (*llrp16)[6] = vgetq_lane_s16(rxF[i],3);
    (*llrp16)[7] = vgetq_lane_s16(xmm0,3);
    (*llrp16)[8] = vgetq_lane_s16(rxF[i],4);
    (*llrp16)[9] = vgetq_lane_s16(xmm0,4);
    (*llrp16)[10] = vgetq_lane_s16(rxF[i],5);
    (*llrp16)[11] = vgetq_lane_s16(xmm0,5);
    (*llrp16)[12] = vgetq_lane_s16(rxF[i],6);
    (*llrp16)[13] = vgetq_lane_s16(xmm0,6);
    (*llrp16)[14] = vgetq_lane_s16(rxF[i],7);
    (*llrp16)[15] = vgetq_lane_s16(xmm0,7);
    (*llrp16)+=16;
#endif
    //    print_bytes("rxF[i]",&rxF[i]);
    //    print_bytes("rxF[i+1]",&rxF[i+1]);
  }

#if defined(__x86_64__) || defined(__i386__)
  _mm_empty();
  _m_empty();
#endif
}

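// 64QAM LLRs: per RE, the three bit pairs are the compensated symbol, ch_mag - |y|, and
// ch_magb - |ch_mag - |y||, using the two channel-magnitude terms prepared for this modulation.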
void ulsch_64qam_llr(LTE_DL_FRAME_PARMS *frame_parms,
                     int32_t **rxdataF_comp,
                     int16_t *ulsch_llr,
                     int32_t **ul_ch_mag,
                     int32_t **ul_ch_magb,
                     uint8_t symbol,
                     uint16_t nb_rb,
                     int16_t **llrp) {
  int i;
  int32_t **llrp32=(int32_t **)llrp;
#if defined(__x86_64__) || defined(__i386__)
  __m128i *rxF=(__m128i *)&rxdataF_comp[0][(symbol*frame_parms->N_RB_DL*12)];
  __m128i *ch_mag,*ch_magb;
  __m128i mmtmpU1,mmtmpU2;
  ch_mag =(__m128i *)&ul_ch_mag[0][(symbol*frame_parms->N_RB_DL*12)];
  ch_magb =(__m128i *)&ul_ch_magb[0][(symbol*frame_parms->N_RB_DL*12)];
#elif defined(__arm__)
  int16x8_t *rxF=(int16x8_t *)&rxdataF_comp[0][(symbol*frame_parms->N_RB_DL*12)];
  int16x8_t *ch_mag,*ch_magb;
  int16x8_t mmtmpU1,mmtmpU2;
  ch_mag =(int16x8_t *)&ul_ch_mag[0][(symbol*frame_parms->N_RB_DL*12)];
  ch_magb =(int16x8_t *)&ul_ch_magb[0][(symbol*frame_parms->N_RB_DL*12)];
#endif

  if(LOG_DEBUGFLAG(DEBUG_ULSCH)) {
#if defined(__x86_64__) || defined(__i386__)
    LOG_UI(PHY,"symbol %d: mag %d, magb %d\n",symbol,_mm_extract_epi16(ch_mag[0],0),_mm_extract_epi16(ch_magb[0],0));
#elif defined(__arm__)
    LOG_UI(PHY,"symbol %d: mag %d, magb %d\n",symbol,vgetq_lane_s16(ch_mag[0],0),vgetq_lane_s16(ch_magb[0],0));
#endif
  }

  for (i=0; i<(nb_rb*3); i++) {
#if defined(__x86_64__) || defined(__i386__)
    mmtmpU1 = _mm_abs_epi16(rxF[i]);
    mmtmpU1  = _mm_subs_epi16(ch_mag[i],mmtmpU1);
    mmtmpU2 = _mm_abs_epi16(mmtmpU1);
    mmtmpU2 = _mm_subs_epi16(ch_magb[i],mmtmpU2);
    (*llrp32)[0]  = _mm_extract_epi32(rxF[i],0);
    (*llrp32)[1]  = _mm_extract_epi32(mmtmpU1,0);
    (*llrp32)[2]  = _mm_extract_epi32(mmtmpU2,0);
    (*llrp32)[3]  = _mm_extract_epi32(rxF[i],1);
    (*llrp32)[4]  = _mm_extract_epi32(mmtmpU1,1);
    (*llrp32)[5]  = _mm_extract_epi32(mmtmpU2,1);
    (*llrp32)[6]  = _mm_extract_epi32(rxF[i],2);
    (*llrp32)[7]  = _mm_extract_epi32(mmtmpU1,2);
    (*llrp32)[8]  = _mm_extract_epi32(mmtmpU2,2);
    (*llrp32)[9]  = _mm_extract_epi32(rxF[i],3);
    (*llrp32)[10] = _mm_extract_epi32(mmtmpU1,3);
    (*llrp32)[11] = _mm_extract_epi32(mmtmpU2,3);
#elif defined(__arm__)
    mmtmpU1 = vabsq_s16(rxF[i]);
    mmtmpU1 = vqsubq_s16(ch_mag[i],mmtmpU1);
    mmtmpU2 = vabsq_s16(mmtmpU1);
    mmtmpU2 = vqsubq_s16(ch_magb[i],mmtmpU2);
    (*llrp32)[0]  = vgetq_lane_s32((int32x4_t)rxF[i],0);
    (*llrp32)[1]  = vgetq_lane_s32((int32x4_t)mmtmpU1,0);
    (*llrp32)[2]  = vgetq_lane_s32((int32x4_t)mmtmpU2,0);
    (*llrp32)[3]  = vgetq_lane_s32((int32x4_t)rxF[i],1);
    (*llrp32)[4]  = vgetq_lane_s32((int32x4_t)mmtmpU1,1);
    (*llrp32)[5]  = vgetq_lane_s32((int32x4_t)mmtmpU2,1);
    (*llrp32)[6]  = vgetq_lane_s32((int32x4_t)rxF[i],2);
    (*llrp32)[7]  = vgetq_lane_s32((int32x4_t)mmtmpU1,2);
    (*llrp32)[8]  = vgetq_lane_s32((int32x4_t)mmtmpU2,2);
    (*llrp32)[9]  = vgetq_lane_s32((int32x4_t)rxF[i],3);
    (*llrp32)[10] = vgetq_lane_s32((int32x4_t)mmtmpU1,3);
    (*llrp32)[11] = vgetq_lane_s32((int32x4_t)mmtmpU2,3);
#endif
    (*llrp32)+=12;
  }

#if defined(__x86_64__) || defined(__i386__)
  _mm_empty();
  _m_empty();
#endif
}

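// Maximum-ratio combining over two receive antennas: the matched-filter outputs and channel
// magnitudes of antenna 1 are averaged into antenna 0 (each branch halved to avoid overflow),
// and the jitterc dither is re-applied to the combined output.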
void ulsch_detection_mrc(LTE_DL_FRAME_PARMS *frame_parms,
                         int32_t **rxdataF_comp,
                         int32_t **ul_ch_mag,
                         int32_t **ul_ch_magb,
                         uint8_t symbol,
                         uint16_t nb_rb) {
#if defined(__x86_64__) || defined(__i386__)
  __m128i *rxdataF_comp128_0,*ul_ch_mag128_0,*ul_ch_mag128_0b;
  __m128i *rxdataF_comp128_1,*ul_ch_mag128_1,*ul_ch_mag128_1b;
#elif defined(__arm__)
  int16x8_t *rxdataF_comp128_0,*ul_ch_mag128_0,*ul_ch_mag128_0b;
  int16x8_t *rxdataF_comp128_1,*ul_ch_mag128_1,*ul_ch_mag128_1b;
#endif
  int32_t i;

  if (frame_parms->nb_antennas_rx>1) {
#if defined(__x86_64__) || defined(__i386__)
    rxdataF_comp128_0   = (__m128i *)&rxdataF_comp[0][symbol*frame_parms->N_RB_DL*12];
    rxdataF_comp128_1   = (__m128i *)&rxdataF_comp[1][symbol*frame_parms->N_RB_DL*12];
    ul_ch_mag128_0      = (__m128i *)&ul_ch_mag[0][symbol*frame_parms->N_RB_DL*12];
    ul_ch_mag128_1      = (__m128i *)&ul_ch_mag[1][symbol*frame_parms->N_RB_DL*12];
    ul_ch_mag128_0b     = (__m128i *)&ul_ch_magb[0][symbol*frame_parms->N_RB_DL*12];
    ul_ch_mag128_1b     = (__m128i *)&ul_ch_magb[1][symbol*frame_parms->N_RB_DL*12];

    // MRC on each re of rb, both on MF output and magnitude (for 16QAM/64QAM llr computation)
    for (i=0; i<nb_rb*3; i++) {
      rxdataF_comp128_0[i] = _mm_adds_epi16(_mm_srai_epi16(rxdataF_comp128_0[i],1),_mm_srai_epi16(rxdataF_comp128_1[i],1));
      ul_ch_mag128_0[i]    = _mm_adds_epi16(_mm_srai_epi16(ul_ch_mag128_0[i],1),_mm_srai_epi16(ul_ch_mag128_1[i],1));
      ul_ch_mag128_0b[i]   = _mm_adds_epi16(_mm_srai_epi16(ul_ch_mag128_0b[i],1),_mm_srai_epi16(ul_ch_mag128_1b[i],1));
      rxdataF_comp128_0[i] = _mm_add_epi16(rxdataF_comp128_0[i],(*(__m128i *)&jitterc[0]));
    }

#elif defined(__arm__)
    rxdataF_comp128_0   = (int16x8_t *)&rxdataF_comp[0][symbol*frame_parms->N_RB_DL*12];
    rxdataF_comp128_1   = (int16x8_t *)&rxdataF_comp[1][symbol*frame_parms->N_RB_DL*12];
    ul_ch_mag128_0      = (int16x8_t *)&ul_ch_mag[0][symbol*frame_parms->N_RB_DL*12];
    ul_ch_mag128_1      = (int16x8_t *)&ul_ch_mag[1][symbol*frame_parms->N_RB_DL*12];
    ul_ch_mag128_0b     = (int16x8_t *)&ul_ch_magb[0][symbol*frame_parms->N_RB_DL*12];
    ul_ch_mag128_1b     = (int16x8_t *)&ul_ch_magb[1][symbol*frame_parms->N_RB_DL*12];

    // MRC on each re of rb, both on MF output and magnitude (for 16QAM/64QAM llr computation)
    for (i=0; i<nb_rb*3; i++) {
      rxdataF_comp128_0[i] = vhaddq_s16(rxdataF_comp128_0[i],rxdataF_comp128_1[i]);
      ul_ch_mag128_0[i]    = vhaddq_s16(ul_ch_mag128_0[i],ul_ch_mag128_1[i]);
      ul_ch_mag128_0b[i]   = vhaddq_s16(ul_ch_mag128_0b[i],ul_ch_mag128_1b[i]);
      rxdataF_comp128_0[i] = vqaddq_s16(rxdataF_comp128_0[i],(*(int16x8_t *)&jitterc[0]));
    }

#endif
  }

#if defined(__x86_64__) || defined(__i386__)
  _mm_empty();
  _m_empty();
#endif
}

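// Extract the nb_rb allocated PUSCH RBs of one SC-FDMA symbol (starting at first_rb) from rxdataF
// into a contiguous block of rxdataF_ext, handling the wrap of the uplink allocation around the
// FFT edges (first_carrier_offset / DC).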
void ulsch_extract_rbs_single(int32_t **rxdataF,
                              int32_t **rxdataF_ext,
                              uint32_t first_rb,
                              uint32_t nb_rb,
                              uint8_t l,
                              uint8_t Ns,
                              LTE_DL_FRAME_PARMS *frame_parms) {
  uint16_t nb_rb1,nb_rb2;
  uint8_t aarx;
  int32_t *rxF,*rxF_ext;
  //uint8_t symbol = l+Ns*frame_parms->symbols_per_tti/2;
  uint8_t symbol = l+((7-frame_parms->Ncp)*(Ns&1)); ///symbol within sub-frame
  AssertFatal((frame_parms->nb_antennas_rx>0) && (frame_parms->nb_antennas_rx<5),
              "nb_antennas_rx not in (1-4)\n");

  for (aarx=0; aarx<frame_parms->nb_antennas_rx; aarx++) {
    nb_rb1 = cmin(cmax((int)(frame_parms->N_RB_UL) - (int)(2*first_rb),(int)0),(int)(2*nb_rb));    // 2 times no. RBs before the DC
    nb_rb2 = 2*nb_rb - nb_rb1;                                   // 2 times no. RBs after the DC

    if(LOG_DEBUGFLAG(DEBUG_ULSCH)) {
      LOG_UI(PHY,"ulsch_extract_rbs_single: 2*nb_rb1 = %d, 2*nb_rb2 = %d\n",nb_rb1,nb_rb2);
    }

    rxF_ext   = &rxdataF_ext[aarx][(symbol*frame_parms->N_RB_UL*12)];

    if (nb_rb1) {
      rxF = &rxdataF[aarx][(first_rb*12 + frame_parms->first_carrier_offset + symbol*frame_parms->ofdm_symbol_size)];
      memcpy(rxF_ext, rxF, nb_rb1*6*sizeof(int));
      rxF_ext += nb_rb1*6;

      if (nb_rb2)  {
        rxF = &rxdataF[aarx][(symbol*frame_parms->ofdm_symbol_size)];
        memcpy(rxF_ext, rxF, nb_rb2*6*sizeof(int));
        rxF_ext += nb_rb2*6;
      }
    } else { //there is only data in the second half
      rxF = &rxdataF[aarx][(6*(2*first_rb - frame_parms->N_RB_UL) + symbol*frame_parms->ofdm_symbol_size)];
      memcpy(rxF_ext, rxF, nb_rb2*6*sizeof(int));
      rxF_ext += nb_rb2*6;
    }
  }
}

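// Helper: compact rxdataF_ext into rxdataF_ext2 by keeping every second 32-bit RE of one symbol.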
void ulsch_correct_ext(int32_t **rxdataF_ext,
                       int32_t **rxdataF_ext2,
                       uint16_t symbol,
                       LTE_DL_FRAME_PARMS *frame_parms,
                       uint16_t nb_rb) {
  int32_t i,j,aarx;
  int32_t *rxF_ext2,*rxF_ext;

  for (aarx=0; aarx<frame_parms->nb_antennas_rx; aarx++) {
    rxF_ext2 = &rxdataF_ext2[aarx][symbol*12*frame_parms->N_RB_UL];
    rxF_ext  = &rxdataF_ext[aarx][2*symbol*12*frame_parms->N_RB_UL];

    for (i=0,j=0; i<12*nb_rb; i++,j+=2) {
      rxF_ext2[i] = rxF_ext[j];
    }
  }
}



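// Per-RE matched filtering: multiply the extracted REs by the conjugate of the DRS channel
// estimate (right-shifted by output_shift) and compute the squared channel magnitude ul_ch_mag;
// the Qm-dependent magnitude scaling is applied later (after equalization), as noted in the loop.
// A small dither (jitter[]) is added to the packed result to avoid a DC bias after the IDFT.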
void ulsch_channel_compensation(int32_t **rxdataF_ext,
                                int32_t **ul_ch_estimates_ext,
                                int32_t **ul_ch_mag,
                                int32_t **ul_ch_magb,
                                int32_t **rxdataF_comp,
                                LTE_DL_FRAME_PARMS *frame_parms,
                                uint8_t symbol,
                                uint8_t Qm,
                                uint16_t nb_rb,
                                uint8_t output_shift) {
  uint16_t rb;
#if defined(__x86_64__) || defined(__i386__)
  __m128i *ul_ch128,*ul_ch_mag128,*ul_ch_mag128b,*rxdataF128,*rxdataF_comp128;
  uint8_t aarx;//,symbol_mod;
  __m128i mmtmpU0,mmtmpU1,mmtmpU2,mmtmpU3;
#elif defined(__arm__)
  int16x4_t *ul_ch128,*rxdataF128;
  int16x8_t *ul_ch_mag128,*ul_ch_mag128b,*rxdataF_comp128;
  uint8_t aarx;//,symbol_mod;
  int32x4_t mmtmpU0,mmtmpU1,mmtmpU0b,mmtmpU1b;
  int16_t conj[4]__attribute__((aligned(16))) = {1,-1,1,-1};
  int32x4_t output_shift128 = vmovq_n_s32(-(int32_t)output_shift);
#endif

  for (aarx=0; aarx<frame_parms->nb_antennas_rx; aarx++) {
#if defined(__x86_64__) || defined(__i386__)
    ul_ch128          = (__m128i *)&ul_ch_estimates_ext[aarx][symbol*frame_parms->N_RB_DL*12];
    ul_ch_mag128      = (__m128i *)&ul_ch_mag[aarx][symbol*frame_parms->N_RB_DL*12];
    ul_ch_mag128b     = (__m128i *)&ul_ch_magb[aarx][symbol*frame_parms->N_RB_DL*12];
    rxdataF128        = (__m128i *)&rxdataF_ext[aarx][symbol*frame_parms->N_RB_DL*12];
    rxdataF_comp128   = (__m128i *)&rxdataF_comp[aarx][symbol*frame_parms->N_RB_DL*12];
#elif defined(__arm__)
    ul_ch128          = (int16x4_t *)&ul_ch_estimates_ext[aarx][symbol*frame_parms->N_RB_DL*12];
    ul_ch_mag128      = (int16x8_t *)&ul_ch_mag[aarx][symbol*frame_parms->N_RB_DL*12];
    ul_ch_mag128b     = (int16x8_t *)&ul_ch_magb[aarx][symbol*frame_parms->N_RB_DL*12];
    rxdataF128        = (int16x4_t *)&rxdataF_ext[aarx][symbol*frame_parms->N_RB_DL*12];
    rxdataF_comp128   = (int16x8_t *)&rxdataF_comp[aarx][symbol*frame_parms->N_RB_DL*12];
#endif

    for (rb=0; rb<nb_rb; rb++) {
      LOG_D(PHY,"comp: symbol %d rb %d\n",symbol,rb);
      // just compute channel magnitude without scaling, this is done after equalization for SC-FDMA
#if defined(__x86_64__) || defined(__i386__)
      mmtmpU0 = _mm_madd_epi16(ul_ch128[0],ul_ch128[0]);
      mmtmpU0 = _mm_srai_epi32(mmtmpU0,output_shift);
      mmtmpU1 = _mm_madd_epi16(ul_ch128[1],ul_ch128[1]);
      mmtmpU1 = _mm_srai_epi32(mmtmpU1,output_shift);
      mmtmpU0 = _mm_packs_epi32(mmtmpU0,mmtmpU1);
      ul_ch_mag128[0] = _mm_unpacklo_epi16(mmtmpU0,mmtmpU0);
      ul_ch_mag128[1] = _mm_unpackhi_epi16(mmtmpU0,mmtmpU0);
      mmtmpU0 = _mm_madd_epi16(ul_ch128[2],ul_ch128[2]);
      mmtmpU0 = _mm_srai_epi32(mmtmpU0,output_shift);
      mmtmpU1 = _mm_packs_epi32(mmtmpU0,mmtmpU0);
      ul_ch_mag128[2] = _mm_unpacklo_epi16(mmtmpU1,mmtmpU1);
      LOG_D(PHY,"comp: symbol %d rb %d => %d,%d,%d (output_shift %d)\n",symbol,rb,*((int16_t *)&ul_ch_mag128[0]),*((int16_t *)&ul_ch_mag128[1]),*((int16_t *)&ul_ch_mag128[2]),output_shift);
#elif defined(__arm__)
      mmtmpU0 = vmull_s16(ul_ch128[0], ul_ch128[0]);
      mmtmpU0 = vqshlq_s32(vqaddq_s32(mmtmpU0,vrev64q_s32(mmtmpU0)),-output_shift128);
      mmtmpU1 = vmull_s16(ul_ch128[1], ul_ch128[1]);
      mmtmpU1 = vqshlq_s32(vqaddq_s32(mmtmpU1,vrev64q_s32(mmtmpU1)),-output_shift128);
      ul_ch_mag128[0] = vcombine_s16(vmovn_s32(mmtmpU0),vmovn_s32(mmtmpU1));
      mmtmpU0 = vmull_s16(ul_ch128[2], ul_ch128[2]);
      mmtmpU0 = vqshlq_s32(vqaddq_s32(mmtmpU0,vrev64q_s32(mmtmpU0)),-output_shift128);
      mmtmpU1 = vmull_s16(ul_ch128[3], ul_ch128[3]);
      mmtmpU1 = vqshlq_s32(vqaddq_s32(mmtmpU1,vrev64q_s32(mmtmpU1)),-output_shift128);
      ul_ch_mag128[1] = vcombine_s16(vmovn_s32(mmtmpU0),vmovn_s32(mmtmpU1));
      mmtmpU0 = vmull_s16(ul_ch128[4], ul_ch128[4]);
      mmtmpU0 = vqshlq_s32(vqaddq_s32(mmtmpU0,vrev64q_s32(mmtmpU0)),-output_shift128);
      mmtmpU1 = vmull_s16(ul_ch128[5], ul_ch128[5]);
      mmtmpU1 = vqshlq_s32(vqaddq_s32(mmtmpU1,vrev64q_s32(mmtmpU1)),-output_shift128);
      ul_ch_mag128[2] = vcombine_s16(vmovn_s32(mmtmpU0),vmovn_s32(mmtmpU1));
#endif
#if defined(__x86_64__) || defined(__i386__)
      // multiply by conjugated channel
      mmtmpU0 = _mm_madd_epi16(ul_ch128[0],rxdataF128[0]);
      //        print_ints("re",&mmtmpU0);
      // mmtmpU0 contains real part of 4 consecutive outputs (32-bit)
      mmtmpU1 = _mm_shufflelo_epi16(ul_ch128[0],_MM_SHUFFLE(2,3,0,1));
      mmtmpU1 = _mm_shufflehi_epi16(mmtmpU1,_MM_SHUFFLE(2,3,0,1));
      mmtmpU1 = _mm_sign_epi16(mmtmpU1,*(__m128i *)&conjugate[0]);
      mmtmpU1 = _mm_madd_epi16(mmtmpU1,rxdataF128[0]);
      //      print_ints("im",&mmtmpU1);
      // mmtmpU1 contains imag part of 4 consecutive outputs (32-bit)
      mmtmpU0 = _mm_srai_epi32(mmtmpU0,output_shift);
      //  print_ints("re(shift)",&mmtmpU0);
      mmtmpU1 = _mm_srai_epi32(mmtmpU1,output_shift);
      //  print_ints("im(shift)",&mmtmpU1);
      mmtmpU2 = _mm_unpacklo_epi32(mmtmpU0,mmtmpU1);
      mmtmpU3 = _mm_unpackhi_epi32(mmtmpU0,mmtmpU1);
      //        print_ints("c0",&mmtmpU2);
      //  print_ints("c1",&mmtmpU3);
      rxdataF_comp128[0] = _mm_packs_epi32(mmtmpU2,mmtmpU3);
      /*
              print_shorts("rx:",&rxdataF128[0]);
              print_shorts("ch:",&ul_ch128[0]);
              print_shorts("pack:",&rxdataF_comp128[0]);
      */
      // multiply by conjugated channel
      mmtmpU0 = _mm_madd_epi16(ul_ch128[1],rxdataF128[1]);
      // mmtmpU0 contains real part of 4 consecutive outputs (32-bit)
      mmtmpU1 = _mm_shufflelo_epi16(ul_ch128[1],_MM_SHUFFLE(2,3,0,1));
      mmtmpU1 = _mm_shufflehi_epi16(mmtmpU1,_MM_SHUFFLE(2,3,0,1));
      mmtmpU1 = _mm_sign_epi16(mmtmpU1,*(__m128i *)conjugate);
      mmtmpU1 = _mm_madd_epi16(mmtmpU1,rxdataF128[1]);
      // mmtmpU1 contains imag part of 4 consecutive outputs (32-bit)
      mmtmpU0 = _mm_srai_epi32(mmtmpU0,output_shift);
      mmtmpU1 = _mm_srai_epi32(mmtmpU1,output_shift);
      mmtmpU2 = _mm_unpacklo_epi32(mmtmpU0,mmtmpU1);
      mmtmpU3 = _mm_unpackhi_epi32(mmtmpU0,mmtmpU1);
      rxdataF_comp128[1] = _mm_packs_epi32(mmtmpU2,mmtmpU3);
      //        print_shorts("rx:",rxdataF128[1]);
      //        print_shorts("ch:",ul_ch128[1]);
      //        print_shorts("pack:",rxdataF_comp128[1]);
      //       multiply by conjugated channel
      mmtmpU0 = _mm_madd_epi16(ul_ch128[2],rxdataF128[2]);
      // mmtmpU0 contains real part of 4 consecutive outputs (32-bit)
      mmtmpU1 = _mm_shufflelo_epi16(ul_ch128[2],_MM_SHUFFLE(2,3,0,1));
      mmtmpU1 = _mm_shufflehi_epi16(mmtmpU1,_MM_SHUFFLE(2,3,0,1));
      mmtmpU1 = _mm_sign_epi16(mmtmpU1,*(__m128i *)conjugate);
      mmtmpU1 = _mm_madd_epi16(mmtmpU1,rxdataF128[2]);
      // mmtmpU1 contains imag part of 4 consecutive outputs (32-bit)
      mmtmpU0 = _mm_srai_epi32(mmtmpU0,output_shift);
      mmtmpU1 = _mm_srai_epi32(mmtmpU1,output_shift);
      mmtmpU2 = _mm_unpacklo_epi32(mmtmpU0,mmtmpU1);
      mmtmpU3 = _mm_unpackhi_epi32(mmtmpU0,mmtmpU1);
      rxdataF_comp128[2] = _mm_packs_epi32(mmtmpU2,mmtmpU3);
      //        print_shorts("rx:",rxdataF128[2]);
      //        print_shorts("ch:",ul_ch128[2]);
      //        print_shorts("pack:",rxdataF_comp128[2]);
      // Add a jitter to compensate for the saturation in "packs" resulting in a bias on the DC after IDFT
      rxdataF_comp128[0] = _mm_add_epi16(rxdataF_comp128[0],(*(__m128i *)&jitter[0]));
      rxdataF_comp128[1] = _mm_add_epi16(rxdataF_comp128[1],(*(__m128i *)&jitter[0]));
      rxdataF_comp128[2] = _mm_add_epi16(rxdataF_comp128[2],(*(__m128i *)&jitter[0]));
      ul_ch128+=3;
      ul_ch_mag128+=3;
      ul_ch_mag128b+=3;
      rxdataF128+=3;
      rxdataF_comp128+=3;
#elif defined(__arm__)
      mmtmpU0 = vmull_s16(ul_ch128[0], rxdataF128[0]);
      //mmtmpU0 = [Re(ch[0])Re(rx[0]) Im(ch[0])Im(ch[0]) Re(ch[1])Re(rx[1]) Im(ch[1])Im(ch[1])]
      mmtmpU1 = vmull_s16(ul_ch128[1], rxdataF128[1]);
      //mmtmpU1 = [Re(ch[2])Re(rx[2]) Im(ch[2])Im(ch[2]) Re(ch[3])Re(rx[3]) Im(ch[3])Im(ch[3])]
      mmtmpU0 = vcombine_s32(vpadd_s32(vget_low_s32(mmtmpU0),vget_high_s32(mmtmpU0)),
                             vpadd_s32(vget_low_s32(mmtmpU1),vget_high_s32(mmtmpU1)));
      //mmtmpU0 = [Re(ch[0])Re(rx[0])+Im(ch[0])Im(ch[0]) Re(ch[1])Re(rx[1])+Im(ch[1])Im(ch[1]) Re(ch[2])Re(rx[2])+Im(ch[2])Im(ch[2]) Re(ch[3])Re(rx[3])+Im(ch[3])Im(ch[3])]
      mmtmpU0b = vmull_s16(vrev32_s16(vmul_s16(ul_ch128[0],*(int16x4_t *)conj)), rxdataF128[0]);
      //mmtmpU0 = [-Im(ch[0])Re(rx[0]) Re(ch[0])Im(rx[0]) -Im(ch[1])Re(rx[1]) Re(ch[1])Im(rx[1])]
      mmtmpU1b = vmull_s16(vrev32_s16(vmul_s16(ul_ch128[1],*(int16x4_t *)conj)), rxdataF128[1]);
      //mmtmpU0 = [-Im(ch[2])Re(rx[2]) Re(ch[2])Im(rx[2]) -Im(ch[3])Re(rx[3]) Re(ch[3])Im(rx[3])]
      mmtmpU1 = vcombine_s32(vpadd_s32(vget_low_s32(mmtmpU0b),vget_high_s32(mmtmpU0b)),
                             vpadd_s32(vget_low_s32(mmtmpU1b),vget_high_s32(mmtmpU1b)));
      //mmtmpU1 = [-Im(ch[0])Re(rx[0])+Re(ch[0])Im(rx[0]) -Im(ch[1])Re(rx[1])+Re(ch[1])Im(rx[1]) -Im(ch[2])Re(rx[2])+Re(ch[2])Im(rx[2]) -Im(ch[3])Re(rx[3])+Re(ch[3])Im(rx[3])]
      mmtmpU0 = vqshlq_s32(mmtmpU0,-output_shift128);
      mmtmpU1 = vqshlq_s32(mmtmpU1,-output_shift128);
      rxdataF_comp128[0] = vcombine_s16(vmovn_s32(mmtmpU0),vmovn_s32(mmtmpU1));
      mmtmpU0 = vmull_s16(ul_ch128[2], rxdataF128[2]);
      mmtmpU1 = vmull_s16(ul_ch128[3], rxdataF128[3]);
      mmtmpU0 = vcombine_s32(vpadd_s32(vget_low_s32(mmtmpU0),vget_high_s32(mmtmpU0)),
                             vpadd_s32(vget_low_s32(mmtmpU1),vget_high_s32(mmtmpU1)));
      mmtmpU0b = vmull_s16(vrev32_s16(vmul_s16(ul_ch128[2],*(int16x4_t *)conj)), rxdataF128[2]);
      mmtmpU1b = vmull_s16(vrev32_s16(vmul_s16(ul_ch128[3],*(int16x4_t *)conj)), rxdataF128[3]);
      mmtmpU1 = vcombine_s32(vpadd_s32(vget_low_s32(mmtmpU0b),vget_high_s32(mmtmpU0b)),
                             vpadd_s32(vget_low_s32(mmtmpU1b),vget_high_s32(mmtmpU1b)));
      mmtmpU0 = vqshlq_s32(mmtmpU0,-output_shift128);
      mmtmpU1 = vqshlq_s32(mmtmpU1,-output_shift128);
      rxdataF_comp128[1] = vcombine_s16(vmovn_s32(mmtmpU0),vmovn_s32(mmtmpU1));
      mmtmpU0 = vmull_s16(ul_ch128[4], rxdataF128[4]);
      mmtmpU1 = vmull_s16(ul_ch128[5], rxdataF128[5]);
      mmtmpU0 = vcombine_s32(vpadd_s32(vget_low_s32(mmtmpU0),vget_high_s32(mmtmpU0)),
                             vpadd_s32(vget_low_s32(mmtmpU1),vget_high_s32(mmtmpU1)));
      mmtmpU0b = vmull_s16(vrev32_s16(vmul_s16(ul_ch128[4],*(int16x4_t *)conj)), rxdataF128[4]);
      mmtmpU1b = vmull_s16(vrev32_s16(vmul_s16(ul_ch128[5],*(int16x4_t *)conj)), rxdataF128[5]);
      mmtmpU1 = vcombine_s32(vpadd_s32(vget_low_s32(mmtmpU0b),vget_high_s32(mmtmpU0b)),
                             vpadd_s32(vget_low_s32(mmtmpU1b),vget_high_s32(mmtmpU1b)));
      mmtmpU0 = vqshlq_s32(mmtmpU0,-output_shift128);
      mmtmpU1 = vqshlq_s32(mmtmpU1,-output_shift128);
      rxdataF_comp128[2] = vcombine_s16(vmovn_s32(mmtmpU0),vmovn_s32(mmtmpU1));
      // Add a jitter to compensate for the saturation in "packs" resulting in a bias on the DC after IDFT
      rxdataF_comp128[0] = vqaddq_s16(rxdataF_comp128[0],(*(int16x8_t *)&jitter[0]));
      rxdataF_comp128[1] = vqaddq_s16(rxdataF_comp128[1],(*(int16x8_t *)&jitter[0]));
      rxdataF_comp128[2] = vqaddq_s16(rxdataF_comp128[2],(*(int16x8_t *)&jitter[0]));
      ul_ch128+=6;
      ul_ch_mag128+=3;
      ul_ch_mag128b+=3;
      rxdataF128+=6;
      rxdataF_comp128+=3;
#endif
    }
  }

#if defined(__x86_64__) || defined(__i386__)
  _mm_empty();
  _m_empty();
#endif
}

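// Average DRS channel-estimate energy per RE over the allocated RBs, per receive antenna;
// rx_ulsch() uses the maximum over antennas to derive the fixed-point shift (log2_maxh).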
void ulsch_channel_level(int32_t **drs_ch_estimates_ext,
                         LTE_DL_FRAME_PARMS *frame_parms,
                         int32_t *avg,
                         uint16_t nb_rb) {
  int16_t rb;
  uint8_t aarx;
#if defined(__x86_64__) || defined(__i386__)
  __m128i *ul_ch128;
  __m128 avg128U;
#elif defined(__arm__)
  int32x4_t avg128U;
  int16x4_t *ul_ch128;
#endif

  for (aarx=0; aarx<frame_parms->nb_antennas_rx; aarx++) {
    //clear average level
#if defined(__x86_64__) || defined(__i386__)
    avg128U = _mm_setzero_ps();
    ul_ch128=(__m128i *)drs_ch_estimates_ext[aarx];

    for (rb=0; rb<nb_rb; rb++) {
      avg128U = _mm_add_ps(avg128U,_mm_cvtepi32_ps(_mm_madd_epi16(ul_ch128[0],ul_ch128[0])));
      avg128U = _mm_add_ps(avg128U,_mm_cvtepi32_ps(_mm_madd_epi16(ul_ch128[1],ul_ch128[1])));
      avg128U = _mm_add_ps(avg128U,_mm_cvtepi32_ps(_mm_madd_epi16(ul_ch128[2],ul_ch128[2])));
      ul_ch128+=3;
    }

#elif defined(__arm__)
    avg128U = vdupq_n_s32(0);
    ul_ch128=(int16x4_t *)drs_ch_estimates_ext[aarx];

    for (rb=0; rb<nb_rb; rb++) {
      avg128U = vqaddq_s32(avg128U,vmull_s16(ul_ch128[0],ul_ch128[0]));
      avg128U = vqaddq_s32(avg128U,vmull_s16(ul_ch128[1],ul_ch128[1]));
      avg128U = vqaddq_s32(avg128U,vmull_s16(ul_ch128[2],ul_ch128[2]));
      avg128U = vqaddq_s32(avg128U,vmull_s16(ul_ch128[3],ul_ch128[3]));
      avg128U = vqaddq_s32(avg128U,vmull_s16(ul_ch128[4],ul_ch128[4]));
      avg128U = vqaddq_s32(avg128U,vmull_s16(ul_ch128[5],ul_ch128[5]));
      ul_ch128+=6;
    }

#endif
    DevAssert( nb_rb );
    avg[aarx] = (int)((((float *)&avg128U)[0] +
                       ((float *)&avg128U)[1] +
                       ((float *)&avg128U)[2] +
                       ((float *)&avg128U)[3])/(float)(nb_rb*12));
  }

#if defined(__x86_64__) || defined(__i386__)
  _mm_empty();
  _m_empty();
#endif
}

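// ulsch_power_LUT[i] = ceil(2^(i/100) - 1); rx_ulsch() indexes it with roughly 100*Ks*MPR
// (Ks = 1.25, MPR ~ TBS per resource element) to remove the deltaMCS-dependent power offset
// from the measured DRS power.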
int ulsch_power_LUT[750];

void init_ulsch_power_LUT(void) {
  int i;

  for (i=0; i<750; i++) ulsch_power_LUT[i] = (int)ceil((pow(2.0,(double)i/100) - 1.0));
}
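
// Top-level PUSCH demodulation for one UE in one subframe: per-symbol RB extraction and UL
// channel estimation, matched-filter channel compensation (plus MRC over antennas and
// frequency-domain equalization), IDFT to undo the transform precoding, and per-symbol
// LLR generation according to the modulation order Qm.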

void rx_ulsch(PHY_VARS_eNB *eNB,
              L1_rxtx_proc_t *proc,
              uint8_t UE_id) {
  LTE_eNB_ULSCH_t **ulsch = eNB->ulsch;
  // flagMag = 0;
  LTE_eNB_COMMON *common_vars = &eNB->common_vars;
  LTE_eNB_PUSCH *pusch_vars = eNB->pusch_vars[UE_id];
  LTE_DL_FRAME_PARMS *frame_parms = &eNB->frame_parms;
  uint32_t l,i;
  int32_t avgs;
  uint8_t log2_maxh=0,aarx;
  int32_t avgU[eNB->frame_parms.nb_antennas_rx];
  //  uint8_t harq_pid = ( ulsch->RRCConnRequest_flag== 0) ? subframe2harq_pid_tdd(frame_parms->tdd_config,subframe) : 0;
  uint8_t harq_pid;
  uint8_t Qm;
  int16_t *llrp;
  int subframe = proc->subframe_rx;

  if (ulsch[UE_id]->ue_type > 0) harq_pid =0;
  else {
    harq_pid = subframe2harq_pid(frame_parms,proc->frame_rx,subframe);
  }

  Qm = ulsch[UE_id]->harq_processes[harq_pid]->Qm;

  if(LOG_DEBUGFLAG(DEBUG_ULSCH)) {
    LOG_I(PHY,"rx_ulsch: harq_pid %d, nb_rb %d first_rb %d\n",harq_pid,ulsch[UE_id]->harq_processes[harq_pid]->nb_rb,ulsch[UE_id]->harq_processes[harq_pid]->first_rb);
  }

  AssertFatal(ulsch[UE_id]->harq_processes[harq_pid]->nb_rb > 0,
              "PUSCH (%d/%x) nb_rb=0!\n", harq_pid,ulsch[UE_id]->rnti);

  for (l=0; l<(frame_parms->symbols_per_tti-ulsch[UE_id]->harq_processes[harq_pid]->srs_active); l++) {
    if(LOG_DEBUGFLAG(DEBUG_ULSCH)) {
      LOG_I(PHY,"rx_ulsch : symbol %d (first_rb %d,nb_rb %d), rxdataF %p, rxdataF_ext %p\n",l,
            ulsch[UE_id]->harq_processes[harq_pid]->first_rb,
            ulsch[UE_id]->harq_processes[harq_pid]->nb_rb,
            common_vars->rxdataF,
            pusch_vars->rxdataF_ext);
    }

    ulsch_extract_rbs_single(common_vars->rxdataF,
                             pusch_vars->rxdataF_ext,
                             ulsch[UE_id]->harq_processes[harq_pid]->first_rb,
                             ulsch[UE_id]->harq_processes[harq_pid]->nb_rb,
                             l%(frame_parms->symbols_per_tti/2),
                             l/(frame_parms->symbols_per_tti/2),
                             frame_parms);
    lte_ul_channel_estimation(eNB,proc,
                              UE_id,
                              l%(frame_parms->symbols_per_tti/2),
                              l/(frame_parms->symbols_per_tti/2));
  }

  int correction_factor = 1;
  int deltaMCS=1;
  int MPR_times_100Ks;

  if (deltaMCS==1) {
    // Note we're using TBS instead of sumKr, since didn't run segmentation yet!
    MPR_times_100Ks = 500*ulsch[UE_id]->harq_processes[harq_pid]->TBS/(ulsch[UE_id]->harq_processes[harq_pid]->nb_rb*12*4*ulsch[UE_id]->harq_processes[harq_pid]->Nsymb_pusch);
    AssertFatal(MPR_times_100Ks < 750 && MPR_times_100Ks >= 0,"Impossible value for MPR_times_100Ks %d (TBS %d,Nre %d)\n",
                MPR_times_100Ks,ulsch[UE_id]->harq_processes[harq_pid]->TBS,
                (ulsch[UE_id]->harq_processes[harq_pid]->nb_rb*12*4*ulsch[UE_id]->harq_processes[harq_pid]->Nsymb_pusch));

    if (MPR_times_100Ks > 0) correction_factor = ulsch_power_LUT[MPR_times_100Ks];
  }

  for (i=0; i<frame_parms->nb_antennas_rx; i++) {
    pusch_vars->ulsch_power[i] = signal_energy_nodc(pusch_vars->drs_ch_estimates[i],
                                 ulsch[UE_id]->harq_processes[harq_pid]->nb_rb*12)/correction_factor;
    LOG_D(PHY,"%4.4d.%d power harq_pid %d rb %2.2d TBS %2.2d (MPR_times_Ks %d correction %d)  power %d dBtimes10\n", proc->frame_rx, proc->subframe_rx, harq_pid,
          ulsch[UE_id]->harq_processes[harq_pid]->nb_rb, ulsch[UE_id]->harq_processes[harq_pid]->TBS,MPR_times_100Ks,correction_factor,dB_fixed_times10(pusch_vars->ulsch_power[i]));
  }

  ulsch_channel_level(pusch_vars->drs_ch_estimates,
                      frame_parms,
                      avgU,
                      ulsch[UE_id]->harq_processes[harq_pid]->nb_rb);
  LOG_D(PHY,"[ULSCH] avg[0] %d\n",avgU[0]);
  avgs = 0;

  for (aarx=0; aarx<frame_parms->nb_antennas_rx; aarx++)
    avgs = cmax(avgs,avgU[aarx]);

  //      log2_maxh = 4+(log2_approx(avgs)/2);
  log2_maxh = (log2_approx(avgs)/2)+ log2_approx(frame_parms->nb_antennas_rx-1)+4;
  LOG_D(PHY,"[ULSCH] log2_maxh = %d (%d,%d)\n",log2_maxh,avgU[0],avgs);

  for (l=0; l<(frame_parms->symbols_per_tti-ulsch[UE_id]->harq_processes[harq_pid]->srs_active); l++) {
    if (((frame_parms->Ncp == 0) && ((l==3) || (l==10)))||   // skip pilots
        ((frame_parms->Ncp == 1) && ((l==2) || (l==8)))) {
      l++;
    }

    ulsch_channel_compensation(
      pusch_vars->rxdataF_ext,
      pusch_vars->drs_ch_estimates,
      pusch_vars->ul_ch_mag,
      pusch_vars->ul_ch_magb,
      pusch_vars->rxdataF_comp,
      frame_parms,
      l,
      Qm,
      ulsch[UE_id]->harq_processes[harq_pid]->nb_rb,
      log2_maxh); // log2_maxh+I0_shift

    if (frame_parms->nb_antennas_rx > 1)
      ulsch_detection_mrc(frame_parms,
                          pusch_vars->rxdataF_comp,
                          pusch_vars->ul_ch_mag,
                          pusch_vars->ul_ch_magb,
                          l,
                          ulsch[UE_id]->harq_processes[harq_pid]->nb_rb);

    //    if ((eNB->measurements.n0_power_dB[0]+3)<pusch_vars->ulsch_power[0])
    if (23<pusch_vars->ulsch_power[0]) {
      freq_equalization(frame_parms,
                        pusch_vars->rxdataF_comp,
                        pusch_vars->ul_ch_mag,
                        pusch_vars->ul_ch_magb,
                        l,
                        ulsch[UE_id]->harq_processes[harq_pid]->nb_rb*12,
                        Qm);
    }
  }

  lte_idft(frame_parms,
           (uint32_t *)pusch_vars->rxdataF_comp[0],
           ulsch[UE_id]->harq_processes[harq_pid]->nb_rb*12);
  llrp = (int16_t *)&pusch_vars->llr[0];
  T(T_ENB_PHY_PUSCH_IQ, T_INT(0), T_INT(ulsch[UE_id]->rnti), T_INT(proc->frame_rx),
    T_INT(subframe), T_INT(ulsch[UE_id]->harq_processes[harq_pid]->nb_rb),
    T_INT(frame_parms->N_RB_UL), T_INT(frame_parms->symbols_per_tti),
    T_BUFFER(pusch_vars->rxdataF_comp[0],
             2 * /* ulsch[UE_id]->harq_processes[harq_pid]->nb_rb */ frame_parms->N_RB_UL *12*frame_parms->symbols_per_tti*2));

  for (l=0; l<frame_parms->symbols_per_tti-ulsch[UE_id]->harq_processes[harq_pid]->srs_active; l++) {
    if (((frame_parms->Ncp == 0) && ((l==3) || (l==10)))||   // skip pilots
        ((frame_parms->Ncp == 1) && ((l==2) || (l==8)))) {
      l++;
    }

    switch (Qm) {
      case 2 :
        ulsch_qpsk_llr(frame_parms,
                       pusch_vars->rxdataF_comp,
                       pusch_vars->llr,
                       l,
                       ulsch[UE_id]->harq_processes[harq_pid]->nb_rb,
                       &llrp);
        break;

      case 4 :
        ulsch_16qam_llr(frame_parms,
                        pusch_vars->rxdataF_comp,
                        pusch_vars->llr,
                        pusch_vars->ul_ch_mag,
                        l,ulsch[UE_id]->harq_processes[harq_pid]->nb_rb,
                        &llrp);
        break;

      case 6 :
        ulsch_64qam_llr(frame_parms,
                        pusch_vars->rxdataF_comp,
                        pusch_vars->llr,
                        pusch_vars->ul_ch_mag,
                        pusch_vars->ul_ch_magb,
                        l,ulsch[UE_id]->harq_processes[harq_pid]->nb_rb,
                        &llrp);
        break;

      default:
        LOG_E(PHY,"ulsch_demodulation.c (rx_ulsch): Unknown Qm!!!!\n");
        break;
    }
  }
}

void rx_ulsch_emul(PHY_VARS_eNB *eNB,
                   L1_rxtx_proc_t *proc,
                   uint8_t UE_index) {
  LOG_I(PHY,"[PHY] EMUL eNB %d rx_ulsch_emul : subframe %d, UE_index %d\n",eNB->Mod_id,proc->subframe_rx,UE_index);
  eNB->pusch_vars[UE_index]->ulsch_power[0] = 31622; //=45dB;
  eNB->pusch_vars[UE_index]->ulsch_power[1] = 31622; //=45dB;
}


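// Debug helper: dump received signals, channel estimates, compensated symbols and LLRs for one
// UE to /tmp as Octave/Matlab-readable files (LOG_UM).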
void dump_ulsch(PHY_VARS_eNB *eNB,int frame,int subframe,uint8_t UE_id,int round) {
  uint32_t nsymb = (eNB->frame_parms.Ncp == 0) ? 14 : 12;
  uint8_t harq_pid;
  char fname[100],vname[100];
  harq_pid = subframe2harq_pid(&eNB->frame_parms,frame,subframe);
  LOG_UI(PHY,"Dumping ULSCH in subframe %d with harq_pid %d, round %d for NB_rb %d, TBS %d, Qm %d, N_symb %d\n",
         subframe,harq_pid,round,eNB->ulsch[UE_id]->harq_processes[harq_pid]->nb_rb,
         eNB->ulsch[UE_id]->harq_processes[harq_pid]->TBS,eNB->ulsch[UE_id]->harq_processes[harq_pid]->Qm,
         eNB->ulsch[UE_id]->harq_processes[harq_pid]->Nsymb_pusch);
  sprintf(fname,"/tmp/ulsch_r%d_d",round);
  sprintf(vname,"/tmp/ulsch_r%d_dseq",round);
  LOG_UM(fname,vname,&eNB->ulsch[UE_id]->harq_processes[harq_pid]->d[0][96],
         eNB->ulsch[UE_id]->harq_processes[harq_pid]->Kplus*3,1,0);

  if (eNB->common_vars.rxdata) {
    sprintf(fname,"/tmp/rxsig0_r%d.m",round);
    sprintf(vname,"rxs0_r%d",round);
    LOG_UM(fname,vname, &eNB->common_vars.rxdata[0][0],eNB->frame_parms.samples_per_tti*10,1,1);

    if (eNB->frame_parms.nb_antennas_rx>1)
      if (eNB->common_vars.rxdata) {
        sprintf(fname,"/tmp/rxsig1_r%d.m",round);
        sprintf(vname,"rxs1_r%d",round);
        LOG_UM(fname,vname, &eNB->common_vars.rxdata[1][0],eNB->frame_parms.samples_per_tti*10,1,1);
      }
  }

  sprintf(fname,"/tmp/rxsigF0_r%d.m",round);
  sprintf(vname,"rxsF0_r%d",round);
  LOG_UM(fname,vname, (void *)&eNB->common_vars.rxdataF[0][0],eNB->frame_parms.ofdm_symbol_size*nsymb,1,1);

  if (eNB->frame_parms.nb_antennas_rx>1) {
    sprintf(fname,"/tmp/rxsigF1_r%d.m",round);
    sprintf(vname,"rxsF1_r%d",round);
    LOG_UM(fname,vname, &eNB->common_vars.rxdataF[1][0],eNB->frame_parms.ofdm_symbol_size*nsymb,1,1);
  }

  sprintf(fname,"/tmp/rxsigF0_ext_r%d.m",round);
  sprintf(vname,"rxsF0_ext_r%d",round);
  LOG_UM(fname,vname, &eNB->pusch_vars[UE_id]->rxdataF_ext[0][0],eNB->frame_parms.N_RB_UL*12*nsymb,1,1);

  if (eNB->frame_parms.nb_antennas_rx>1) {
    sprintf(fname,"/tmp/rxsigF1_ext_r%d.m",round);
    sprintf(vname,"rxsF1_ext_r%d",round);
    LOG_UM(fname,vname,&eNB->pusch_vars[UE_id]->rxdataF_ext[1][0],eNB->frame_parms.N_RB_UL*12*nsymb,1,1);
  }

  /*
  if (eNB->srs_vars[UE_id].srs_ch_estimates) LOG_UM("/tmp/srs_est0.m","srsest0",eNB->srs_vars[UE_id].srs_ch_estimates[0],eNB->frame_parms.ofdm_symbol_size,1,1);

  if (eNB->frame_parms.nb_antennas_rx>1)
    if (eNB->srs_vars[UE_id].srs_ch_estimates) LOG_UM("/tmp/srs_est1.m","srsest1",eNB->srs_vars[UE_id].srs_ch_estimates[1],eNB->frame_parms.ofdm_symbol_size,1,1);
  */
  sprintf(fname,"/tmp/drs_est0_r%d.m",round);
  sprintf(vname,"drsest0_r%d",round);
  LOG_UM(fname,vname,eNB->pusch_vars[UE_id]->drs_ch_estimates[0],eNB->frame_parms.N_RB_UL*12*nsymb,1,1);

  if (eNB->frame_parms.nb_antennas_rx>1) {
    sprintf(fname,"/tmp/drs_est1_r%d.m",round);
    sprintf(vname,"drsest1_r%d",round);
    LOG_UM(fname,vname,eNB->pusch_vars[UE_id]->drs_ch_estimates[1],eNB->frame_parms.N_RB_UL*12*nsymb,1,1);
  }

  sprintf(fname,"/tmp/ulsch0_rxF_comp0_r%d.m",round);
  sprintf(vname,"ulsch0_rxF_comp0_r%d",round);
  LOG_UM(fname,vname,&eNB->pusch_vars[UE_id]->rxdataF_comp[0][0],eNB->frame_parms.N_RB_UL*12*nsymb,1,1);
  //  LOG_M("ulsch_rxF_comp1.m","ulsch0_rxF_comp1",&eNB->pusch_vars[UE_id]->rxdataF_comp[0][1][0],eNB->frame_parms.N_RB_UL*12*nsymb,1,1);
  sprintf(fname,"/tmp/ulsch_rxF_llr_r%d.m",round);
  sprintf(vname,"ulsch_llr_r%d",round);
  LOG_UM(fname,vname,eNB->pusch_vars[UE_id]->llr,
         eNB->ulsch[UE_id]->harq_processes[harq_pid]->nb_rb*12*eNB->ulsch[UE_id]->harq_processes[harq_pid]->Qm
         *eNB->ulsch[UE_id]->harq_processes[harq_pid]->Nsymb_pusch,1,0);
  sprintf(fname,"/tmp/ulsch_ch_mag_r%d.m",round);
  sprintf(vname,"ulsch_ch_mag_r%d",round);
  LOG_UM(fname,vname,&eNB->pusch_vars[UE_id]->ul_ch_mag[0][0],eNB->frame_parms.N_RB_UL*12*nsymb,1,1);
  //  LOG_UM("ulsch_ch_mag1.m","ulsch_ch_mag1",&eNB->pusch_vars[UE_id]->ul_ch_mag[1][0],eNB->frame_parms.N_RB_UL*12*nsymb,1,1);
  //#endif
}