Commit 2f0646f3 authored by Navid Nikaein

* add a ULSCH scheduler and preprocessor

* check for false msg3 detection at the MAC layer, and indicate to the PHY to cancel the RA procedure
* fix a few compilation errors for Rel10


git-svn-id: http://svn.eurecom.fr/openair4G/trunk@5824 818b1a75-f10b-46b9-bf7c-635c3b92a50f
parent b79bd77b
......@@ -223,7 +223,7 @@ hashtable_rc_t hashtable_remove(hash_table_t *hashtblP, const hash_key_t keyP)
hash=hashtblP->hashfunc(keyP)%hashtblP->size;
node=hashtblP->nodes[hash];
while(node) {
if(node->key != keyP) {
if(node->key == keyP) {
if(prevnode) prevnode->next=node->next;
else hashtblP->nodes[hash]=node->next;
if (node->data) {
......
......@@ -86,6 +86,21 @@ int32_t lte_segmentation(uint8_t *input_buffer,
uint32_t *Kminus,
uint32_t *F);
/** \fn int16_t estimate_ue_tx_power(uint32_t tbs, uint32_t nb_rb, uint8_t control_only, lte_prefix_type_t ncp, uint8_t use_srs)
\brief this function calculates the delta MCS in dB based on the lte_segmentation function
\param tbs transport block size
\param nb_rb number of required rb
\param control_only a flag for the type of data
\param ncp cyclic prefix
\param use_srs a flag indicating the use of srs in the current SF
\returns ue_tx_power estimated ue tx power = delta_mcs + bw_factor
*/
int16_t estimate_ue_tx_power(uint32_t tbs,
uint32_t nb_rb,
uint8_t control_only,
lte_prefix_type_t ncp,
uint8_t use_srs);
/** \fn uint32_t sub_block_interleaving_turbo(uint32_t D, uint8_t *d,uint8_t *w)
\brief This is the subblock interleaving algorithm from 36-212 (Release 8, 8.6 2009-03), pages 15-16.
This function takes the d-sequence and generates the w-sequence. The nu-sequence from 36-212 is implicit.
......
......@@ -32,6 +32,7 @@
date: 21.10.2009
*/
#include "PHY/defs.h"
#include "SCHED/extern.h"
//#define DEBUG_SEGMENTATION
......@@ -166,6 +167,72 @@ int lte_segmentation(unsigned char *input_buffer,
return(0);
}
// uint8_t eNB_id,uint8_t harq_pid, uint8_t UE_id,
int16_t estimate_ue_tx_power(uint32_t tbs, uint32_t nb_rb, uint8_t control_only, lte_prefix_type_t ncp, uint8_t use_srs){
/// The payload + CRC size in bits, "B"
uint32_t B;
/// Number of code segments
uint32_t C;
/// Number of "small" code segments
uint32_t Cminus;
/// Number of "large" code segments
uint32_t Cplus;
/// Number of bits in "small" code segments (<6144)
uint32_t Kminus;
/// Number of bits in "large" code segments (<6144)
uint32_t Kplus;
/// Total number of bits across all segments
uint32_t sumKr;
/// Number of "Filler" bits
uint32_t F;
// num resource elements
uint32_t num_re=0;
// num symbols
uint32_t num_symb=0;
/// effective spectral efficiency of the PUSCH
uint32_t MPR_x100=0;
/// beta_offset
uint16_t beta_offset_pusch_x8=8;
/// delta mcs
float delta_mcs=0.0;
/// bandwidth factor
float bw_factor=0.0;
B= tbs+24;
lte_segmentation(NULL,
NULL,
B,
&C,
&Cplus,
&Cminus,
&Kplus,
&Kminus,
&F);
sumKr = Cminus*Kminus + Cplus*Kplus;
num_symb = 12-(ncp<<1)-(use_srs==0?0:1);
num_re = num_symb * nb_rb * 12;
if (num_re == 0)
return(0);
MPR_x100 = 100*sumKr/num_re;
if (control_only == 1 )
beta_offset_pusch_x8=8; // fixme
//(beta_offset_pusch_x8=phy_vars_ue->ulsch_ue[eNB_id]->harq_processes[harq_pid]->control_only == 1) ? phy_vars_ue->ulsch_ue[eNB_id]->beta_offset_cqi_times8:8;
// if deltaMCS-Enabled
delta_mcs = ((hundred_times_delta_TF[MPR_x100/6]+10*dB_fixed_times10((beta_offset_pusch_x8)>>3))/100.0);
bw_factor = (hundred_times_log10_NPRB[nb_rb-1]/100.0);
#ifdef DEBUG_SEGMENTATION
printf("estimated ue tx power %d (num_re %d, sumKr %d, mpr_x100 %d, delta_mcs %f, bw_factor %f)\n",
(int16_t)ceil(delta_mcs + bw_factor), num_re, sumKr, MPR_x100, delta_mcs, bw_factor);
#endif
return (int16_t)ceil(delta_mcs + bw_factor);
}
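/* Rough worked example (a sketch, not part of the original code; it assumes hundred_times_delta_TF
   follows the 36.213 delta_TF form 10*log10(2^(1.25*MPR)-1) and beta_offset_pusch_x8=8, i.e. no offset):
   tbs=1224 bits, nb_rb=6, normal CP, no SRS
     -> B = 1248, sumKr ~= 1248, num_symb = 12, num_re = 12*6*12 = 864, MPR ~= 1.44
     -> delta_mcs ~= 10*log10(2^(1.25*1.44)-1) ~= 4 dB, bw_factor = 10*log10(6) ~= 7.8 dB
     -> estimated ue tx power ~= ceil(4 + 7.8) = 12 dB */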
#ifdef MAIN
main() {
......
......@@ -51,10 +51,10 @@
* Memory Initialization and Cleanup for LTE MODEM.
* @{
\section _Memory_init_ Memory Initialization for LTE MODEM
Blah Blah
*/
#define DEBUG_PHY
//#define DEBUG_PHY
/*
#ifndef USER_MODE
......
......@@ -77,6 +77,8 @@ extern int synch_wait_cnt;
extern OPENAIR_DAQ_VARS openair_daq_vars;
extern int16_t hundred_times_delta_TF[100];
extern uint16_t hundred_times_log10_NPRB[100];
/*
#ifdef EMOS
extern fifo_dump_emos_UE emos_dump_UE;
......
......@@ -3132,7 +3132,18 @@ void phy_procedures_eNB_RX(unsigned char sched_subframe,PHY_VARS_eNB *phy_vars_e
phy_vars_eNB->ulsch_eNB[i]->rnti,
phy_vars_eNB->ulsch_eNB[i]->harq_processes[harq_pid]->b,
phy_vars_eNB->ulsch_eNB[i]->harq_processes[harq_pid]->TBS>>3,
harq_pid);
harq_pid,
&phy_vars_eNB->ulsch_eNB[i]->Msg3_flag);
// false msg3 detection by MAC: empty PDU
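// rx_sdu() clears Msg3_flag when the received MAC PDU carries neither SDUs nor control elements;
// in that case the RA procedure is cancelled and the UE context is removed below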
if (phy_vars_eNB->ulsch_eNB[i]->Msg3_flag == 0 ) {
phy_vars_eNB->eNB_UE_stats[i].mode = PRACH;
mac_xface->cancel_ra_proc(phy_vars_eNB->Mod_id,
phy_vars_eNB->CC_id,
frame,
phy_vars_eNB->eNB_UE_stats[i].crnti);
remove_ue(phy_vars_eNB->eNB_UE_stats[i].crnti,phy_vars_eNB,abstraction_flag);
phy_vars_eNB->ulsch_eNB[(uint32_t)i]->Msg3_active = 0;
}
/*
mac_xface->terminate_ra_proc(phy_vars_eNB->Mod_id,
frame,
......@@ -3193,7 +3204,8 @@ void phy_procedures_eNB_RX(unsigned char sched_subframe,PHY_VARS_eNB *phy_vars_e
phy_vars_eNB->ulsch_eNB[i]->rnti,
phy_vars_eNB->ulsch_eNB[i]->harq_processes[harq_pid]->b,
phy_vars_eNB->ulsch_eNB[i]->harq_processes[harq_pid]->TBS>>3,
harq_pid);
harq_pid,
NULL);
//}
/*
else {
......@@ -3663,7 +3675,9 @@ void phy_procedures_eNB_RX(unsigned char sched_subframe,PHY_VARS_eNB *phy_vars_e
phy_vars_eNB->ulsch_eNB[i]->rnti,
phy_vars_eNB->ulsch_eNB[i]->harq_processes[harq_pid]->b,
phy_vars_eNB->ulsch_eNB[i]->harq_processes[harq_pid]->TBS>>3,
harq_pid);
harq_pid,
NULL);
phy_vars_eNB->cba_last_reception[i%num_active_cba_groups]=1;//(subframe);
} else {
LOG_N(PHY,"[eNB %d] Frame %d subframe %d : CBA collision detected for UE%d for group %d, set the SR for this UE \n ",
......
......@@ -466,6 +466,8 @@ typedef struct{
uint64_t total_pdu_bytes_rx;
// total num pdu
uint32_t total_num_pdus_rx;
// num of error pdus
uint32_t total_num_errors_rx;
}eNB_UE_STATS;
......@@ -496,11 +498,23 @@ typedef struct{
uint8_t DLSCH_DCI[8][(MAX_DCI_SIZE_BITS>>3)+1];
/// Number of Allocated RBs for DL after scheduling (prior to frequency allocation)
uint16_t nb_rb[8];
uint16_t nb_rb[8]; // num_max_harq
/// Number of Allocated RBs for UL after scheduling (prior to frequency allocation)
uint16_t nb_rb_ul[8];
uint16_t nb_rb_ul[8]; // num_max_harq
/// Number of Allocated RBs by the ulsch preprocessor
uint8_t pre_allocated_nb_rb_ul;
/// index of Allocated RBs by the ulsch preprocessor
int8_t pre_allocated_rb_table_index_ul;
/// total allocated RBs
int8_t total_allocated_rbs;
/// assigned MCS by the ulsch preprocessor
uint8_t pre_assigned_mcs_ul;
/// DCI buffer for ULSCH
uint8_t ULSCH_DCI[8][(MAX_DCI_SIZE_BITS>>3)+1];
......@@ -524,6 +538,9 @@ typedef struct{
/// phr information
int8_t phr_info;
/// flag indicating whether phr information has been received
int8_t phr_info_configured;
//dl buffer info
uint32_t dl_buffer_info[MAX_NUM_LCID];
......@@ -541,6 +558,15 @@ typedef struct{
uint32_t dl_buffer_head_sdu_remaining_size_to_send[MAX_NUM_LCID];
// uplink info
uint32_t ul_total_buffer;
uint32_t ul_buffer_creation_time[MAX_NUM_LCGID];
uint32_t ul_buffer_creation_time_max;
uint32_t ul_buffer_info[MAX_NUM_LCGID];
} UE_TEMPLATE;
typedef struct{
......@@ -644,7 +670,9 @@ typedef struct{
UE_sched_ctrl UE_sched_ctrl[NUMBER_OF_UE_MAX];
int next[NUMBER_OF_UE_MAX];
int head;
int head;
int next_ul[NUMBER_OF_UE_MAX];
int head_ul;
int avail;
int num_UEs;
boolean_t active[NUMBER_OF_UE_MAX];
......
......@@ -1695,6 +1695,7 @@ void fill_DLSCH_dci(module_id_t module_idP,frame_t frameP, sub_frame_t subframeP
// Get candidate harq_pid from PHY
mac_xface->get_ue_active_harq_pid(module_idP,CC_id,RA_template->rnti,frameP,subframeP,&harq_pid,&round,0);
if (round>0) {
//RA_template->wait_ack_Msg4++;
// we have to schedule a retransmission
if (PHY_vars_eNB_g[module_idP][CC_id]->lte_frame_parms.frame_type == TDD)
((DCI1A_5MHz_TDD_1_6_t*)&RA_template->RA_alloc_pdu2[0])->ndi=1;
......@@ -1733,6 +1734,11 @@ void fill_DLSCH_dci(module_id_t module_idP,frame_t frameP, sub_frame_t subframeP
module_idP,frameP,subframeP,RA_template->rnti);
}
else {
/* msg4 not received
if ((round == 0) && (RA_template->wait_ack_Msg4>1){
remove UE instance across all the layers: mac_xface->cancel_RA();
}
*/
LOG_I(MAC,"[eNB %d][RAPROC] Frame %d, subframeP %d : Msg4 acknowledged\n",module_idP,frameP,subframeP);
RA_template->wait_ack_Msg4=0;
RA_template->RA_active=FALSE;
......
......@@ -195,11 +195,16 @@ uint8_t find_num_active_UEs_in_cbagroup(module_id_t module_idP, int CC_id,unsign
}
#endif
void dump_ue_list(UE_list_t *listP) {
void dump_ue_list(UE_list_t *listP, int ul_flag) {
int j;
for (j=listP->head;j>=0;j=listP->next[j]) {
LOG_T(MAC,"node %d => %d\n",j,listP->next[j]);
if ( ul_flag == 0 ){
for (j=listP->head;j>=0;j=listP->next[j]) {
LOG_T(MAC,"node %d => %d\n",j,listP->next[j]);
}
} else {
for (j=listP->head_ul;j>=0;j=listP->next_ul[j]) {
LOG_T(MAC,"node %d => %d\n",j,listP->next_ul[j]);
}
}
}
......@@ -210,14 +215,15 @@ int add_new_ue(module_id_t mod_idP, int cc_idP, rnti_t rntiP,int harq_pidP) {
UE_list_t *UE_list = &eNB_mac_inst[mod_idP].UE_list;
LOG_D(MAC,"[eNB %d, CC_id %d] Adding UE with rnti %x (next avail %d, num_UEs %d)\n",mod_idP,cc_idP,rntiP,UE_list->avail,UE_list->num_UEs);
dump_ue_list(UE_list);
dump_ue_list(UE_list,0);
if (UE_list->avail>=0) {
UE_id = UE_list->avail;
UE_list->avail = UE_list->next[UE_list->avail];
UE_list->next[UE_id] = UE_list->head;
UE_list->next_ul[UE_id] = UE_list->head;
UE_list->head = UE_id;
UE_list->head_ul = UE_id;
UE_list->UE_template[cc_idP][UE_id].rnti = rntiP;
UE_list->UE_template[cc_idP][UE_id].configured = FALSE;
UE_list->numactiveCCs[UE_id] = 1;
......@@ -235,24 +241,24 @@ int add_new_ue(module_id_t mod_idP, int cc_idP, rnti_t rntiP,int harq_pidP) {
eNB_ulsch_info[mod_idP][UE_id].status = S_UL_WAITING;
eNB_dlsch_info[mod_idP][UE_id].status = S_UL_WAITING;
LOG_D(MAC,"[eNB %d] Add UE_id %d on Primary CC_id %d: rnti %x\n",mod_idP,UE_id,cc_idP,rntiP);
dump_ue_list(UE_list);
dump_ue_list(UE_list,0);
return(UE_id);
}
LOG_E(MAC,"error in add_new_ue(), could not find space in UE_list, Dumping UE list\n");
dump_ue_list(UE_list);
dump_ue_list(UE_list,0);
return(-1);
}
int mac_remove_ue(module_id_t mod_idP, int ue_idP) {
int prev,i;
int prev,i, ret=-1;
UE_list_t *UE_list = &eNB_mac_inst[mod_idP].UE_list;
int pCC_id = UE_PCCID(mod_idP,ue_idP);
LOG_I(MAC,"Removing UE %d from Primary CC_id %d (rnti %x)\n",ue_idP,pCC_id, UE_list->UE_template[pCC_id][ue_idP].rnti);
dump_ue_list(UE_list);
dump_ue_list(UE_list,0);
// clear all remaining pending transmissions
UE_list->UE_template[pCC_id][ue_idP].bsr_info[LCGID0] = 0;
......@@ -283,15 +289,32 @@ int mac_remove_ue(module_id_t mod_idP, int ue_idP) {
UE_list->avail = i;
UE_list->active[i] = FALSE;
UE_list->num_UEs--;
return(0);
ret=0;
break;
}
prev=i;
}
// do the same for UL
prev = UE_list->head_ul;
for (i=UE_list->head_ul;i>=0;i=UE_list->next_ul[i]) {
if (i == ue_idP) {
// link prev to next in Active list
if (prev==UE_list->head_ul)
UE_list->head_ul = UE_list->next_ul[i];
else
UE_list->next_ul[prev] = UE_list->next_ul[i];
// add UE id (i) to available
UE_list->next_ul[i] = UE_list->avail;
ret = 0;
break;
}
prev=i;
}
if (ret == 0)
return (0);
LOG_E(MAC,"error in mac_remove_ue(), could not find previous to %d in UE_list, should never happen, Dumping UE list\n",ue_idP);
dump_ue_list(UE_list);
dump_ue_list(UE_list,0);
mac_xface->macphy_exit("");
return(-1);
......@@ -299,84 +322,146 @@ int mac_remove_ue(module_id_t mod_idP, int ue_idP) {
int prev(UE_list_t *listP, int nodeP) {
int prev(UE_list_t *listP, int nodeP, int ul_flag) {
int j;
if (nodeP==listP->head)
return(nodeP);
for (j=listP->head;j>=0;j=listP->next[j]) {
if (listP->next[j]==nodeP)
return(j);
if (ul_flag == 0 ) {
if (nodeP==listP->head)
return(nodeP);
for (j=listP->head;j>=0;j=listP->next[j]) {
if (listP->next[j]==nodeP)
return(j);
}
} else {
if (nodeP==listP->head_ul)
return(nodeP);
for (j=listP->head_ul;j>=0;j=listP->next_ul[j]) {
if (listP->next_ul[j]==nodeP)
return(j);
}
}
LOG_E(MAC,"error in prev(), could not find previous to %d in UE_list, should never happen, Dumping UE list\n",nodeP);
dump_ue_list(listP);
LOG_E(MAC,"error in prev(), could not find previous to %d in UE_list %s, should never happen, Dumping UE list\n",
nodeP, (ul_flag == 0)? "DL" : "UL");
dump_ue_list(listP, ul_flag);
return(-1);
}
void swap_UEs(UE_list_t *listP,int nodeiP, int nodejP) {
void swap_UEs(UE_list_t *listP,int nodeiP, int nodejP, int ul_flag) {
int prev_i,prev_j,next_i,next_j;
LOG_D(MAC,"Swapping UE %d,%d\n",nodeiP,nodejP);
dump_ue_list(listP);
LOG_T(MAC,"Swapping UE %d,%d\n",nodeiP,nodejP);
dump_ue_list(listP,ul_flag);
prev_i = prev(listP,nodeiP);
prev_j = prev(listP,nodejP);
prev_i = prev(listP,nodeiP,ul_flag);
prev_j = prev(listP,nodejP,ul_flag);
if ((prev_i<0) || (prev_j<0))
mac_xface->macphy_exit("");
next_i = listP->next[nodeiP];
next_j = listP->next[nodejP];
LOG_D(MAC,"next_i %d, next_i, next_j %d, head %d\n",next_i,next_j,listP->head);
if (next_i == nodejP) { // case ... p(i) i j n(j) ... => ... p(j) j i n(i) ...
LOG_D(MAC,"Case ... p(i) i j n(j) ... => ... p(j) j i n(i) ...\n");
listP->next[nodeiP] = next_j;
listP->next[nodejP] = nodeiP;
if (nodeiP==listP->head) // case i j n(j)
listP->head = nodejP;
else
listP->next[prev_i] = nodejP;
}
else if (next_j == nodeiP) { // case ... p(j) j i n(i) ... => ... p(i) i j n(j) ...
LOG_D(MAC,"Case ... p(j) j i n(i) ... => ... p(i) i j n(j) ...\n");
listP->next[nodejP] = next_i;
listP->next[nodeiP] = nodejP;
if (nodejP==listP->head) // case j i n(i)
listP->head = nodeiP;
else
listP->next[prev_j] = nodeiP;
if (ul_flag == 0){
next_i = listP->next[nodeiP];
next_j = listP->next[nodejP];
} else {
next_i = listP->next_ul[nodeiP];
next_j = listP->next_ul[nodejP];
}
else { // case ... p(i) i n(i) ... p(j) j n(j) ...
listP->next[nodejP] = next_i;
listP->next[nodeiP] = next_j;
LOG_T(MAC,"[%s] next_i %d, next_i, next_j %d, head %d \n",
(ul_flag == 0)? "DL" : "UL",
next_i,next_j,listP->head);
if (nodeiP==listP->head) {
LOG_D(MAC,"changing head to %d\n",nodejP);
listP->head=nodejP;
listP->next[prev_j] = nodeiP;
if (ul_flag == 0 ) {
if (next_i == nodejP) { // case ... p(i) i j n(j) ... => ... p(j) j i n(i) ...
LOG_T(MAC,"Case ... p(i) i j n(j) ... => ... p(j) j i n(i) ...\n");
listP->next[nodeiP] = next_j;
listP->next[nodejP] = nodeiP;
if (nodeiP==listP->head) // case i j n(j)
listP->head = nodejP;
else
listP->next[prev_i] = nodejP;
}
else if (nodejP==listP->head){
LOG_D(MAC,"changing head to %d\n",nodeiP);
listP->head=nodeiP;
listP->next[prev_i] = nodejP;
else if (next_j == nodeiP) { // case ... p(j) j i n(i) ... => ... p(i) i j n(j) ...
LOG_T(MAC,"Case ... p(j) j i n(i) ... => ... p(i) i j n(j) ...\n");
listP->next[nodejP] = next_i;
listP->next[nodeiP] = nodejP;
if (nodejP==listP->head) // case j i n(i)
listP->head = nodeiP;
else
listP->next[prev_j] = nodeiP;
}
else {
listP->next[prev_i] = nodejP;
listP->next[prev_j] = nodeiP;
else { // case ... p(i) i n(i) ... p(j) j n(j) ...
listP->next[nodejP] = next_i;
listP->next[nodeiP] = next_j;
if (nodeiP==listP->head) {
LOG_T(MAC,"changing head to %d\n",nodejP);
listP->head=nodejP;
listP->next[prev_j] = nodeiP;
}
else if (nodejP==listP->head){
LOG_D(MAC,"changing head to %d\n",nodeiP);
listP->head=nodeiP;
listP->next[prev_i] = nodejP;
}
else {
listP->next[prev_i] = nodejP;
listP->next[prev_j] = nodeiP;
}
}
} else { // ul_flag
if (next_i == nodejP) { // case ... p(i) i j n(j) ... => ... p(j) j i n(i) ...
LOG_T(MAC,"[UL] Case ... p(i) i j n(j) ... => ... p(j) j i n(i) ...\n");
listP->next_ul[nodeiP] = next_j;
listP->next_ul[nodejP] = nodeiP;
if (nodeiP==listP->head_ul) // case i j n(j)
listP->head_ul = nodejP;
else
listP->next_ul[prev_i] = nodejP;
}
else if (next_j == nodeiP) { // case ... p(j) j i n(i) ... => ... p(i) i j n(j) ...
LOG_T(MAC,"[UL]Case ... p(j) j i n(i) ... => ... p(i) i j n(j) ...\n");
listP->next_ul[nodejP] = next_i;
listP->next_ul[nodeiP] = nodejP;
if (nodejP==listP->head_ul) // case j i n(i)
listP->head_ul = nodeiP;
else
listP->next_ul[prev_j] = nodeiP;
}
else { // case ... p(i) i n(i) ... p(j) j n(j) ...
listP->next_ul[nodejP] = next_i;
listP->next_ul[nodeiP] = next_j;
if (nodeiP==listP->head_ul) {
LOG_T(MAC,"[UL]changing head to %d\n",nodejP);
listP->head_ul=nodejP;
listP->next_ul[prev_j] = nodeiP;
}
else if (nodejP==listP->head_ul){
LOG_T(MAC,"[UL]changing head to %d\n",nodeiP);
listP->head_ul=nodeiP;
listP->next_ul[prev_i] = nodejP;
}
else {
listP->next_ul[prev_i] = nodejP;
listP->next_ul[prev_j] = nodeiP;
}
}
}
LOG_D(MAC,"After swap\n");
dump_ue_list(listP);
LOG_T(MAC,"After swap\n");
dump_ue_list(listP,ul_flag);
}
void SR_indication(module_id_t mod_idP, int cc_idP, frame_t frameP, rnti_t rntiP, sub_frame_t subframeP) {
......
......@@ -73,7 +73,7 @@
// This table holds the allowable PRB sizes for ULSCH transmissions
uint8_t rb_table[33] = {1,2,3,4,5,6,8,9,10,12,15,16,18,20,24,25,27,30,32,36,40,45,48,50,54,60,72,75,80,81,90,96,100};
void rx_sdu(module_id_t enb_mod_idP,int CC_idP,frame_t frameP,rnti_t rntiP,uint8_t *sduP, uint16_t sdu_lenP,int harq_pidP) {
void rx_sdu(module_id_t enb_mod_idP,int CC_idP,frame_t frameP,rnti_t rntiP,uint8_t *sduP, uint16_t sdu_lenP,int harq_pidP,uint8_t *msg3_flagP) {
unsigned char rx_ces[MAX_NUM_CE],num_ce,num_sdu,i,*payload_ptr;
unsigned char rx_lcids[NB_RB_MAX];
......@@ -101,6 +101,7 @@ void rx_sdu(module_id_t enb_mod_idP,int CC_idP,frame_t frameP,rnti_t rntiP,uint8
if (UE_id != UE_INDEX_INVALID ){
UE_list->UE_template[CC_idP][UE_id].phr_info = (payload_ptr[0] & 0x3f) - PHR_MAPPING_OFFSET;
LOG_D(MAC, "[eNB] MAC CE_LCID %d : Received PHR PH = %d (db)\n", rx_ces[i], UE_list->UE_template[CC_idP][UE_id].phr_info);
UE_list->UE_template[CC_idP][UE_id].phr_info_configured=1;
}
payload_ptr+=sizeof(POWER_HEADROOM_CMD);
break;
......@@ -116,6 +117,8 @@ void rx_sdu(module_id_t enb_mod_idP,int CC_idP,frame_t frameP,rnti_t rntiP,uint8
LOG_D(MAC, "[eNB] MAC CE_LCID %d : Received short BSR LCGID = %u bsr = %d\n",
rx_ces[i], lcgid, payload_ptr[0] & 0x3f);
UE_list->UE_template[CC_idP][UE_id].bsr_info[lcgid] = (payload_ptr[0] & 0x3f);
if (UE_list->UE_template[CC_idP][UE_id].ul_buffer_creation_time[lcgid] == 0 )
UE_list->UE_template[CC_idP][UE_id].ul_buffer_creation_time[lcgid]=frameP;
}
payload_ptr += 1;//sizeof(SHORT_BSR); // fixme
} break;
......@@ -134,6 +137,27 @@ void rx_sdu(module_id_t enb_mod_idP,int CC_idP,frame_t frameP,rnti_t rntiP,uint8
UE_list->UE_template[CC_idP][UE_id].bsr_info[LCGID1],
UE_list->UE_template[CC_idP][UE_id].bsr_info[LCGID2],
UE_list->UE_template[CC_idP][UE_id].bsr_info[LCGID3]);
if (UE_list->UE_template[CC_idP][UE_id].bsr_info[LCGID0] == 0 )
UE_list->UE_template[CC_idP][UE_id].ul_buffer_creation_time[LCGID0]=0;
else if (UE_list->UE_template[CC_idP][UE_id].ul_buffer_creation_time[LCGID0] == 0)
UE_list->UE_template[CC_idP][UE_id].ul_buffer_creation_time[LCGID0]=frameP;
if (UE_list->UE_template[CC_idP][UE_id].bsr_info[LCGID1] == 0 )
UE_list->UE_template[CC_idP][UE_id].ul_buffer_creation_time[LCGID1]=0;
else if (UE_list->UE_template[CC_idP][UE_id].ul_buffer_creation_time[LCGID1] == 0)
UE_list->UE_template[CC_idP][UE_id].ul_buffer_creation_time[LCGID1]=frameP;
if (UE_list->UE_template[CC_idP][UE_id].bsr_info[LCGID2] == 0 )
UE_list->UE_template[CC_idP][UE_id].ul_buffer_creation_time[LCGID2]=0;
else if (UE_list->UE_template[CC_idP][UE_id].ul_buffer_creation_time[LCGID2] == 0)
UE_list->UE_template[CC_idP][UE_id].ul_buffer_creation_time[LCGID2]=frameP;
if (UE_list->UE_template[CC_idP][UE_id].bsr_info[LCGID3] == 0 )
UE_list->UE_template[CC_idP][UE_id].ul_buffer_creation_time[LCGID3]= 0;
else if (UE_list->UE_template[CC_idP][UE_id].ul_buffer_creation_time[LCGID3] == 0)
UE_list->UE_template[CC_idP][UE_id].ul_buffer_creation_time[LCGID3]=frameP;
}
payload_ptr += 3;////sizeof(LONG_BSR);
break;
......@@ -249,16 +273,25 @@ void rx_sdu(module_id_t enb_mod_idP,int CC_idP,frame_t frameP,rnti_t rntiP,uint8
break;
default : //if (rx_lcids[i] >= DTCH) {
UE_list->eNB_UE_stats[CC_idP][UE_id].num_errors_rx+=1;
LOG_E(MAC,"[eNB %d] received unsupported or unknown LCID %d from UE %d ", rx_lcids[i], UE_id);
LOG_E(MAC,"[eNB %d] Frame %d : received unsupported or unknown LCID %d from UE %d ", enb_mod_idP, frameP, rx_lcids[i], UE_id);
break;
}
payload_ptr+=rx_lengths[i];
}
/* NN--> FK: we could either check the payload, or use a phy helper to detect a false msg3 */
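// an empty MAC PDU (no SDUs and no CEs) is taken as a false msg3; msg3_flagP is cleared so that
// the PHY (phy_procedures_eNB_RX) can cancel the RA procedure and remove the UE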
if ((num_sdu == 0) && (num_ce==0)){
UE_list->eNB_UE_stats[CC_idP][UE_id].total_num_errors_rx+=1;
if (msg3_flagP != NULL){
if( *msg3_flagP == 1 ) {
LOG_N(MAC,"[eNB %d] frame %d : false msg3 detection: signal phy to canceling RA and remove the UE\n", enb_mod_idP, frameP);
*msg3_flagP=0;
}
}
}else {
UE_list->eNB_UE_stats[CC_idP][UE_id].total_pdu_bytes_rx+=sdu_lenP;
UE_list->eNB_UE_stats[CC_idP][UE_id].total_num_pdus_rx+=1;
}
UE_list->eNB_UE_stats[CC_idP][UE_id].total_pdu_bytes_rx+=sdu_lenP;
UE_list->eNB_UE_stats[CC_idP][UE_id].total_num_pdus_rx+=1;
vcd_signal_dumper_dump_function_by_name(VCD_SIGNAL_DUMPER_FUNCTIONS_RX_SDU,0);
stop_meas(&eNB->rx_ulsch_sdu);
}
......@@ -278,6 +311,51 @@ uint32_t bytes_to_bsr_index(int32_t nbytes) {
return(i-1);
}
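// Deduct the scheduled TBS from the UE's reported buffer status: LCGID0..LCGID3 are served in
// priority order, fully-served groups are cleared, the first partially-served group is re-indexed
// via bytes_to_bsr_index(), and all groups are cleared once the buffers have been flushed.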
void adjust_bsr_info(int buffer_occupancy,
uint16_t TBS,
UE_TEMPLATE *UE_template){
uint32_t tmp_bsr;
// could not serve all the uplink traffic
if (buffer_occupancy > 0 ) {
if (BSR_TABLE[UE_template->bsr_info[LCGID0]] <= TBS ) {
tmp_bsr = BSR_TABLE[UE_template->bsr_info[LCGID0]]; // serving this amount of bytes
UE_template->bsr_info[LCGID0] = 0;
if (BSR_TABLE[UE_template->bsr_info[LCGID1]] <= (TBS-tmp_bsr)) {
tmp_bsr += BSR_TABLE[UE_template->bsr_info[LCGID1]];
UE_template->bsr_info[LCGID1] = 0;
if (BSR_TABLE[UE_template->bsr_info[LCGID2]] <= (TBS-tmp_bsr)) {
tmp_bsr += BSR_TABLE[UE_template->bsr_info[LCGID2]];
UE_template->bsr_info[LCGID2] = 0;
if (BSR_TABLE[UE_template->bsr_info[LCGID3]] <= (TBS-tmp_bsr)) {
tmp_bsr += BSR_TABLE[UE_template->bsr_info[LCGID3]];
UE_template->bsr_info[LCGID3] = 0;
} else {
UE_template->bsr_info[LCGID3] = bytes_to_bsr_index((int32_t)BSR_TABLE[UE_template->bsr_info[LCGID3]] - ((int32_t) TBS - (int32_t)tmp_bsr));
}
}
else {
UE_template->bsr_info[LCGID2] = bytes_to_bsr_index((int32_t)BSR_TABLE[UE_template->bsr_info[LCGID2]] - ((int32_t)TBS - (int32_t)tmp_bsr));
}
}
else {
UE_template->bsr_info[LCGID1] = bytes_to_bsr_index((int32_t)BSR_TABLE[UE_template->bsr_info[LCGID1]] - ((int32_t)TBS - (int32_t)tmp_bsr));
}
}
else {
UE_template->bsr_info[LCGID0] = bytes_to_bsr_index((int32_t)BSR_TABLE[UE_template->bsr_info[LCGID0]] - (int32_t)TBS);
}
}
else { // we have flushed all buffers so clear bsr
UE_template->bsr_info[LCGID0] = 0;
UE_template->bsr_info[LCGID1] = 0;
UE_template->bsr_info[LCGID2] = 0;
UE_template->bsr_info[LCGID3] = 0;
}
}
void add_ue_ulsch_info(module_id_t module_idP, int CC_id, int UE_id, sub_frame_t subframeP, UE_ULSCH_STATUS status){
......@@ -418,7 +496,8 @@ unsigned char *parse_ulsch_header(unsigned char *mac_header,
}
void schedule_ulsch(module_id_t module_idP, frame_t frameP,unsigned char cooperation_flag,sub_frame_t subframeP, unsigned char sched_subframe,unsigned int *nCCE) {//,int calibration_flag) {
void schedule_ulsch(module_id_t module_idP, frame_t frameP,unsigned char cooperation_flag,sub_frame_t subframeP, unsigned char sched_subframe,
unsigned int *nCCE) {//,int calibration_flag) {
unsigned int nCCE_available[MAX_NUM_CCs];
......@@ -460,7 +539,7 @@ void schedule_ulsch(module_id_t module_idP, frame_t frameP,unsigned char coopera
schedule_ulsch_rnti(module_idP, cooperation_flag, frameP, subframeP, sched_subframe, nCCE, nCCE_available, first_rb);
#ifdef CBA
schedule_ulsch_cba_rnti(module_idP, cooperation_flag, frameP, subframeP, sched_subframe, granted_UEs, nCCE, &nCCE_available, &first_rb);
schedule_ulsch_cba_rnti(module_idP, cooperation_flag, frameP, subframeP, sched_subframe, granted_UEs, nCCE, nCCE_available, first_rb);
#endif
......@@ -477,224 +556,148 @@ void schedule_ulsch_rnti(module_id_t module_idP,
unsigned char sched_subframe,
unsigned int *nCCE,
unsigned int *nCCE_available,
uint16_t *first_rb){
uint16_t *first_rb){
int UE_id;
unsigned char aggregation = 2;
int UE_id;
uint8_t aggregation = 2;
rnti_t rnti = -1;
uint8_t round = 0;
uint8_t harq_pid = 0;
uint8_t round = 0;
uint8_t harq_pid = 0;
void *ULSCH_dci = NULL;
LTE_eNB_UE_stats *eNB_UE_stats = NULL;
DCI_PDU *DCI_pdu;
uint8_t status = 0;
uint8_t rb_table_index = -1;
uint16_t TBS,i;
int32_t buffer_occupancy;
uint32_t tmp_bsr;
uint32_t cqi_req,cshift,ndi,mcs,rballoc;
uint8_t status = 0;
uint8_t rb_table_index = -1;
uint16_t TBS,i;
int32_t buffer_occupancy=0;
uint32_t cqi_req,cshift,ndi,mcs,rballoc;
int n,CC_id;
eNB_MAC_INST *eNB=&eNB_mac_inst[module_idP];
UE_list_t *UE_list=&eNB->UE_list;
UE_TEMPLATE *UE_template;
int rvidx_tab[4] = {0,3,1,2};
LTE_DL_FRAME_PARMS *frame_parms;
int rvidx_tab[4] = {0,2,3,1};
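// for retransmissions, mcs = rvidx_tab[round&3] + 28 selects the reserved MCS indices 29..31 that
// carry redundancy versions 1..3 in DCI format 0, giving the usual rv cycle 0,2,3,1 over HARQ rounds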
LOG_I(MAC,"entering ulsch preprocesor\n");
ulsch_scheduler_pre_processor(module_idP,
frameP,
subframeP,
first_rb,
aggregation,
nCCE);
LOG_I(MAC,"exiting ulsch preprocesor\n");
// loop over all active UEs
for (UE_id=UE_list->head;(UE_id>=0) && (*nCCE_available > (1<<aggregation));UE_id=UE_list->next[UE_id]) {
rnti = UE_RNTI(module_idP,UE_id); // radio network temp id is obtained
if (rnti==0) // if so, go to next UE
for (UE_id=UE_list->head_ul;UE_id>=0;UE_id=UE_list->next_ul[UE_id]) {
rnti = UE_RNTI(module_idP,UE_id);
if (rnti==0) {
LOG_W(MAC,"[eNB %d] frame %d subfarme %d, UE %d CC %d: no RNTI \n", module_idP,frameP,subframeP,UE_id,CC_id);
continue;
}
// loop over all active UL CC_ids for this UE
for (n=0;n<UE_list->numactiveULCCs[UE_id];n++) {
// This is the actual CC_id in the list
CC_id = UE_list->ordered_ULCCids[n][UE_id];
DCI_pdu = &eNB->common_channels[CC_id].DCI_pdu;
UE_template = &UE_list->UE_template[CC_id][UE_id];
frame_parms = mac_xface->get_lte_frame_parms(module_idP,CC_id);
eNB_UE_stats = mac_xface->get_eNB_UE_stats(module_idP,CC_id,rnti);
if (eNB_UE_stats==NULL)
mac_xface->macphy_exit("[MAC][eNB] Cannot find eNB_UE_stats\n");
LOG_D(MAC,"[eNB %d] Scheduler Frame %d, subframeP %d, nCCE %d: Checking ULSCH next UE_id %d mode id %d (rnti %x,mode %s), format 0\n",
module_idP,frameP,subframeP,*nCCE,UE_id,module_idP, rnti,mode_string[eNB_UE_stats->mode]);
if (eNB_UE_stats==NULL){
LOG_W(MAC,"[eNB %d] frame %d subfarme %d, UE %d CC %d: no PHY context\n", module_idP,frameP,subframeP,UE_id,CC_id);
continue; // mac_xface->macphy_exit("[MAC][eNB] Cannot find eNB_UE_stats\n");
}
if (nCCE_available[CC_id] < (1<<aggregation)){
LOG_W(MAC,"[eNB %d] frame %d subfarme %d, UE %d CC %d: not enough nCCE\n", module_idP,frameP,subframeP,UE_id,CC_id);
continue; // break;
}
if (eNB_UE_stats->mode == PUSCH) { // ue has a ulsch channel
int8_t ret;
// Get candidate harq_pid from PHY
ret = mac_xface->get_ue_active_harq_pid(module_idP,CC_id,rnti,frameP,subframeP,&harq_pid,&round,1);
LOG_D(MAC,"Got harq_pid %d, round %d, UE_id %d (UE_to_be_scheduled %d)\n",harq_pid,round,UE_id,
UE_is_to_be_scheduled(module_idP,CC_id,UE_id));
DCI_pdu = &eNB->common_channels[CC_id].DCI_pdu;
UE_template = &UE_list->UE_template[CC_id][UE_id];
if (mac_xface->get_ue_active_harq_pid(module_idP,CC_id,rnti,frameP,subframeP,&harq_pid,&round,1) == -1 ){
LOG_W(MAC,"[eNB %d] Scheduler Frame %d, subframeP: candidate harq_pid from PHY for UE %d CC %d RNTI %x\n",
module_idP,frameP,subframeP, UE_id, CC_id, rnti);
// NN --> RK: Don't schedule UE if we cannot get harq pid
//should we continue or set harq_pid to 0?
continue;
}else
LOG_D(MAC,"[eNB %d] Frame %d, subframeP %d, UE %d CC %d : got harq pid %d round %d (nCCE %d, rnti %x,mode %s)\n",
module_idP,frameP,subframeP,UE_id,CC_id, harq_pid, round,nCCE[CC_id],rnti,mode_string[eNB_UE_stats->mode]);
/* [SR] 01/07/13: Don't schedule UE if we cannot get harq pid */
#ifndef EXMIMO_IOT
if ((((UE_is_to_be_scheduled(module_idP,CC_id,UE_id)>0)) || (round>0) || ((frameP%10)==0)) && (ret == 0))
if (((UE_is_to_be_scheduled(module_idP,CC_id,UE_id)>0)) || (round>0) || ((frameP%10)==0))
// if there is information on bsr of DCCH, DTCH or if there is UL_SR, or if there is a packet to retransmit, or we want to schedule a periodic feedback every 10 frames
#else
if (round==0)
#endif
{
LOG_I(MAC,"[eNB %d][PUSCH %d] Frame %d subframeP %d Scheduling UE %d round %d, PHR %d (SR %d)\n",
module_idP,harq_pid,frameP,subframeP,UE_id,round,
UE_template->phr_info,
UE_template->ul_SR);
// reset the scheduling request
UE_template->ul_SR = 0;
aggregation = process_ue_cqi(module_idP,UE_id); // =2 by default!!
// msg("[MAC][eNB] subframeP %d: aggregation %d\n",subframeP,aggregation);
status = mac_get_rrc_status(module_idP,1,UE_id);
if (status < RRC_CONNECTED)
cqi_req = 0;
else
cqi_req = 1;
if (round > 0) {
ndi = UE_template->oldNDI_UL[harq_pid];
if ((round&3)==0)
mcs = openair_daq_vars.target_ue_ul_mcs;
else
mcs = rvidx_tab[round&3] + 28; //not correct for round==4!
}
else {
cqi_req = (status < RRC_CONNECTED)? 0:1;
// new transmission
if (round==0) {
ndi = 1-UE_template->oldNDI_UL[harq_pid];
UE_template->oldNDI_UL[harq_pid]=ndi;
mcs = openair_daq_vars.target_ue_ul_mcs;
}
LOG_D(MAC,"[eNB %d] ULSCH scheduler: harq_pid %d, Ndi %d, mcs %d\n",module_idP,harq_pid,ndi,mcs);
/* if((cooperation_flag > 0) && (UE_id == 1)) { // Allocation on same set of RBs
// RIV:resource indication value // function in openair1/PHY/LTE_TRANSPORT/dci_tools.c
rballoc = mac_xface->computeRIV(mac_xface->lte_frame_parms->N_RB_UL,
((UE_id-1)*4),//openair_daq_vars.ue_ul_nb_rb),
4);//openair_daq_vars.ue_ul_nb_rb);
}*/
if (round==0) {
rb_table_index = 1;
TBS = mac_xface->get_TBS_UL(mcs,rb_table[rb_table_index]);
buffer_occupancy = ((UE_template->bsr_info[LCGID0] == 0) &&
(UE_template->bsr_info[LCGID1] == 0) &&
(UE_template->bsr_info[LCGID2] == 0) &&
(UE_template->bsr_info[LCGID3] == 0))?
BSR_TABLE[11] : // This is when we've received SR and buffers are fully served
BSR_TABLE[UE_template->bsr_info[LCGID0]]+
BSR_TABLE[UE_template->bsr_info[LCGID1]]+
BSR_TABLE[UE_template->bsr_info[LCGID2]]+
BSR_TABLE[UE_template->bsr_info[LCGID3]]; // This is when remaining data in UE buffers (even if SR is triggered)
LOG_I(MAC,"[eNB %d][PUSCH %d/%x] Frame %d subframeP %d Scheduled UE, BSR for LCGID0 %d, LCGID1 %d, LCGID2 %d LCGID3 %d, BO %d\n",
module_idP,
UE_id,
rnti,
frameP,
subframeP,
UE_template->bsr_info[LCGID0],
UE_template->bsr_info[LCGID1],
UE_template->bsr_info[LCGID2],
UE_template->bsr_info[LCGID3],
buffer_occupancy);
while ((TBS < buffer_occupancy) &&
rb_table[rb_table_index]<(mac_xface->lte_frame_parms->N_RB_UL-1-*first_rb)){
// continue until we've exhauster the UEs request or the total number of available PRBs
/* LOG_I(MAC,"[eNB %d][PUSCH %x] Frame %d subframeP %d Scheduled UE (rb_table_index %d => TBS %d)\n",
module_idP,rnti,frameP,subframeP,
rb_table_index,TBS);
*/
rb_table_index++;
TBS = mac_xface->get_TBS_UL(mcs,rb_table[rb_table_index]);
}
if (rb_table[rb_table_index]>(mac_xface->lte_frame_parms->N_RB_UL-1-*first_rb)) {
mcs = cmin (UE_template->pre_assigned_mcs_ul, openair_daq_vars.target_ue_ul_mcs); // adjust, based on user-defined MCS
if (UE_template->pre_allocated_rb_table_index_ul >=0)
rb_table_index=UE_template->pre_allocated_rb_table_index_ul;
else // NN-->RK: check this condition
rb_table_index=1; // for PHR
buffer_occupancy = UE_template->ul_total_buffer;
while ((rb_table[rb_table_index]>(frame_parms->N_RB_UL-1-first_rb[CC_id])) &&
(rb_table_index>0)) {
rb_table_index--;
TBS = mac_xface->get_TBS_UL(mcs,rb_table[rb_table_index]);
}
//rb_table_index = 8;
LOG_I(MAC,"[eNB %d][PUSCH %d/%x] Frame %d subframeP %d Scheduled UE (mcs %d, first rb %d, nb_rb %d, rb_table_index %d, TBS %d, harq_pid %d,DAI %d)\n",
module_idP,harq_pid,rnti,frameP,subframeP,mcs,
*first_rb,rb_table[rb_table_index],
rb_table_index,mac_xface->get_TBS_UL(mcs,rb_table[rb_table_index]),
harq_pid,
UE_template->DAI_ul[sched_subframe]);
rballoc = mac_xface->computeRIV(mac_xface->lte_frame_parms->N_RB_UL,
*first_rb,
rb_table[rb_table_index]);//openair_daq_vars.ue_ul_nb_rb);
*first_rb+=rb_table[rb_table_index]; // increment for next UE allocation
UE_template->nb_rb_ul[harq_pid] = rb_table[rb_table_index]; //store for possible retransmission
// the max TBS that could be served is : mac_xface->get_TBS_UL(mcs,rb_table[rb_table_index]
TBS = mac_xface->get_TBS_UL(mcs,rb_table[rb_table_index]);
buffer_occupancy -= TBS;
rballoc = mac_xface->computeRIV(frame_parms->N_RB_UL,
first_rb[CC_id],
rb_table[rb_table_index]);
// increment for next UE allocation
first_rb[CC_id]+=rb_table[rb_table_index];
//store for possible retransmission
UE_template->nb_rb_ul[harq_pid] = rb_table[rb_table_index];
LOG_I(MAC,"[eNB %d][PUSCH %d/%x] Frame %d subframeP %d Scheduled UE (mcs %d, first rb %d, nb_rb %d, rb_table_index %d, TBS %d, harq_pid %d)\n",
module_idP,harq_pid,rnti,frameP,subframeP,mcs,
first_rb[CC_id],rb_table[rb_table_index],
rb_table_index,TBS,harq_pid);
// Adjust BSR entries for LCGIDs
if (buffer_occupancy > 0 ) { // could not serve all the uplink traffic
if (BSR_TABLE[UE_template->bsr_info[LCGID0]] <= TBS ) {
tmp_bsr = BSR_TABLE[UE_template->bsr_info[LCGID0]]; // serving this amout of bytes
UE_template->bsr_info[LCGID0] = 0;
if (BSR_TABLE[UE_template->bsr_info[LCGID1]] <= (TBS-tmp_bsr)) {
tmp_bsr += BSR_TABLE[UE_template->bsr_info[LCGID1]];
UE_template->bsr_info[LCGID1] = 0;
if (BSR_TABLE[UE_template->bsr_info[LCGID2]] <= (TBS-tmp_bsr)) {
tmp_bsr += BSR_TABLE[UE_template->bsr_info[LCGID2]];
UE_template->bsr_info[LCGID2] = 0;
if (BSR_TABLE[UE_template->bsr_info[LCGID3]] <= (TBS-tmp_bsr)) {
tmp_bsr += BSR_TABLE[UE_template->bsr_info[LCGID3]];
UE_template->bsr_info[LCGID3] = 0;
} else {
UE_template->bsr_info[LCGID3] = bytes_to_bsr_index((int32_t)BSR_TABLE[UE_template->bsr_info[LCGID3]] - ((int32_t) TBS - (int32_t)tmp_bsr));
}
}
else {
UE_template->bsr_info[LCGID2] = bytes_to_bsr_index((int32_t)BSR_TABLE[UE_template->bsr_info[LCGID2]] - ((int32_t)TBS - (int32_t)tmp_bsr));
}
}
else {
UE_template->bsr_info[LCGID1] = bytes_to_bsr_index((int32_t)BSR_TABLE[UE_template->bsr_info[LCGID1]] - ((int32_t)TBS - (int32_t)tmp_bsr));
}
}
else {
UE_template->bsr_info[LCGID0] = bytes_to_bsr_index((int32_t)BSR_TABLE[UE_template->bsr_info[LCGID0]] - (int32_t)TBS);
}
}
else { // we have flushed all buffers so clear bsr
UE_template->bsr_info[LCGID0] = 0;
UE_template->bsr_info[LCGID1] = 0;
UE_template->bsr_info[LCGID2] = 0;
UE_template->bsr_info[LCGID3] = 0;
}
} // ndi==1
else { //we schedule a retransmission
LOG_I(MAC,"[eNB %d][PUSCH %d/%x] Frame %d subframeP %d Scheduled UE retransmission (mcs %d, first rb %d, nb_rb %d, TBS %d, harq_pid %d)\n",
adjust_bsr_info(buffer_occupancy,
TBS,
UE_template);
}
else if (round > 0) { //we schedule a retransmission
ndi = UE_template->oldNDI_UL[harq_pid];
if ((round&3)==0)
mcs = openair_daq_vars.target_ue_ul_mcs;
else
mcs = rvidx_tab[round&3] + 28; //not correct for round==4!
LOG_I(MAC,"[eNB %d][PUSCH %d/%x] Frame %d subframeP %d Scheduled UE retransmission (mcs %d, first rb %d, nb_rb %d, TBS %d, harq_pid %d, round %d)\n",
module_idP,UE_id,rnti,frameP,subframeP,mcs,
*first_rb,UE_template->nb_rb_ul[harq_pid],
first_rb[CC_id],UE_template->nb_rb_ul[harq_pid],
mac_xface->get_TBS_UL(mcs,UE_template->nb_rb_ul[harq_pid]),
harq_pid);
harq_pid, round);
rballoc = mac_xface->computeRIV(mac_xface->lte_frame_parms->N_RB_UL,
*first_rb,
rballoc = mac_xface->computeRIV(frame_parms->N_RB_UL,
first_rb[CC_id],
UE_template->nb_rb_ul[harq_pid]);
*first_rb+=UE_template->nb_rb_ul[harq_pid]; // increment for next UE allocation
first_rb[CC_id]+=UE_template->nb_rb_ul[harq_pid]; // increment for next UE allocation
}
// Cyclic shift for DM RS
if(cooperation_flag == 2) {
if(UE_id == 1)// For Distributed Alamouti, cyclic shift applied to 2nd UE
......@@ -705,8 +708,8 @@ void schedule_ulsch_rnti(module_id_t module_idP,
else
cshift = 0;// values from 0 to 7 can be used for mapping the cyclic shift (36.211 , Table 5.5.2.1.1-1)
if (mac_xface->lte_frame_parms->frame_type == TDD) {
switch (mac_xface->lte_frame_parms->N_RB_UL) {
if (frame_parms->frame_type == TDD) {
switch (frame_parms->N_RB_UL) {
case 6:
ULSCH_dci = UE_template->ULSCH_DCI[harq_pid];
......@@ -801,9 +804,9 @@ void schedule_ulsch_rnti(module_id_t module_idP,
0);
break;
}
}
} // TDD
else { //FDD
switch (mac_xface->lte_frame_parms->N_RB_UL) {
switch (frame_parms->N_RB_UL) {
case 25:
default:
......@@ -897,10 +900,7 @@ void schedule_ulsch_rnti(module_id_t module_idP,
}
}
//#ifdef DEBUG_eNB_SCHEDULER
// dump_dci(mac_xface->lte_frame_parms,
// &DCI_pdu->dci_alloc[DCI_pdu->Num_common_dci+DCI_pdu->Num_ue_spec_dci-1]);
//#endif
add_ue_ulsch_info(module_idP,
CC_id,
UE_id,
......@@ -909,13 +909,16 @@ void schedule_ulsch_rnti(module_id_t module_idP,
nCCE[CC_id] = nCCE[CC_id] + (1<<aggregation);
nCCE_available[CC_id] = mac_xface->get_nCCE_max(module_idP,CC_id) - nCCE[CC_id];
//msg("[MAC][eNB %d][ULSCH Scheduler] Frame %d, subframeP %d: Generated ULSCH DCI for next UE_id %d, format 0\n", module_idP,frameP,subframeP,UE_id);
//break; // leave loop after first UE is schedule (avoids m
LOG_D(MAC,"[eNB %d] Frame %d, subframeP %d: Generated ULSCH DCI for next UE_id %d, format 0\n", module_idP,frameP,subframeP,UE_id);
#ifdef DEBUG
dump_dci(frame_parms, &DCI_pdu->dci_alloc[DCI_pdu->Num_common_dci+DCI_pdu->Num_ue_spec_dci-1]);
#endif
} // UE_is_to_be_scheduled
} // UE is in PUSCH
} // loop of CC_id
} // loop over UE_id
} // loop over UE_id
} // loop of CC_id
}
#ifdef CBA
......@@ -924,12 +927,12 @@ void schedule_ulsch_cba_rnti(module_id_t module_idP, unsigned char cooperation_f
DCI0_5MHz_TDD_1_6_t *ULSCH_dci_tdd16;
DCI0_5MHz_FDD_t *ULSCH_dci_fdd;
DCI_PDU *DCI_pdu;
uint8_t CC_id=0;
uint8_t rb_table_index=0, aggregation=2;
uint32_t rballoc;
uint8_t cba_group, cba_resources;
uint8_t required_rbs[NUM_MAX_CBA_GROUP], weight[NUM_MAX_CBA_GROUP], num_cba_resources[NUM_MAX_CBA_GROUP];
uint8_t available_rbs= ceil(mac_xface->lte_frame_parms->N_RB_UL-1-*first_rb);
uint8_t available_rbs= ceil(mac_xface->lte_frame_parms->N_RB_UL-1-first_rb[CC_id]);
uint8_t remaining_rbs= available_rbs;
uint8_t allocated_rbs;
// We compute the weight of each group and initialize some variables
......@@ -1006,10 +1009,10 @@ void schedule_ulsch_cba_rnti(module_id_t module_idP, unsigned char cooperation_f
allocated_rbs=rb_table[rb_table_index];
rballoc = mac_xface->computeRIV(mac_xface->lte_frame_parms->N_RB_UL,
*first_rb,
first_rb[CC_id],
rb_table[rb_table_index]);
*first_rb+=rb_table[rb_table_index];
first_rb[CC_id]+=rb_table[rb_table_index];
LOG_D(MAC,"[eNB %d] Frame %d, subframeP %d: CBA %d rnti %x, total/required/allocated/remaining rbs (%d/%d/%d/%d), rballoc %d, nCCE (%d/%d)\n",
module_idP, frameP, subframeP, cba_group,eNB_mac_inst[module_idP][CC_id].cba_rnti[cba_group],
available_rbs, required_rbs[cba_group], allocated_rbs, remaining_rbs,rballoc,
......
......@@ -90,6 +90,7 @@ extern int cqi_to_mcs[16];
extern uint32_t RRC_CONNECTION_FLAG;
extern uint8_t rb_table[33];
extern DCI0_5MHz_TDD_1_6_t UL_alloc_pdu;
......
......@@ -157,11 +157,14 @@ int mac_top_init(int eMBMS_active, uint8_t cba_group_active, uint8_t HO_active){
UE_list->num_UEs=0;
UE_list->head=-1;
UE_list->head_ul=-1;
UE_list->avail=0;
for (list_el=0;list_el<NUMBER_OF_UE_MAX-1;list_el++) {
UE_list->next[list_el]=list_el+1;
UE_list->next_ul[list_el]=list_el+1;
}
UE_list->next[list_el]=-1;
UE_list->next_ul[list_el]=-1;
#ifdef PHY_EMUL
Mac_rlc_xface->Is_cluster_head[Mod_id]=2;//0: MR, 1: CH, 2: not CH neither MR
......@@ -481,6 +484,7 @@ int l2_init(LTE_DL_FRAME_PARMS *frame_parms,int eMBMS_active, uint8_t cba_group_
#ifdef CBA
mac_xface->phy_config_cba_rnti = phy_config_cba_rnti ;
#endif
mac_xface->estimate_ue_tx_power = estimate_ue_tx_power;
mac_xface->phy_config_meas_ue = phy_config_meas_ue;
mac_xface->phy_reset_ue = phy_reset_ue;
......
......@@ -224,13 +224,13 @@ void assign_rbs_required (module_id_t Mod_id,
// This function scans all CC_ids for a particular UE to find the maximum round index of its HARQ processes
int maxround(module_id_t Mod_id,uint16_t rnti,int frame,sub_frame_t subframe) {
int maxround(module_id_t Mod_id,uint16_t rnti,int frame,sub_frame_t subframe,uint8_t ul_flag ) {
uint8_t round,round_max=0,harq_pid;
int CC_id;
for (CC_id=0;CC_id<MAX_NUM_CCs;CC_id++){
mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti,frame,subframe,&harq_pid,&round,0);
mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti,frame,subframe,&harq_pid,&round,ul_flag);
if (round > round_max)
round_max = round;
}
......@@ -279,7 +279,7 @@ void sort_UEs (module_id_t Mod_idP,
UE_id1 = i;
pCC_id1 = UE_PCCID(Mod_idP,UE_id1);
cqi1 = maxcqi(Mod_idP,rnti1); //
round1 = maxround(Mod_idP,rnti1,frameP,subframeP);
round1 = maxround(Mod_idP,rnti1,frameP,subframeP,0);
for(ii=UE_list->next[i];ii>=0;ii=UE_list->next[ii]){
......@@ -290,11 +290,11 @@ void sort_UEs (module_id_t Mod_idP,
continue;
cqi2 = maxcqi(Mod_idP,rnti2);
round2 = maxround(Mod_idP,rnti2,frameP,subframeP); //mac_xface->get_ue_active_harq_pid(Mod_id,rnti2,subframe,&harq_pid2,&round2,0);
round2 = maxround(Mod_idP,rnti2,frameP,subframeP,0); //mac_xface->get_ue_active_harq_pid(Mod_id,rnti2,subframe,&harq_pid2,&round2,0);
pCC_id2 = UE_PCCID(Mod_idP,UE_id2);
if(round2 > round1){ // Check first if one of the UEs has an active HARQ process which needs service and swap order
swap_UEs(UE_list,UE_id1,UE_id2);
swap_UEs(UE_list,UE_id1,UE_id2,0);
}
else if (round2 == round1){
// RK->NN : I guess this is for fairness in the scheduling. This doesn't make sense unless all UEs have the same configuration of logical channels. This should be done on the sum of all information that has to be sent. And still it wouldn't ensure fairness. It should be based on throughput seen by each UE or maybe using the head_sdu_creation_time, i.e. swap UEs if one is waiting longer for service.
......@@ -306,18 +306,18 @@ void sort_UEs (module_id_t Mod_idP,
if ( (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[1] + UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[2]) <
(UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[1] + UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[2]) ){
swap_UEs(UE_list,UE_id1,UE_id2);
swap_UEs(UE_list,UE_id1,UE_id2,0);
}
else if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_head_sdu_creation_time_max <
UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_head_sdu_creation_time_max ){
swap_UEs(UE_list,UE_id1,UE_id2);
swap_UEs(UE_list,UE_id1,UE_id2,0);
}
else if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_total <
UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_total ){
swap_UEs(UE_list,UE_id1,UE_id2);
swap_UEs(UE_list,UE_id1,UE_id2,0);
}
else if (cqi1 < cqi2){
swap_UEs(UE_list,UE_id1,UE_id2);
swap_UEs(UE_list,UE_id1,UE_id2,0);
}
}
}
......@@ -405,14 +405,11 @@ void dlsch_scheduler_pre_processor (module_id_t Mod_id,
total_ue_count =0;
// loop over all active UEs
for (i=UE_list->head;i>=0;i=UE_list->next[i]) {
UE_id = i;
rnti = UE_RNTI(Mod_id,UE_id);
rnti = UE_RNTI(Mod_id,i);
if(rnti == 0)
continue;
UE_id = i;
for (ii=0;ii<UE_num_active_CC(UE_list,UE_id);ii++) {
CC_id = UE_list->ordered_CCids[ii][UE_id];
......@@ -426,7 +423,7 @@ void dlsch_scheduler_pre_processor (module_id_t Mod_id,
if (nb_rbs_required[CC_id][UE_id] > 0) {
total_ue_count = total_ue_count + 1;
}
// hypothetical assignment
/*
......@@ -441,17 +438,16 @@ void dlsch_scheduler_pre_processor (module_id_t Mod_id,
if (total_ue_count == 0)
average_rbs_per_user[CC_id] = 0;
else if( (min_rb_unit[CC_id] * total_ue_count) <= (frame_parms[CC_id]->N_RB_DL) )
average_rbs_per_user[CC_id] = (uint16_t) ceil(frame_parms[CC_id]->N_RB_DL/total_ue_count);
average_rbs_per_user[CC_id] = (uint16_t) floor(frame_parms[CC_id]->N_RB_DL/total_ue_count);
else
average_rbs_per_user[CC_id] = min_rb_unit[CC_id];
}
}
// note: nb_rbs_required is assigned according to total_buffer_dl
// extend nb_rbs_required to capture per LCID RB required
for(i=UE_list->head;i>=0;i=UE_list->next[i]){
for (ii=0;ii<UE_num_active_CC(UE_list,UE_id);ii++) {
CC_id = UE_list->ordered_CCids[ii][UE_id];
for (ii=0;ii<UE_num_active_CC(UE_list,i);ii++) {
CC_id = UE_list->ordered_CCids[ii][i];
// control channel
if (mac_get_rrc_status(Mod_id,1,i) < RRC_RECONFIGURED)
nb_rbs_required_remaining_1[CC_id][i] = nb_rbs_required[CC_id][i];
......@@ -466,8 +462,8 @@ void dlsch_scheduler_pre_processor (module_id_t Mod_id,
for(r1=0;r1<2;r1++){
for(i=UE_list->head; i>=0;i=UE_list->next[i]) {
for (ii=0;ii<UE_num_active_CC(UE_list,UE_id);ii++) {
CC_id = UE_list->ordered_CCids[ii][UE_id];
for (ii=0;ii<UE_num_active_CC(UE_list,i);ii++) {
CC_id = UE_list->ordered_CCids[ii][i];
if(r1 == 0)
nb_rbs_required_remaining[CC_id][i] = nb_rbs_required_remaining_1[CC_id][i];
......@@ -630,7 +626,7 @@ void dlsch_scheduler_pre_processor (module_id_t Mod_id,
CC_id = UE_list->ordered_CCids[ii][UE_id];
UE_id = i;
//PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].dl_pow_off = dl_pow_off[UE_id];
LOG_D(MAC,"******************Scheduling Information for UE%d ************************\n",UE_id);
LOG_D(MAC,"******************DL Scheduling Information for UE%d ************************\n",UE_id);
LOG_D(MAC,"dl power offset UE%d = %d \n",UE_id,dl_pow_off[CC_id][UE_id]);
LOG_D(MAC,"***********RB Alloc for every subband for UE%d ***********\n",UE_id);
for(j=0;j<N_RBG[CC_id];j++){
......@@ -716,7 +712,318 @@ void dlsch_scheduler_pre_processor_allocate (module_id_t Mod_id,
}
/// ULSCH PRE_PROCESSOR
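/// Runs before schedule_ulsch_rnti(): converts the BSRs to bytes (store_ulsch_buffer), picks a
/// maximum MCS and minimum RB allocation per UE within its power headroom (assign_max_mcs_min_rb),
/// sorts the UL UE list (sort_ue_ul), and then distributes the available UL RBs among the UEs.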
void ulsch_scheduler_pre_processor(module_id_t module_idP,
int frameP,
sub_frame_t subframeP,
uint16_t *first_rb,
uint8_t aggregation,
uint32_t *nCCE){
int16_t i;
uint16_t UE_id,n,r;
uint8_t CC_id, round, harq_pid;
uint16_t nb_allocated_rbs[MAX_NUM_CCs][NUMBER_OF_UE_MAX],total_allocated_rbs[MAX_NUM_CCs],average_rbs_per_user[MAX_NUM_CCs];
int16_t total_remaining_rbs[MAX_NUM_CCs];
uint16_t max_num_ue_to_be_scheduled=0,total_ue_count=0;
rnti_t rnti= -1;
uint32_t nCCE_to_be_used[MAX_NUM_CCs];
UE_list_t *UE_list = &eNB_mac_inst[module_idP].UE_list;
UE_TEMPLATE *UE_template;
LTE_DL_FRAME_PARMS *frame_parms;
LOG_I(MAC,"store ulsch buffers\n");
// convert BSR to bytes for comparison with tbs
store_ulsch_buffer(module_idP,frameP, subframeP);
LOG_I(MAC,"assign max mcs min rb\n");
// maximize MCS and then allocate required RB according to the buffer occupancy with the limit of max available UL RB
assign_max_mcs_min_rb(module_idP,frameP, subframeP, first_rb);
LOG_I(MAC,"sort ue \n");
// sort ues
sort_ue_ul (module_idP,frameP, subframeP);
// we need to distribute RBs among UEs
// step1: reset the vars
for (CC_id=0;CC_id<MAX_NUM_CCs;CC_id++) {
nCCE_to_be_used[CC_id]= nCCE[CC_id];
total_allocated_rbs[CC_id]=0;
total_remaining_rbs[CC_id]=0;
average_rbs_per_user[CC_id]=0;
for (i=UE_list->head_ul;i>=0;i=UE_list->next_ul[i]) {
nb_allocated_rbs[CC_id][i]=0;
}
}
LOG_I(MAC,"step2 \n");
// step 2: calculate the average rb per UE
total_ue_count =0;
max_num_ue_to_be_scheduled=0;
for (i=UE_list->head_ul;i>=0;i=UE_list->next_ul[i]) {
rnti = UE_RNTI(module_idP,i);
if (rnti==0)
continue;
UE_id = i;
for (n=0;n<UE_list->numactiveULCCs[UE_id];n++) {
// This is the actual CC_id in the list
CC_id = UE_list->ordered_ULCCids[n][UE_id];
UE_template = &UE_list->UE_template[CC_id][UE_id];
average_rbs_per_user[CC_id]=0;
frame_parms = mac_xface->get_lte_frame_parms(module_idP,CC_id);
if (UE_template->pre_allocated_nb_rb_ul > 0) {
total_ue_count+=1;
}
if((mac_xface->get_nCCE_max(module_idP,CC_id) - nCCE_to_be_used[CC_id]) > (1<<aggregation)){
nCCE_to_be_used[CC_id] = nCCE_to_be_used[CC_id] + (1<<aggregation);
max_num_ue_to_be_scheduled+=1;
}
if (total_ue_count == 0)
average_rbs_per_user[CC_id] = 0;
else if (total_ue_count == 1 ) // increase the available RBs, special case,
average_rbs_per_user[CC_id] = frame_parms->N_RB_UL-first_rb[CC_id]+1;
else if( (total_ue_count <= (frame_parms->N_RB_UL-first_rb[CC_id])) &&
(total_ue_count <= max_num_ue_to_be_scheduled))
average_rbs_per_user[CC_id] = (uint16_t) floor((frame_parms->N_RB_UL-first_rb[CC_id])/total_ue_count);
else if (max_num_ue_to_be_scheduled > 0 )
average_rbs_per_user[CC_id] = (uint16_t) floor((frame_parms->N_RB_UL-first_rb[CC_id])/max_num_ue_to_be_scheduled);
else {
average_rbs_per_user[CC_id]=1;
LOG_W(MAC,"[eNB %d] frame %d subframe %d: UE %d CC %d: can't get average rb per user (should not be here)\n",
module_idP,frameP,subframeP,UE_id,CC_id);
}
}
}
LOG_D(MAC,"[eNB %d] Frame %d subframe %d: total ue %d, max num ue to be scheduled %d\n",
module_idP, frameP, subframeP,total_ue_count, max_num_ue_to_be_scheduled);
LOG_I(MAC,"step3\n");
// step 3: assign RBs
for (i=UE_list->head_ul;i>=0;i=UE_list->next_ul[i]) {
rnti = UE_RNTI(module_idP,i);
if (rnti==0)
continue;
UE_id = i;
for (n=0;n<UE_list->numactiveULCCs[UE_id];n++) {
// This is the actual CC_id in the list
CC_id = UE_list->ordered_ULCCids[n][UE_id];
mac_xface->get_ue_active_harq_pid(module_idP,CC_id,rnti,frameP,subframeP,&harq_pid,&round,1);
if(round>0)
nb_allocated_rbs[CC_id][UE_id] = UE_list->UE_template[CC_id][UE_id].nb_rb_ul[harq_pid];
else
nb_allocated_rbs[CC_id][UE_id] = cmin(UE_list->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul, average_rbs_per_user[CC_id]);
total_allocated_rbs[CC_id]+= nb_allocated_rbs[CC_id][UE_id];
}
}
// step 4: assign the remaining RBs and set the pre_allocated rbs accordingly
for(r=0;r<2;r++){
for (i=UE_list->head_ul;i>=0;i=UE_list->next_ul[i]) {
rnti = UE_RNTI(module_idP,i);
if (rnti==0)
continue;
UE_id = i;
for (n=0;n<UE_list->numactiveULCCs[UE_id];n++) {
// This is the actual CC_id in the list
CC_id = UE_list->ordered_ULCCids[n][UE_id];
UE_template = &UE_list->UE_template[CC_id][UE_id];
frame_parms = mac_xface->get_lte_frame_parms(module_idP,CC_id);
total_remaining_rbs[CC_id]=frame_parms->N_RB_UL - first_rb[CC_id] - total_allocated_rbs[CC_id];
if (total_ue_count == 1 )
total_remaining_rbs[CC_id]+=1;
if ( r == 0 ) {
while ( (UE_template->pre_allocated_nb_rb_ul > 0 ) &&
(nb_allocated_rbs[CC_id][UE_id] < UE_template->pre_allocated_nb_rb_ul) &&
(total_remaining_rbs[CC_id] > 0)){
nb_allocated_rbs[CC_id][UE_id] = cmin(nb_allocated_rbs[CC_id][UE_id]+1,UE_template->pre_allocated_nb_rb_ul);
total_remaining_rbs[CC_id]--;
total_allocated_rbs[CC_id]++;
}
}
else {
UE_template->pre_allocated_nb_rb_ul= nb_allocated_rbs[CC_id][UE_id];
LOG_D(MAC,"******************UL Scheduling Information for UE%d CC_id %d ************************\n",UE_id, CC_id);
LOG_D(MAC,"[eNB %d] total RB allocated for UE%d CC_id %d = %d\n", module_idP, UE_id, CC_id, UE_template->pre_allocated_nb_rb_ul);
}
}
}
}
for (CC_id=0;CC_id<MAX_NUM_CCs;CC_id++) {
frame_parms= mac_xface->get_lte_frame_parms(module_idP,CC_id);
if (total_allocated_rbs[CC_id]>0)
LOG_D(MAC,"[eNB %d] total RB allocated for all UEs = %d/%d\n", module_idP, total_allocated_rbs[CC_id], frame_parms->N_RB_UL - first_rb[CC_id]);
}
}
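// Convert each UE's per-LCGID BSR indices into bytes and accumulate the total UL buffer occupancy;
// if the buffers are empty but the UE still has to be scheduled (e.g. SR active), assume BSR_TABLE[11] bytes.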
void store_ulsch_buffer(module_id_t module_idP, int frameP, sub_frame_t subframeP){
int UE_id,pCC_id,lcgid;
UE_list_t *UE_list = &eNB_mac_inst[module_idP].UE_list;
UE_TEMPLATE *UE_template;
for (UE_id=UE_list->head_ul;UE_id>=0;UE_id=UE_list->next_ul[UE_id]) {
UE_template = &UE_list->UE_template[UE_PCCID(module_idP,UE_id)][UE_id];
UE_template->ul_total_buffer=0;
for (lcgid=0; lcgid<MAX_NUM_LCGID; lcgid++){
UE_template->ul_buffer_info[lcgid]=BSR_TABLE[UE_template->bsr_info[lcgid]];
UE_template->ul_total_buffer+= UE_template->ul_buffer_info[lcgid];
// UE_template->ul_buffer_creation_time_max=cmax(UE_template->ul_buffer_creation_time_max, frame_cycle*1024 + frameP-UE_template->ul_buffer_creation_time[lcgid]));
}
if ( UE_template->ul_total_buffer >0)
LOG_D(MAC,"[eNB %d] Frame %d subframe %d UE %d CC id %d: LCGID0 %d, LCGID1 %d, LCGID2 %d LCGID3 %d, BO %d\n",
module_idP, frameP,subframeP, UE_id, UE_PCCID(module_idP,UE_id),
UE_template->ul_buffer_info[LCGID0],
UE_template->ul_buffer_info[LCGID1],
UE_template->ul_buffer_info[LCGID2],
UE_template->ul_buffer_info[LCGID3],
UE_template->ul_total_buffer);
else if (UE_is_to_be_scheduled(module_idP,UE_PCCID(module_idP,UE_id),UE_id) > 0 ){
if (UE_template->ul_total_buffer == 0 )
UE_template->ul_total_buffer = BSR_TABLE[11];
LOG_D(MAC,"[eNB %d] Frame %d subframe %d UE %d CC id %d: SR active, set BO to %d \n",
module_idP, frameP,subframeP, UE_id, UE_PCCID(module_idP,UE_id),
UE_template->ul_total_buffer);
}
}
}
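// For each UE with UL data, start from the configured target MCS and decrease it until the estimated
// UE tx power fits the reported power headroom, then grow the allocation along rb_table until the TBS
// covers the UL buffer, the available PRBs are exhausted, or the power headroom is exceeded.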
void assign_max_mcs_min_rb(module_id_t module_idP,int frameP, sub_frame_t subframeP, uint16_t *first_rb){
int i;
uint16_t n,UE_id;
uint8_t CC_id;
rnti_t rnti = -1;
int mcs=cmin(16,openair_daq_vars.target_ue_ul_mcs);
int rb_table_index=1,tbs,tx_power;
UE_list_t *UE_list = &eNB_mac_inst[module_idP].UE_list;
UE_TEMPLATE *UE_template;
LTE_DL_FRAME_PARMS *frame_parms;
for (i=UE_list->head_ul;i>=0;i=UE_list->next_ul[i]) {
rnti = UE_RNTI(module_idP,i);
if (rnti==0)
continue;
UE_id = i;
for (n=0;n<UE_list->numactiveULCCs[UE_id];n++) {
// This is the actual CC_id in the list
CC_id = UE_list->ordered_ULCCids[n][UE_id];
frame_parms=mac_xface->get_lte_frame_parms(module_idP,CC_id);
UE_template = &UE_list->UE_template[CC_id][UE_id];
// if this UE has UL traffic
if (UE_template->ul_total_buffer > 0 ) {
tbs = mac_xface->get_TBS_UL(mcs,1);
// fixme: set use_srs flag
tx_power= mac_xface->estimate_ue_tx_power(tbs,rb_table[rb_table_index],0,frame_parms->Ncp,0);
while (((UE_template->phr_info - tx_power) < 0 ) &&
(mcs >=0)){
// LOG_I(MAC,"UE_template->phr_info %d tx_power %d mcs %d\n", UE_template->phr_info,tx_power, mcs);
mcs--;
tbs = mac_xface->get_TBS_UL(mcs,rb_table[rb_table_index]);
tx_power = mac_xface->estimate_ue_tx_power(tbs,rb_table[rb_table_index],0,frame_parms->Ncp,0); // fixme: set use_srs
}
while ((tbs < UE_template->ul_total_buffer) &&
(rb_table[rb_table_index]<(frame_parms->N_RB_UL-first_rb[CC_id])) &&
((UE_template->phr_info - tx_power) > 0) &&
(rb_table_index < 32 )){
// LOG_I(MAC,"tbs %d ul buffer %d rb table %d max ul rb %d\n", tbs, UE_template->ul_total_buffer, rb_table[rb_table_index], frame_parms->N_RB_UL-first_rb[CC_id]);
rb_table_index++;
tbs = mac_xface->get_TBS_UL(mcs,rb_table[rb_table_index]);
tx_power = mac_xface->estimate_ue_tx_power(tbs,rb_table[rb_table_index],0,frame_parms->Ncp,0);
}
if (rb_table[rb_table_index]>(frame_parms->N_RB_UL-first_rb[CC_id])) {
rb_table_index--;
}
UE_template->pre_assigned_mcs_ul=mcs;
UE_template->pre_allocated_rb_table_index_ul=rb_table_index;
UE_template->pre_allocated_nb_rb_ul= rb_table[rb_table_index];
LOG_D(MAC,"[eNB %d] frame %d subframe %d: for UE %d CC %d: pre-assigned mcs %d, pre-allocated rb_table[%d]=%d RBs (phr %d, tx power %d)\n",
module_idP, frameP, subframeP, UE_id, CC_id,
UE_template->pre_assigned_mcs_ul,
UE_template->pre_allocated_rb_table_index_ul,
UE_template->pre_allocated_nb_rb_ul,
UE_template->phr_info,tx_power);
} else {
UE_template->pre_allocated_rb_table_index_ul=-1;
UE_template->pre_allocated_nb_rb_ul=0;
}
}
}
}
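// Order the UL UE list: UEs with a pending HARQ retransmission (higher round) come first; ties are
// broken by the LCGID0 buffer, then the total UL buffer, and finally the pre-assigned UL MCS.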
void sort_ue_ul (module_id_t module_idP,int frameP, sub_frame_t subframeP){
int UE_id1,UE_id2;
int pCCid1,pCCid2;
int round1,round2;
int i=0,ii=0,j=0;
rnti_t rnti1,rnti2;
UE_list_t *UE_list = &eNB_mac_inst[module_idP].UE_list;
for (i=UE_list->head_ul;i>=0;i=UE_list->next_ul[i]) {
LOG_I(MAC,"sort ue ul i %d\n",i);
rnti1 = UE_RNTI(module_idP,i);
if(rnti1 == 0)
continue;
UE_id1 = i;
pCCid1 = UE_PCCID(module_idP,UE_id1);
round1 = maxround(module_idP,rnti1,frameP,subframeP,1);
for (ii=UE_list->next_ul[i];ii>=0;ii=UE_list->next_ul[ii]) {
LOG_I(MAC,"sort ul ue 2 ii %d\n",ii);
rnti2 = UE_RNTI(module_idP,ii);
if(rnti2 == 0)
continue;
UE_id2 = ii;
pCCid2 = UE_PCCID(module_idP,UE_id2);
round2 = maxround(module_idP,rnti2,frameP,subframeP,1);
if(round2 > round1){
swap_UEs(UE_list,UE_id1,UE_id2,1);
}
else if (round2 == round1){
if (UE_list->UE_template[pCCid1][UE_id1].ul_buffer_info[LCGID0] < UE_list->UE_template[pCCid2][UE_id2].ul_buffer_info[LCGID0]){
swap_UEs(UE_list,UE_id1,UE_id2,1);
}
else if (UE_list->UE_template[pCCid1][UE_id1].ul_total_buffer < UE_list->UE_template[pCCid2][UE_id2].ul_total_buffer){
swap_UEs(UE_list,UE_id1,UE_id2,1);
}
else if (UE_list->UE_template[pCCid1][UE_id1].pre_assigned_mcs_ul < UE_list->UE_template[pCCid2][UE_id2].pre_assigned_mcs_ul){
if (UE_list->UE_template[pCCid2][UE_id2].ul_total_buffer > 0 )
swap_UEs(UE_list,UE_id1,UE_id2,1);
}
}
}
}
}
......@@ -269,8 +269,9 @@ void cancel_ra_proc(module_id_t module_idP,int CC_id,frame_t frameP, uint16_t pr
@param rnti RNTI of UE transmitting the SDU
@param sdu Pointer to received SDU
@param harq_pid Index of harq process corresponding to this sdu
@param msg3_flag pointer to a flag indicating that this sdu is msg3; cleared by the MAC when a false msg3 is detected
*/
void rx_sdu(module_id_t module_idP, int CC_id,frame_t frameP, rnti_t rnti, uint8_t *sdu, uint16_t sdu_len, int harq_pid);
void rx_sdu(module_id_t module_idP, int CC_id,frame_t frameP, rnti_t rnti, uint8_t *sdu, uint16_t sdu_len, int harq_pid,uint8_t *msg3_flag);
/* \brief Function to indicate that a scheduling request (SR) was received by the eNB.
@param Mod_id Instance ID of eNB
......@@ -473,12 +474,22 @@ int mac_init(void);
int add_new_ue(module_id_t Mod_id, int CC_id, rnti_t rnti,int harq_pid);
int mac_remove_ue(module_id_t Mod_id, int UE_id);
void swap_UEs(UE_list_t *listP,int nodeiP, int nodejP);
int prev(UE_list_t *listP, int nodeP);
int maxround(module_id_t Mod_id,uint16_t rnti,int frame,sub_frame_t subframe,uint8_t ul_flag);
void swap_UEs(UE_list_t *listP,int nodeiP, int nodejP, int ul_flag);
int prev(UE_list_t *listP, int nodeP, int ul_flag);
void dump_ue_list(UE_list_t *listP, int ul_flag);
int UE_num_active_CC(UE_list_t *listP,int ue_idP);
int UE_PCCID(module_id_t mod_idP,int ue_idP);
rnti_t UE_RNTI(module_id_t mod_idP, int ue_idP);
void ulsch_scheduler_pre_processor(module_id_t module_idP, int frameP, sub_frame_t subframeP, uint16_t *first_rb, uint8_t aggregation, uint32_t *nCCE);
void store_ulsch_buffer(module_id_t module_idP, int frameP, sub_frame_t subframeP);
void sort_ue_ul (module_id_t module_idP,int frameP, sub_frame_t subframeP);
void assign_max_mcs_min_rb(module_id_t module_idP,int frameP, sub_frame_t subframeP,uint16_t *first_rb);
void adjust_bsr_info(int buffer_occupancy, uint16_t TBS, UE_TEMPLATE *UE_template);
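One plausible way these helpers fit together in the eNB UL scheduling path, shown purely as an assumed call order (not copied from the implementation):
/* Illustrative sketch: refresh per-UE buffer state from the latest BSRs, order
 * the UL UE list, then pre-assign MCS/RBs within each UE's power headroom. */
static void ulsch_pre_processor_sketch(module_id_t module_idP, int frameP,
                                       sub_frame_t subframeP, uint16_t *first_rb)
{
    store_ulsch_buffer(module_idP, frameP, subframeP);
    sort_ue_ul(module_idP, frameP, subframeP);
    assign_max_mcs_min_rb(module_idP, frameP, subframeP, first_rb);
}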
/*! \fn UE_L2_state_t ue_scheduler(module_id_t module_idP,frame_t frameP, sub_frame_t subframe, lte_subframe_t direction,uint8_t eNB_index)
\brief UE scheduler where all the UE background tasks are performed. This function: 1) triggers PDCP every 5 ms, 2) calls RRC for the link status to return to PHY, 3) performs SR/BSR procedures for scheduling feedback, and 4) performs PHR procedures.
\param[in] module_idP instance of the UE
......
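The four background tasks listed in the \brief above can be pictured as the per-TTI outline below; the helper names are hypothetical stubs introduced only to keep the sketch self-contained:
/* Hypothetical outline of the UE background tasks described above. */
static void trigger_pdcp_sketch(module_id_t m)        { (void)m; }
static void report_link_status_sketch(module_id_t m)  { (void)m; }
static void run_sr_bsr_sketch(module_id_t m)          { (void)m; }
static void run_phr_sketch(module_id_t m)             { (void)m; }
static void ue_background_tasks_sketch(module_id_t module_idP, frame_t frameP,
                                       sub_frame_t subframeP)
{
    if ((((frameP * 10) + subframeP) % 5) == 0)
        trigger_pdcp_sketch(module_idP);      /* 1) trigger PDCP every 5 ms      */
    report_link_status_sketch(module_idP);    /* 2) RRC link status back to PHY  */
    run_sr_bsr_sketch(module_idP);            /* 3) SR/BSR scheduling feedback   */
    run_phr_sketch(module_idP);               /* 4) PHR procedures               */
}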
......@@ -210,7 +210,7 @@ uint32_t ue_get_SR(module_id_t module_idP,int CC_id,frame_t frameP,uint8_t eNB_i
int gapOffset = -1;
int T = 0;
DevCheck(module_idP < NB_UE_INST, module_idP, NB_UE_INST, 0);
DevCheck(module_idP < (int)NB_UE_INST, module_idP, NB_UE_INST, 0);
if (CC_id>0) {
LOG_E(MAC,"Transmission on secondary CCs is not supported yet\n");
......@@ -219,12 +219,6 @@ uint32_t ue_get_SR(module_id_t module_idP,int CC_id,frame_t frameP,uint8_t eNB_i
}
// determine the measurement gap
LOG_D(MAC,"[UE %d][SR %x] Frame %d subframe %d PHY asks for SR (SR_COUNTER/dsr_TransMax %d/%d), SR_pending %d\n",
module_idP,rnti,frameP,subframe,
UE_mac_inst[module_idP].scheduling_info.SR_COUNTER,
(1<<(2+UE_mac_inst[module_idP].physicalConfigDedicated->schedulingRequestConfig->choice.setup.dsr_TransMax)),
UE_mac_inst[module_idP].scheduling_info.SR_pending);
if (UE_mac_inst[module_idP].measGapConfig !=NULL){
if (UE_mac_inst[module_idP].measGapConfig->choice.setup.gapOffset.present == MeasGapConfig__setup__gapOffset_PR_gp0){
MGRP= 40;
......@@ -243,10 +237,18 @@ uint32_t ue_get_SR(module_id_t module_idP,int CC_id,frame_t frameP,uint8_t eNB_i
return(0);
}
}
if ((UE_mac_inst[module_idP].scheduling_info.SR_pending==1) &&
if ((UE_mac_inst[module_idP].physicalConfigDedicated != NULL) &&
(UE_mac_inst[module_idP].scheduling_info.SR_pending==1) &&
(UE_mac_inst[module_idP].scheduling_info.SR_COUNTER <
(1<<(2+UE_mac_inst[module_idP].physicalConfigDedicated->schedulingRequestConfig->choice.setup.dsr_TransMax)))
){
LOG_D(MAC,"[UE %d][SR %x] Frame %d subframe %d PHY asks for SR (SR_COUNTER/dsr_TransMax %d/%d), SR_pending %d\n",
module_idP,rnti,frameP,subframe,
UE_mac_inst[module_idP].scheduling_info.SR_COUNTER,
(1<<(2+UE_mac_inst[module_idP].physicalConfigDedicated->schedulingRequestConfig->choice.setup.dsr_TransMax)),
UE_mac_inst[module_idP].scheduling_info.SR_pending);
UE_mac_inst[module_idP].scheduling_info.SR_COUNTER++;
// start the sr-ProhibitTimer: Rel-9 and above
if (UE_mac_inst[module_idP].scheduling_info.sr_ProhibitTimer > 0) { // timer configured
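The dsr-TransMax comparison above converts the RRC enumerator into an absolute transmission limit. A hedged helper showing just that arithmetic (it assumes the enumerator is 0-based in the order n4, n8, n16, n32, n64):
/* Sketch: dsr-TransMax enumerator -> maximum number of SR transmissions.
 * 1 << (2 + k) yields 4, 8, 16, 32, 64 for k = 0..4, matching the
 * SR_COUNTER comparison used above. */
static inline unsigned int sr_trans_max_sketch(unsigned int dsr_TransMax_enum)
{
    return 1u << (2u + dsr_TransMax_enum);
}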
......@@ -1425,6 +1427,7 @@ UE_L2_STATE_t ue_scheduler(module_id_t module_idP,frame_t frameP, sub_frame_t su
if (UE_mac_inst[module_idP].RA_contention_resolution_cnt ==
((1+rach_ConfigCommon->ra_SupervisionInfo.mac_ContentionResolutionTimer)<<3)) {
UE_mac_inst[module_idP].RA_active = 0;
UE_mac_inst[module_idP].RA_contention_resolution_timer_active = 0;
// Signal PHY to quit RA procedure
LOG_E(MAC,"Module id %u Contention resolution timer expired, RA failed\n", module_idP);
mac_xface->ra_failed(module_idP,0,eNB_indexP);
......@@ -1488,26 +1491,27 @@ UE_L2_STATE_t ue_scheduler(module_id_t module_idP,frame_t frameP, sub_frame_t su
// Put this in a function
// Call PHR procedure as described in Section 5.4.6 in 36.321
if (UE_mac_inst[module_idP].PHR_state == MAC_MainConfig__phr_Config_PR_setup){ // normal operation
if (UE_mac_inst[module_idP].PHR_reconfigured == 1) { // upon (re)configuration of the power headroom reporting functionality by upper layers
UE_mac_inst[module_idP].PHR_reporting_active = 1;
UE_mac_inst[module_idP].PHR_reconfigured = 0;
} else {
//LOG_D(MAC,"PHR normal operation %d active %d \n", UE_mac_inst[module_idP].scheduling_info.periodicPHR_SF, UE_mac_inst[module_idP].PHR_reporting_active);
if ((UE_mac_inst[module_idP].scheduling_info.prohibitPHR_SF <= 0) &&
((mac_xface->get_PL(module_idP,0,eNB_indexP) < UE_mac_inst[module_idP].scheduling_info.PathlossChange_db) ||
(UE_mac_inst[module_idP].power_backoff_db[eNB_indexP] > UE_mac_inst[module_idP].scheduling_info.PathlossChange_db)))
// trigger PHR and reset the timer later when the PHR report is sent
UE_mac_inst[module_idP].PHR_reporting_active = 1;
else if (UE_mac_inst[module_idP].PHR_reporting_active ==0 )
UE_mac_inst[module_idP].scheduling_info.prohibitPHR_SF--;
if (UE_mac_inst[module_idP].scheduling_info.periodicPHR_SF <= 0 )
// trigger PHR and reset the timer later when the PHR report is sent
UE_mac_inst[module_idP].PHR_reporting_active = 1;
else if (UE_mac_inst[module_idP].PHR_reporting_active == 0 )
UE_mac_inst[module_idP].scheduling_info.periodicPHR_SF--;
}
} else { // release / nothing
UE_mac_inst[module_idP].PHR_reporting_active = 0; // release PHR
}
//If the UE has UL resources allocated for new transmission for this TTI here:
vcd_signal_dumper_dump_function_by_name(VCD_SIGNAL_DUMPER_FUNCTIONS_UE_SCHEDULER, VCD_FUNCTION_OUT);
......
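The PHR triggering logic above reduces to three conditions; the condensed helper below is a hypothetical restatement with simplified fields (timer decrements and resets are assumed to happen elsewhere, as in the code):
/* Hedged summary of the PHR trigger conditions shown above. */
struct phr_state_sketch {
    int reconfigured;      /* reporting (re)configured by upper layers      */
    int prohibit_sf;       /* prohibitPHR timer, in subframes               */
    int periodic_sf;       /* periodicPHR timer, in subframes               */
    int pathloss_changed;  /* pathloss / power-backoff condition satisfied  */
};
static int phr_should_report_sketch(const struct phr_state_sketch *s)
{
    if (s->reconfigured)                              return 1; /* (re)configuration */
    if ((s->prohibit_sf <= 0) && s->pathloss_changed) return 1; /* pathloss trigger  */
    if (s->periodic_sf <= 0)                          return 1; /* periodic trigger  */
    return 0;
}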
......@@ -52,7 +52,9 @@ void rlc_am_init(rlc_am_entity_t *rlc_pP, frame_t frameP)
//LOG_D(RLC,"RLC_AM_SDU_CONTROL_BUFFER_SIZE %d sizeof(rlc_am_tx_sdu_management_t) %d \n", RLC_AM_SDU_CONTROL_BUFFER_SIZE, sizeof(rlc_am_tx_sdu_management_t));
rlc_pP->input_sdus = calloc(1, RLC_AM_SDU_CONTROL_BUFFER_SIZE*sizeof(rlc_am_tx_sdu_management_t));
rlc_pP->pdu_retrans_buffer = calloc(1, (uint16_t)((unsigned int)RLC_AM_PDU_RETRANSMISSION_BUFFER_SIZE*(unsigned int)sizeof(rlc_am_tx_data_pdu_management_t)));
#warning "cast the rlc retrans buffer to uint32"
// rlc_pP->pdu_retrans_buffer = calloc(1, (uint16_t)((unsigned int)RLC_AM_PDU_RETRANSMISSION_BUFFER_SIZE*(unsigned int)sizeof(rlc_am_tx_data_pdu_management_t)));
rlc_pP->pdu_retrans_buffer = calloc(1, (uint32_t)((unsigned int)RLC_AM_PDU_RETRANSMISSION_BUFFER_SIZE*(unsigned int)sizeof(rlc_am_tx_data_pdu_management_t)));
LOG_D(RLC, "[FRAME %5u][RLC_AM][MOD XX][RB XX][INIT] input_sdus[] = %p element size=%d\n", frameP, rlc_pP->input_sdus,sizeof(rlc_am_tx_sdu_management_t));
LOG_D(RLC, "[FRAME %5u][RLC_AM][MOD XX][RB XX][INIT] pdu_retrans_buffer[] = %p element size=%d\n", frameP, rlc_pP->pdu_retrans_buffer,sizeof(rlc_am_tx_data_pdu_management_t));
......
......@@ -88,7 +88,7 @@ typedef struct
uint8_t* (*get_dlsch_sdu)(module_id_t Mod_id,int CC_id,frame_t frameP,rnti_t rnti,uint8_t TB_index);
/// Send ULSCH sdu to MAC for given rnti
void (*rx_sdu)(module_id_t Mod_id,int CC_id,frame_t frameP,rnti_t rnti, uint8_t *sdu,uint16_t sdu_len, int harq_pid);
void (*rx_sdu)(module_id_t Mod_id,int CC_id,frame_t frameP,rnti_t rnti, uint8_t *sdu,uint16_t sdu_len, int harq_pid,uint8_t *msg3_flag);
/// Indicate failure to synch to external source
void (*mrbch_phy_sync_failure) (module_id_t Mod_id,frame_t frameP, uint8_t free_eNB_index);
......@@ -131,6 +131,10 @@ typedef struct
#endif
// configure the cba rnti at the physical layer
void (*phy_config_cba_rnti)(module_id_t Mod_id,eNB_flag_t eNB_flag, uint8_t index, uint16_t cba_rnti, uint8_t cba_group_id, uint8_t num_active_cba_groups);
/// get delta mcs for fast UL AMC
// uint8_t eNB_id,uint8_t harq_pid, uint8_t UE_id,
int16_t (*estimate_ue_tx_power)(uint32_t tbs, uint32_t nb_rb, uint8_t control_only, lte_prefix_type_t ncp, uint8_t use_srs);
/// UE functions
......
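As in the UL pre-processor earlier in this commit, the new estimate_ue_tx_power callback is meant to keep a UE's tentative allocation inside its reported power headroom. A usage sketch, assuming mac_xface points at this interface and using illustrative parameter names:
/* Hedged sketch: returns 1 if a candidate UL allocation (tbs bits over nb_rb
 * RBs, normal data, no SRS) is expected to fit the UE's reported headroom. */
static int ul_allocation_fits_phr_sketch(int16_t phr_info, uint32_t tbs,
                                         uint32_t nb_rb, lte_prefix_type_t ncp)
{
    int16_t tx_power = mac_xface->estimate_ue_tx_power(tbs, nb_rb,
                                                       0,   /* control_only = 0 */
                                                       ncp,
                                                       0);  /* use_srs = 0      */
    return ((phr_info - tx_power) > 0);
}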
......@@ -1662,6 +1662,10 @@ uint16_t do_RRCConnectionReconfiguration(uint8_t Mod
C_RNTI_t *cba_rnti,
struct RRCConnectionReconfiguration_r8_IEs__dedicatedInfoNASList
*dedicatedInfoNASList
#ifdef Rel10
, SCellToAddMod_r10_t *SCell_config
#endif
) {
asn_enc_rval_t enc_rval;
......@@ -2114,7 +2118,7 @@ uint8_t do_ULInformationTransfer(uint8_t **buffer, uint32_t pdu_length, uint8_t
return encoded;
}
OAI_UECapability_t *fill_ue_capability() {
OAI_UECapability_t *fill_ue_capability(void) {
static OAI_UECapability_t UECapability; /* TODO: declared static so that its address can be returned; should be allocated in a cleaner way. */
SupportedBandEUTRA_t Bandlist[4];
// BandInfoEUTRA_t BandInfo_meas[4];
......
......@@ -190,7 +190,11 @@ uint16_t do_RRCConnectionReconfiguration(uint8_t Mod
RSRP_Range_t *rsrp,
C_RNTI_t *cba_rnti,
struct RRCConnectionReconfiguration_r8_IEs__dedicatedInfoNASList
*dedicatedInfoNASList);
*dedicatedInfoNASList
#ifdef Rel10
, SCellToAddMod_r10_t *SCell_config
#endif
);
/***
* \brief Generate an MCCH-Message (eNB). This routine configures MBSFNAreaConfiguration (PMCH-InfoList and Subframe Allocation for MBMS data)
......@@ -218,7 +222,7 @@ uint8_t do_DLInformationTransfer(uint8_t Mod_id, uint8_t **buffer, uint8_t trans
uint8_t do_ULInformationTransfer(uint8_t **buffer, uint32_t pdu_length, uint8_t *pdu_buffer);
OAI_UECapability_t *fill_ue_capability();
OAI_UECapability_t *fill_ue_capability(void);
uint8_t do_UECapabilityEnquiry(uint8_t Mod_id,
uint8_t *buffer,
......
......@@ -71,6 +71,7 @@
#ifdef Rel10
#include "MCCH-Message.h"
#include "MBSFNAreaConfiguration-r9.h"
#include "SCellToAddMod-r10.h"
#endif
#include "AS-Config.h"
#include "AS-Context.h"
......@@ -312,6 +313,9 @@ typedef struct eNB_RRC_INST_s {
uint8_t *SIB23;
uint8_t sizeof_SIB23;
uint16_t physCellId;
#ifdef Rel10
SCellToAddMod_r10_t sCell_config[NUMBER_OF_UE_MAX][2];
#endif
BCCH_BCH_Message_t mib;
BCCH_DL_SCH_Message_t siblock1;
BCCH_DL_SCH_Message_t systemInformation;
......
......@@ -537,6 +537,7 @@ void rrc_eNB_free_UE_index(
eNB_rrc_inst[enb_mod_idP].Info.UE_list[ue_mod_idP]);
eNB_rrc_inst[enb_mod_idP].Info.UE[ue_mod_idP].Status = RRC_IDLE;
eNB_rrc_inst[enb_mod_idP].Info.UE_list[ue_mod_idP] = 0;
free(eNB_rrc_inst[enb_mod_idP].SRB_configList[ue_mod_idP]);
}
/*------------------------------------------------------------------------------*/
......@@ -1156,7 +1157,11 @@ static void rrc_eNB_generate_defaultRRCConnectionReconfiguration(
#else
physicalConfigDedicated[ue_mod_idP], MeasObj_list, ReportConfig_list, quantityConfig, MeasId_list,
#endif
mac_MainConfig, NULL, NULL, Sparams, rsrp, cba_RNTI, dedicatedInfoNASList);
mac_MainConfig, NULL, NULL, Sparams, rsrp, cba_RNTI, dedicatedInfoNASList
#ifdef Rel10
, NULL //SCellToAddMod_r10_t
#endif
);
#ifdef RRC_MSG_PRINT
LOG_F(RRC,"[MSG] RRC Connection Reconfiguration\n");
......@@ -1199,8 +1204,9 @@ int rrc_eNB_generate_RRCConnectionReconfiguration_SCell(module_id_t enb_mod_idP,
uint8_t sCellIndexToAdd = 0; //one SCell so far
// uint8_t sCellIndexToAdd;
// sCellIndexToAdd = rrc_find_free_SCell_index(enb_mod_idP, ue_mod_idP, 1);
if (eNB_rrc_inst[enb_mod_idP].sCell_config[ue_mod_idP][sCellIndexToAdd]) {
eNB_rrc_inst[enb_mod_idP].sCell_config[ue_mod_idP][sCellIndexToAdd]->cellIdentification_r10->dl_CarrierFreq_r10 = dl_CarrierFreq_r10;
// if (eNB_rrc_inst[enb_mod_idP].sCell_config[ue_mod_idP][sCellIndexToAdd] ) {
if (eNB_rrc_inst[enb_mod_idP].sCell_config != NULL) {
eNB_rrc_inst[enb_mod_idP].sCell_config[ue_mod_idP][sCellIndexToAdd].cellIdentification_r10->dl_CarrierFreq_r10 = dl_CarrierFreq_r10;
}
else {
LOG_E(RRC,"Scell not configured!\n");
......@@ -1210,15 +1216,12 @@ int rrc_eNB_generate_RRCConnectionReconfiguration_SCell(module_id_t enb_mod_idP,
size = do_RRCConnectionReconfiguration(enb_mod_idP,
buffer,
ue_mod_idP,
/*0*/rrc_eNB_get_next_transaction_identifier(enb_mod_idP),//Transaction_id,
rrc_eNB_get_next_transaction_identifier(enb_mod_idP),//Transaction_id,
(SRB_ToAddModList_t*)NULL,
(DRB_ToAddModList_t*)NULL,
(DRB_ToReleaseList_t*)NULL,
(struct SPS_Config*)NULL,
(struct PhysicalConfigDedicated*)NULL,
#ifdef Rel10
eNB_rrc_inst[enb_mod_idP].sCell_config[ue_mod_idP][sCellIndexToAdd],
#endif
(MeasObjectToAddModList_t*)NULL,
(ReportConfigToAddModList_t*)NULL,
(QuantityConfig_t*)NULL,
......@@ -1229,7 +1232,12 @@ int rrc_eNB_generate_RRCConnectionReconfiguration_SCell(module_id_t enb_mod_idP,
(struct MeasConfig__speedStatePars*)NULL,
(RSRP_Range_t*)NULL,
(C_RNTI_t*)NULL,
(struct RRCConnectionReconfiguration_r8_IEs__dedicatedInfoNASList*)NULL);
(struct RRCConnectionReconfiguration_r8_IEs__dedicatedInfoNASList*)NULL
#ifdef Rel10
, eNB_rrc_inst[enb_mod_idP].sCell_config
#endif
);
LOG_I(RRC,"[eNB %d] Frame %d, Logical Channel DL-DCCH, Generate RRCConnectionReconfiguration (bytes %d, UE id %d)\n",
enb_mod_idP,frame, size, ue_mod_idP);
......@@ -2206,7 +2214,11 @@ void rrc_eNB_generate_RRCConnectionReconfiguration_handover(
NULL, //*sps_Config,
physicalConfigDedicated[ue_mod_idP], MeasObj_list, ReportConfig_list, NULL, //quantityConfig,
MeasId_list, mac_MainConfig, NULL, mobilityInfo, Sparams,
NULL, NULL, dedicatedInfoNASList);
NULL, NULL, dedicatedInfoNASList
#ifdef Rel10
, NULL // SCellToAddMod_r10_t
#endif
);
LOG_I(RRC,
"[eNB %d] Frame %d, Logical Channel DL-DCCH, Generate RRCConnectionReconfiguration for handover (bytes %d, UE id %d)\n",
......
......@@ -146,7 +146,7 @@ void average_pkt_jitter(int src, int dst, int application){
otg_info->rx_jitter_avg[src][dst][application]/= otg_info->rx_jitter_sample[src][dst][application];
otg_info->rx_jitter_avg_e2e[src][dst][application]/= otg_info->rx_jitter_sample[src][dst][application];
} else {
LOG_W(OTG,"[src %d][dst %d][app %d]number of samples for jitter calculation is %d\n",src, dst, application, otg_info->rx_jitter_sample[src][dst][application]);
LOG_T(OTG,"[src %d][dst %d][app %d]number of samples for jitter calculation is %d\n",src, dst, application, otg_info->rx_jitter_sample[src][dst][application]);
}
if (otg_info->rx_jitter_avg[src][dst][application] > 0) {
......@@ -239,7 +239,7 @@ void kpi_gen() {
int num_active_source=0;
int dl_ok=0,ul_ok;
int dl_ok=0,ul_ok=0;
char traffic_type[12];
char traffic[30];
......@@ -849,9 +849,9 @@ if ((g_otg->background_stats==1)&&(otg_info->tx_num_bytes_background[i][j]>0)){
}
if ((dl_ok == 1 ) && (ul_ok ==1))
LOG_I(OTG,"************ DL and UL loss rate below 10% *************\n");
LOG_I(OTG,"************ DL and UL loss rate below 10 *************\n");
else
LOG_I(OTG,"************ DL and UL loss rate above 10% *************\n");
LOG_I(OTG,"************ DL and UL loss rate above 10 *************\n");
#endif
}
......
......@@ -407,15 +407,15 @@ int check_data_transmit(int src,int dst, int app, int ctime){
// do not generate packet for this pair of src, dst : no app type and/or no idt are defined
if (g_otg->flow_start[src][dst][app] > ctime ){
//g_ otg->flow_start_flag[src][dst][app]=1;
LOG_D(OTG,"Flow start time not reached : do not generate packet for this pair of src=%d, dst =%d, start %d < ctime %d \n",
LOG_T(OTG,"Flow start time not reached : do not generate packet for this pair of src=%d, dst =%d, start %d < ctime %d \n",
src, dst,g_otg->flow_start[src][dst][app], ctime);
size+=0;
}else if (g_otg->flow_duration[src][dst][app] + g_otg->flow_start[src][dst][app] < ctime ){
LOG_D(OTG,"Flow duration reached: do not generate packet for this pair of src=%d, dst =%d, duration %d < ctime %d + start %d\n",
LOG_T(OTG,"Flow duration reached: do not generate packet for this pair of src=%d, dst =%d, duration %d < ctime %d + start %d\n",
src, dst,g_otg->flow_duration[src][dst][app], ctime, g_otg->flow_start[src][dst][app]);
size+=0;
}else if ((g_otg->application_type[src][dst][app]==0)&&(g_otg->idt_dist[src][dst][app][PE_STATE]==0)){
LOG_D(OTG,"Do not generate packet for this pair of src=%d, dst =%d, IDT zero and app %d not specificed\n", src, dst, app);
LOG_T(OTG,"Do not generate packet for this pair of src=%d, dst =%d, IDT zero and app %d not specificed\n", src, dst, app);
size+=0;
}
......@@ -835,11 +835,11 @@ int k;
g_otg->trans_proto[i][j][k] = UDP;
g_otg->ip_v[i][j][k] = IPV4;
g_otg->idt_dist[i][j][k][PE_STATE] = FIXED;
g_otg->idt_min[i][j][k][PE_STATE] = (int)round(uniform_dist((i+1)*30,(i+1)*100));// 500+(i+1)*10; //random idt among different UEs
g_otg->idt_max[i][j][k][PE_STATE] = 10;
g_otg->idt_min[i][j][k][PE_STATE] = 20;//(int)round(uniform_dist((i+1)*30,(i+1)*100));// 500+(i+1)*10; //random idt among different UEs
g_otg->idt_max[i][j][k][PE_STATE] = 20;
g_otg->size_dist[i][j][k][PE_STATE] = FIXED;
g_otg->size_min[i][j][k][PE_STATE] = 16;
g_otg->size_max[i][j][k][PE_STATE] = 50;
g_otg->size_min[i][j][k][PE_STATE] = 128;
g_otg->size_max[i][j][k][PE_STATE] = 128;
LOG_I(OTG,"OTG_CONFIG SCBR, src = %d, dst = %d, traffic id %d, idt %d dist type for size = %d\n", i, j, k,
g_otg->idt_min[i][j][k][PE_STATE], g_otg->size_min[i][j][k][PE_STATE]);
#ifdef STANDALONE
......@@ -851,11 +851,11 @@ int k;
g_otg->trans_proto[i][j][k] = UDP;
g_otg->ip_v[i][j][k] = IPV4;
g_otg->idt_dist[i][j][k][PE_STATE] = FIXED;
g_otg->idt_min[i][j][k][PE_STATE] = (int)round(uniform_dist((i+1)*30, (i+1)*100));// 250+(i+1)*10;
g_otg->idt_max[i][j][k][PE_STATE] = 10;
g_otg->idt_min[i][j][k][PE_STATE] = 20;//(int)round(uniform_dist((i+1)*30, (i+1)*100));// 250+(i+1)*10;
g_otg->idt_max[i][j][k][PE_STATE] = 20;
g_otg->size_dist[i][j][k][PE_STATE] = FIXED;
g_otg->size_min[i][j][k][PE_STATE] = 32;
g_otg->size_max[i][j][k][PE_STATE] = 512;
g_otg->size_min[i][j][k][PE_STATE] = 768;
g_otg->size_max[i][j][k][PE_STATE] = 768;
LOG_I(OTG,"OTG_CONFIG MCBR, src = %d, dst = %d, traffic id %d, dist type for size = %d\n", i, j,k , g_otg->size_dist[i][j][k][PE_STATE]);
#ifdef STANDALONE
g_otg->dst_port[i][j] = 302;
......@@ -866,11 +866,11 @@ int k;
g_otg->trans_proto[i][j][k] = UDP;
g_otg->ip_v[i][j][k] = IPV4;
g_otg->idt_dist[i][j][k][PE_STATE] = FIXED;// main param in this mode
g_otg->idt_min[i][j][k][PE_STATE] = (int)round(uniform_dist((i+1)*30,(i+1)*100)); //125+(i+1)*10;
g_otg->idt_max[i][j][k][PE_STATE] = 10;
g_otg->idt_min[i][j][k][PE_STATE] = 20;// (int)round(uniform_dist((i+1)*30,(i+1)*100)); //125+(i+1)*10;
g_otg->idt_max[i][j][k][PE_STATE] = 20;
g_otg->size_dist[i][j][k][PE_STATE] = FIXED; // main param in this mode
g_otg->size_min[i][j][k][PE_STATE] = 128;// main param in this mode
g_otg->size_max[i][j][k][PE_STATE] = 1024;
g_otg->size_min[i][j][k][PE_STATE] = 1400;// main param in this mode
g_otg->size_max[i][j][k][PE_STATE] = 1400;
LOG_I(OTG,"OTG_CONFIG BCBR, src = %d, dst = %d, dist type for size = %d\n", i, j, g_otg->size_dist[i][j][k][PE_STATE]);
#ifdef STANDALONE
g_otg->dst_port[i][j] = 302;
......
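For reference, with FIXED inter-departure time and packet size the offered rate per flow is size * 8 / IDT (assuming the IDT values are in milliseconds, like the other timing fields here): the settings above give roughly 128 B / 20 ms ≈ 51 kbit/s for SCBR, 768 B / 20 ms ≈ 307 kbit/s for MCBR, and 1400 B / 20 ms = 560 kbit/s for BCBR.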
......@@ -4,7 +4,7 @@
<LARGE_SCALE>urban</LARGE_SCALE>
<FREE_SPACE_MODEL_PARAMETERS>
<PATHLOSS_EXPONENT>2</PATHLOSS_EXPONENT>
<PATHLOSS_0_dB>-50</PATHLOSS_0_dB><!--pathloss at 1km -->
<PATHLOSS_0_dB>-108</PATHLOSS_0_dB><!--pathloss at 1km -->
</FREE_SPACE_MODEL_PARAMETERS>
<SMALL_SCALE>AWGN</SMALL_SCALE>
</FADING>
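Read with the usual log-distance model (an assumption about the simulator's convention, not confirmed here), these parameters give a channel gain of PATHLOSS_0_dB - 10 * PATHLOSS_EXPONENT * log10(d / 1 km); with the exponent of 2, the new -108 dB reference means a UE at 100 m sees about -88 dB, versus the -30 dB implied by the previous -50 dB setting.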
......@@ -62,8 +62,8 @@
<PREDEFINED_TRAFFIC>
<SOURCE_ID>1</SOURCE_ID> <!-- valid formats are "n:m" and "n,m" and "n" -->
<FLOW_START_ms>200</FLOW_START_ms> <!-- indicates the start time of the app or the actual duration of the traffic-->
<FLOW_DURATION_ms>500</FLOW_DURATION_ms>
<APPLICATION_TYPE>scbr</APPLICATION_TYPE>
<FLOW_DURATION_ms>600</FLOW_DURATION_ms>
<APPLICATION_TYPE>bcbr</APPLICATION_TYPE>
<DESTINATION_ID>0</DESTINATION_ID> <!-- valid formats are "n:m" and "n,m" and "n" -->
</PREDEFINED_TRAFFIC>
</APPLICATION_CONFIG>
......
......@@ -17,7 +17,7 @@
<MOBILITY>
<UE_MOBILITY>
<RANDOM_UE_DISTRIBUTION>
<NUMBER_OF_NODES>4</NUMBER_OF_NODES>
<NUMBER_OF_NODES>9</NUMBER_OF_NODES>
</RANDOM_UE_DISTRIBUTION>
<UE_MOBILITY_TYPE>STATIC</UE_MOBILITY_TYPE> <!-- STATIC -->
</UE_MOBILITY>
......@@ -37,7 +37,7 @@
<SOURCE_ID>0</SOURCE_ID>
<TRANSPORT_PROTOCOL>udp</TRANSPORT_PROTOCOL> <!-- OPTIONS: tcp (default), udp -->
<IP_VERSION>ipv6</IP_VERSION> <!-- OPTIONS: ipv4 (default), ipv6 -->
<DESTINATION_ID>1:3</DESTINATION_ID>
<DESTINATION_ID>1:9</DESTINATION_ID>
<FLOW_START_ms>1000</FLOW_START_ms> <!-- indicates the start time of the app or the actual duration of the traffic-->
<FLOW_DURATION_ms>10000</FLOW_DURATION_ms> <!-- indicates the start time of the app or the actual duration of the traffic-->
......@@ -51,7 +51,7 @@
</CUSTOMIZED_TRAFFIC>
<CUSTOMIZED_TRAFFIC>
<SOURCE_ID>1:3</SOURCE_ID>
<SOURCE_ID>1:9</SOURCE_ID>
<TRANSPORT_PROTOCOL>udp</TRANSPORT_PROTOCOL> <!-- OPTIONS: tcp (default), udp -->
<IP_VERSION>ipv6</IP_VERSION> <!-- OPTIONS: ipv4 (default), ipv6 -->
<DESTINATION_ID>0</DESTINATION_ID>
......
......@@ -71,7 +71,7 @@
#include "oaisim.h"
#define RF
#define DEBUG_SIM
//#define DEBUG_SIM
int number_rb_ul;
int first_rbUL ;
......@@ -284,7 +284,7 @@ void do_DL_sig(double **r_re0,double **r_im0,
#ifdef DEBUG_SIM
for (i=0;i<eNB2UE[eNB_id][UE_id][CC_id]->channel_length;i++)
printf("ch(%d,%d)[%d] : (%f,%f)\n",eNB_id,UE_id,i,eNB2UE[eNB_id][UE_id][CC_id]->ch[0][i].x,eNB2UE[eNB_id][UE_id][CC_id]->ch[0][i].y);
LOG_D(OCM,"channel(%d,%d)[%d] : (%f,%f)\n",eNB_id,UE_id,i,eNB2UE[eNB_id][UE_id][CC_id]->ch[0][i].x,eNB2UE[eNB_id][UE_id][CC_id]->ch[0][i].y);
#endif
LOG_D(OCM,"[SIM][DL] Channel eNB %d => UE %d (CCid %d): tx_power %f dBm/RE, path_loss %f dB\n",
......
......@@ -573,7 +573,7 @@ int ocg_config_env(void) {
oai_emulation.info.frame_type[CC_id]=TDD;
}
else
LOG_I(EMU,"Frame type is %s \n",oai_emulation.info.frame_type_name);
LOG_I(EMU,"Frame type is %s \n",oai_emulation.info.frame_type_name[CC_id]);
if (oai_emulation.info.frame_type[CC_id] == TDD ){
if ((oai_emulation.info.tdd_config[CC_id] > 6) || (oai_emulation.info.tdd_config[CC_id] < 0)) {
LOG_E(EMU,"TDD config %d out of range, set it to 3\n",oai_emulation.info.tdd_config[CC_id]);
......
......@@ -105,7 +105,7 @@ double forgetting_factor = 0.0;
uint8_t beta_ACK = 0;
uint8_t beta_RI = 0;
uint8_t beta_CQI = 2;
uint8_t target_ul_mcs = 4;
uint8_t target_ul_mcs = 16;
LTE_DL_FRAME_PARMS *frame_parms[MAX_NUM_CCs];
int map1,map2;
double **ShaF = NULL;
......@@ -883,7 +883,7 @@ void init_openair2(void) {
module_id_t enb_id;
module_id_t UE_id;
int CC_id;
#warning "eNB index is hard coded to zero"
for (CC_id=0;CC_id<MAX_NUM_CCs;CC_id++)
l2_init (&PHY_vars_eNB_g[0][CC_id]->lte_frame_parms,
oai_emulation.info.eMBMS_active_state,
......