Commit 8ffe5b49 authored by Robert Schmidt

Rename UE_list_t to UE_info_t, UE_list_t separate struct

UE_list_t is now a separate struct that holds a plain list of UEs. It can
be used, e.g., in the scheduler to mark groups of users. The original
UE_list_t is renamed to UE_info_t to reflect that it is a container for
per-UE information.
parent 7c924563
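For orientation, here is a minimal C sketch of the assumed split, reconstructed only from the field accesses visible in the hunks below (UE_info->list.head, UE_info->list.next, UE_info->active[], UE_info->UE_sched_ctrl[], UE_info->UE_template[][], ...). The surrounding types (boolean_t, UE_sched_ctrl_t, UE_TEMPLATE, eNB_UE_STATS, DLSCH_PDU) and array bounds are taken from the usual OAI MAC headers and are placeholders here, not the exact definitions introduced by this commit; example_iterate() is a hypothetical helper used purely for illustration.

/* Sketch (assumption, not the committed header): the new UE_list_t carries
 * only list bookkeeping, so schedulers can build ad-hoc groups of UEs. */
typedef struct {
  int head;                        /* first UE index, or -1 if the list is empty */
  int next[MAX_MOBILES_PER_ENB];   /* next[UE_id]; -1 terminates the chain */
} UE_list_t;

/* The former UE_list_t, renamed to UE_info_t: one container for all per-UE
 * state, plus one embedded UE_list_t (accessed as UE_info->list in the diff). */
typedef struct {
  UE_list_t       list;
  int             num_UEs;
  boolean_t       active[MAX_MOBILES_PER_ENB];
  UE_sched_ctrl_t UE_sched_ctrl[MAX_MOBILES_PER_ENB];
  UE_TEMPLATE     UE_template[MAX_NUM_CCs][MAX_MOBILES_PER_ENB];
  eNB_UE_STATS    eNB_UE_stats[MAX_NUM_CCs][MAX_MOBILES_PER_ENB];
  DLSCH_PDU       DLSCH_pdu[MAX_NUM_CCs][2][MAX_MOBILES_PER_ENB]; /* [CC][TB][UE]; middle bound assumed */
  /* ... remaining per-UE arrays (numactiveCCs[], assoc_dl/ul_slice_idx[], ...) unchanged ... */
} UE_info_t;

/* Hypothetical helper showing the mechanical call-site change: per-UE fields
 * move from UE_list to UE_info, and list traversal goes through UE_info->list. */
static void example_iterate(module_id_t module_idP) {
  UE_info_t *UE_info = &RC.mac[module_idP]->UE_info;
  for (int UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
    UE_sched_ctrl_t *ue_sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
    (void)ue_sched_ctrl; /* per-UE scheduling work would go here */
  }
}

This is the pattern repeated throughout the diff: every UE_list.X access becomes UE_info.X, and head/next iteration (e.g. in schedule_ue_spec) becomes UE_info->list.head / UE_info->list.next.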
......@@ -182,7 +182,7 @@ void phy_scope_gNB(FD_phy_scope_gnb *form,
int Qm = 2;
/*
if (!RC.nrmac[0]->UE_list.active[UE_id])
if (!RC.nrmac[0]->UE_info.active[UE_id])
return;
// choose max MCS to compute coded_bits_per_codeword
......
......@@ -640,6 +640,10 @@ uci_procedures(PHY_VARS_eNB *eNB,
uci = &(eNB->uci_vars[i]);
if ((uci->active == 1) && (uci->frame == frame) && (uci->subframe == subframe)) {
if (uci->ue_id > MAX_MOBILES_PER_ENB) {
LOG_W(PHY, "UCI for UE %d is out of range and/or not active in MAC\n", uci->ue_id);
continue;
}
LOG_D(PHY,"Frame %d, subframe %d: Running uci procedures (type %d) for %d \n",
frame,
subframe,
......@@ -1239,11 +1243,11 @@ void pusch_procedures(PHY_VARS_eNB *eNB,L1_rxtx_proc_t *proc) {
print_CQI(ulsch_harq->o,ulsch_harq->uci_format,0,fp->N_RB_DL);
#endif
fill_ulsch_cqi_indication(eNB,frame,subframe,ulsch_harq,ulsch->rnti);
RC.mac[eNB->Mod_id]->UE_list.UE_sched_ctrl[i].cqi_req_flag &= (~(1 << subframe));
RC.mac[eNB->Mod_id]->UE_info.UE_sched_ctrl[i].cqi_req_flag &= (~(1 << subframe));
} else {
if(RC.mac[eNB->Mod_id]->UE_list.UE_sched_ctrl[i].cqi_req_flag & (1 << subframe) ) {
RC.mac[eNB->Mod_id]->UE_list.UE_sched_ctrl[i].cqi_req_flag &= (~(1 << subframe));
RC.mac[eNB->Mod_id]->UE_list.UE_sched_ctrl[i].cqi_req_timer=30;
if(RC.mac[eNB->Mod_id]->UE_info.UE_sched_ctrl[i].cqi_req_flag & (1 << subframe) ) {
RC.mac[eNB->Mod_id]->UE_info.UE_sched_ctrl[i].cqi_req_flag &= (~(1 << subframe));
RC.mac[eNB->Mod_id]->UE_info.UE_sched_ctrl[i].cqi_req_timer=30;
LOG_D(PHY,"Frame %d,Subframe %d, We're supposed to get a cqi here. Set cqi_req_timer to 30.\n",frame,subframe);
}
}
......
......@@ -939,7 +939,7 @@ int flexran_agent_mac_sf_trigger(mid_t mod_id, const void *params, Protocol__Fle
for (i = 0; i < MAX_MOBILES_PER_ENB; i++) {
for (j = 0; j < 8; j++) {
if (RC.mac && RC.mac[mod_id] && RC.mac[mod_id]->UE_list.eNB_UE_stats[UE_PCCID(mod_id,i)][i].harq_pid == 1) {
if (RC.mac && RC.mac[mod_id] && RC.mac[mod_id]->UE_info.eNB_UE_stats[UE_PCCID(mod_id,i)][i].harq_pid == 1) {
available_harq[i] = j;
break;
}
......@@ -977,13 +977,13 @@ int flexran_agent_mac_sf_trigger(mid_t mod_id, const void *params, Protocol__Fle
dl_info[i]->harq_process_id = available_harq[UE_id];
if (RC.mac && RC.mac[mod_id])
RC.mac[mod_id]->UE_list.eNB_UE_stats[UE_PCCID(mod_id, UE_id)][UE_id].harq_pid = 0;
RC.mac[mod_id]->UE_info.eNB_UE_stats[UE_PCCID(mod_id, UE_id)][UE_id].harq_pid = 0;
dl_info[i]->has_harq_process_id = 1;
/* Fill in the status of the HARQ process (2 TBs)*/
dl_info[i]->n_harq_status = 2;
dl_info[i]->harq_status = malloc(sizeof(uint32_t) * dl_info[i]->n_harq_status);
for (j = 0; j < dl_info[i]->n_harq_status; j++) {
dl_info[i]->harq_status[j] = RC.mac[mod_id]->UE_list.UE_sched_ctrl[UE_id].round[UE_PCCID(mod_id, UE_id)][j];
dl_info[i]->harq_status[j] = RC.mac[mod_id]->UE_info.UE_sched_ctrl[UE_id].round[UE_PCCID(mod_id, UE_id)][j];
// TODO: This should be different per TB
}
// LOG_I(FLEXRAN_AGENT, "Sending subframe trigger for frame %d and subframe %d and harq %d (round %d)\n", flexran_get_current_frame(mod_id), (flexran_get_current_subframe(mod_id) + 1) % 10, dl_info[i]->harq_process_id, dl_info[i]->harq_status[0]);
......@@ -1357,7 +1357,7 @@ void flexran_agent_init_mac_agent(mid_t mod_id) {
for (i = 0; i < MAX_MOBILES_PER_ENB; i++) {
for (j = 0; j < 8; j++) {
if (RC.mac && RC.mac[mod_id])
RC.mac[mod_id]->UE_list.eNB_UE_stats[UE_PCCID(mod_id,i)][i].harq_pid = 0;
RC.mac[mod_id]->UE_info.eNB_UE_stats[UE_PCCID(mod_id,i)][i].harq_pid = 0;
}
}
}
......
......@@ -98,7 +98,7 @@ uint16_t flexran_get_future_sfn_sf(mid_t mod_id, int ahead_of_time) {
int flexran_get_mac_num_ues(mid_t mod_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.num_UEs;
return RC.mac[mod_id]->UE_info.num_UEs;
}
int flexran_get_num_ue_lcs(mid_t mod_id, mid_t ue_id) {
......@@ -123,7 +123,7 @@ int flexran_get_mac_ue_id_rnti(mid_t mod_id, rnti_t rnti) {
/* get the (active) UE with RNTI i */
for (n = 0; n < MAX_MOBILES_PER_ENB; ++n) {
if (RC.mac[mod_id]->UE_list.active[n] == TRUE
if (RC.mac[mod_id]->UE_info.active[n] == TRUE
&& rnti == UE_RNTI(mod_id, n)) {
return n;
}
......@@ -139,7 +139,7 @@ int flexran_get_mac_ue_id(mid_t mod_id, int i) {
/* get the (i+1)'th active UE */
for (n = 0; n < MAX_MOBILES_PER_ENB; ++n) {
if (RC.mac[mod_id]->UE_list.active[n] == TRUE) {
if (RC.mac[mod_id]->UE_info.active[n] == TRUE) {
if (i == 0)
return n;
......@@ -159,19 +159,19 @@ rnti_t flexran_get_mac_ue_crnti(mid_t mod_id, mid_t ue_id) {
int flexran_get_ue_bsr_ul_buffer_info(mid_t mod_id, mid_t ue_id, lcid_t lcid) {
if (!mac_is_present(mod_id)) return -1;
return RC.mac[mod_id]->UE_list.UE_template[UE_PCCID(mod_id, ue_id)][ue_id].ul_buffer_info[lcid];
return RC.mac[mod_id]->UE_info.UE_template[UE_PCCID(mod_id, ue_id)][ue_id].ul_buffer_info[lcid];
}
int8_t flexran_get_ue_phr(mid_t mod_id, mid_t ue_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.UE_template[UE_PCCID(mod_id, ue_id)][ue_id].phr_info;
return RC.mac[mod_id]->UE_info.UE_template[UE_PCCID(mod_id, ue_id)][ue_id].phr_info;
}
uint8_t flexran_get_ue_wcqi(mid_t mod_id, mid_t ue_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.UE_sched_ctrl[ue_id].dl_cqi[0];
return RC.mac[mod_id]->UE_info.UE_sched_ctrl[ue_id].dl_cqi[0];
}
rlc_buffer_occupancy_t flexran_get_tx_queue_size(mid_t mod_id, mid_t ue_id, logical_chan_id_t channel_id) {
......@@ -242,7 +242,7 @@ int32_t flexran_get_TA(mid_t mod_id, mid_t ue_id, uint8_t cc_id) {
uint32_t flexran_get_total_size_dl_mac_sdus(mid_t mod_id, mid_t ue_id, int cc_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].total_sdu_bytes;
return RC.mac[mod_id]->UE_info.eNB_UE_stats[cc_id][ue_id].total_sdu_bytes;
}
uint32_t flexran_get_total_size_ul_mac_sdus(mid_t mod_id, mid_t ue_id, int cc_id) {
......@@ -251,7 +251,7 @@ uint32_t flexran_get_total_size_ul_mac_sdus(mid_t mod_id, mid_t ue_id, int cc_id
uint64_t bytes = 0;
for (int i = 0; i < NB_RB_MAX; ++i) {
bytes += RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].num_bytes_rx[i];
bytes += RC.mac[mod_id]->UE_info.eNB_UE_stats[cc_id][ue_id].num_bytes_rx[i];
}
return bytes;
......@@ -260,135 +260,135 @@ uint32_t flexran_get_total_size_ul_mac_sdus(mid_t mod_id, mid_t ue_id, int cc_id
uint32_t flexran_get_TBS_dl(mid_t mod_id, mid_t ue_id, int cc_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].TBS;
return RC.mac[mod_id]->UE_info.eNB_UE_stats[cc_id][ue_id].TBS;
}
uint32_t flexran_get_TBS_ul(mid_t mod_id, mid_t ue_id, int cc_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].ulsch_TBS;
return RC.mac[mod_id]->UE_info.eNB_UE_stats[cc_id][ue_id].ulsch_TBS;
}
uint16_t flexran_get_num_prb_retx_dl_per_ue(mid_t mod_id, mid_t ue_id, int cc_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].rbs_used_retx;
return RC.mac[mod_id]->UE_info.eNB_UE_stats[cc_id][ue_id].rbs_used_retx;
}
uint32_t flexran_get_num_prb_retx_ul_per_ue(mid_t mod_id, mid_t ue_id, int cc_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].rbs_used_retx_rx;
return RC.mac[mod_id]->UE_info.eNB_UE_stats[cc_id][ue_id].rbs_used_retx_rx;
}
uint16_t flexran_get_num_prb_dl_tx_per_ue(mid_t mod_id, mid_t ue_id, int cc_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].rbs_used;
return RC.mac[mod_id]->UE_info.eNB_UE_stats[cc_id][ue_id].rbs_used;
}
uint16_t flexran_get_num_prb_ul_rx_per_ue(mid_t mod_id, mid_t ue_id, int cc_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].rbs_used_rx;
return RC.mac[mod_id]->UE_info.eNB_UE_stats[cc_id][ue_id].rbs_used_rx;
}
uint8_t flexran_get_ue_wpmi(mid_t mod_id, mid_t ue_id, uint8_t cc_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.UE_sched_ctrl[ue_id].periodic_wideband_pmi[cc_id];
return RC.mac[mod_id]->UE_info.UE_sched_ctrl[ue_id].periodic_wideband_pmi[cc_id];
}
uint8_t flexran_get_mcs1_dl(mid_t mod_id, mid_t ue_id, int cc_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].dlsch_mcs1;
return RC.mac[mod_id]->UE_info.eNB_UE_stats[cc_id][ue_id].dlsch_mcs1;
}
uint8_t flexran_get_mcs2_dl(mid_t mod_id, mid_t ue_id, int cc_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].dlsch_mcs2;
return RC.mac[mod_id]->UE_info.eNB_UE_stats[cc_id][ue_id].dlsch_mcs2;
}
uint8_t flexran_get_mcs1_ul(mid_t mod_id, mid_t ue_id, int cc_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].ulsch_mcs1;
return RC.mac[mod_id]->UE_info.eNB_UE_stats[cc_id][ue_id].ulsch_mcs1;
}
uint8_t flexran_get_mcs2_ul(mid_t mod_id, mid_t ue_id, int cc_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].ulsch_mcs2;
return RC.mac[mod_id]->UE_info.eNB_UE_stats[cc_id][ue_id].ulsch_mcs2;
}
uint32_t flexran_get_total_prb_dl_tx_per_ue(mid_t mod_id, mid_t ue_id, int cc_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].total_rbs_used;
return RC.mac[mod_id]->UE_info.eNB_UE_stats[cc_id][ue_id].total_rbs_used;
}
uint32_t flexran_get_total_prb_ul_rx_per_ue(mid_t mod_id, mid_t ue_id, int cc_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].total_rbs_used_rx;
return RC.mac[mod_id]->UE_info.eNB_UE_stats[cc_id][ue_id].total_rbs_used_rx;
}
uint32_t flexran_get_total_num_pdu_dl(mid_t mod_id, mid_t ue_id, int cc_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].total_num_pdus;
return RC.mac[mod_id]->UE_info.eNB_UE_stats[cc_id][ue_id].total_num_pdus;
}
uint32_t flexran_get_total_num_pdu_ul(mid_t mod_id, mid_t ue_id, int cc_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].total_num_pdus_rx;
return RC.mac[mod_id]->UE_info.eNB_UE_stats[cc_id][ue_id].total_num_pdus_rx;
}
uint64_t flexran_get_total_TBS_dl(mid_t mod_id, mid_t ue_id, int cc_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].total_pdu_bytes;
return RC.mac[mod_id]->UE_info.eNB_UE_stats[cc_id][ue_id].total_pdu_bytes;
}
uint64_t flexran_get_total_TBS_ul(mid_t mod_id, mid_t ue_id, int cc_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].total_ulsch_TBS;
return RC.mac[mod_id]->UE_info.eNB_UE_stats[cc_id][ue_id].total_ulsch_TBS;
}
int flexran_get_harq_round(mid_t mod_id, uint8_t cc_id, mid_t ue_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].harq_round;
return RC.mac[mod_id]->UE_info.eNB_UE_stats[cc_id][ue_id].harq_round;
}
uint32_t flexran_get_num_mac_sdu_tx(mid_t mod_id, mid_t ue_id, int cc_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].num_mac_sdu_tx;
return RC.mac[mod_id]->UE_info.eNB_UE_stats[cc_id][ue_id].num_mac_sdu_tx;
}
unsigned char flexran_get_mac_sdu_lcid_index(mid_t mod_id, mid_t ue_id, int cc_id, int index) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].lcid_sdu[index];
return RC.mac[mod_id]->UE_info.eNB_UE_stats[cc_id][ue_id].lcid_sdu[index];
}
uint32_t flexran_get_mac_sdu_size(mid_t mod_id, mid_t ue_id, int cc_id, int lcid) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].sdu_length_tx[lcid];
return RC.mac[mod_id]->UE_info.eNB_UE_stats[cc_id][ue_id].sdu_length_tx[lcid];
}
/* TODO needs to be revised */
void flexran_update_TA(mid_t mod_id, mid_t ue_id, uint8_t cc_id) {
/*
UE_list_t *UE_list=&eNB_mac_inst[mod_id].UE_list;
UE_sched_ctrl *ue_sched_ctl = &UE_list->UE_sched_ctrl[ue_id];
UE_info_t *UE_info=&eNB_mac_inst[mod_id].UE_info;
UE_sched_ctrl *ue_sched_ctl = &UE_info->UE_sched_ctrl[ue_id];
if (ue_sched_ctl->ta_timer == 0) {
......@@ -432,7 +432,7 @@ int flexran_get_MAC_CE_bitmap_TA(mid_t mod_id, mid_t ue_id, uint8_t cc_id) {
int flexran_get_active_CC(mid_t mod_id, mid_t ue_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.numactiveCCs[ue_id];
return RC.mac[mod_id]->UE_info.numactiveCCs[ue_id];
}
uint8_t flexran_get_current_RI(mid_t mod_id, mid_t ue_id, uint8_t cc_id) {
......@@ -890,13 +890,13 @@ uint8_t flexran_get_rrc_status(mid_t mod_id, rnti_t rnti) {
uint64_t flexran_get_ue_aggregated_max_bitrate_dl(mid_t mod_id, mid_t ue_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.UE_sched_ctrl[ue_id].ue_AggregatedMaximumBitrateDL;
return RC.mac[mod_id]->UE_info.UE_sched_ctrl[ue_id].ue_AggregatedMaximumBitrateDL;
}
uint64_t flexran_get_ue_aggregated_max_bitrate_ul(mid_t mod_id, mid_t ue_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.UE_sched_ctrl[ue_id].ue_AggregatedMaximumBitrateUL;
return RC.mac[mod_id]->UE_info.UE_sched_ctrl[ue_id].ue_AggregatedMaximumBitrateUL;
}
int flexran_get_half_duplex(mid_t mod_id, rnti_t rnti) {
......@@ -1235,7 +1235,7 @@ uint64_t flexran_get_ue_imsi(mid_t mod_id, rnti_t rnti) {
long flexran_get_lcg(mid_t mod_id, mid_t ue_id, mid_t lc_id) {
if (!mac_is_present(mod_id)) return 0;
return RC.mac[mod_id]->UE_list.UE_template[UE_PCCID(mod_id, ue_id)][ue_id].lcgidmap[lc_id];
return RC.mac[mod_id]->UE_info.UE_template[UE_PCCID(mod_id, ue_id)][ue_id].lcgidmap[lc_id];
}
/* TODO Navid: needs to be revised */
......@@ -3016,7 +3016,7 @@ uint32_t flexran_get_rrc_enb_ue_s1ap_id(mid_t mod_id, rnti_t rnti)
int flexran_get_ue_dl_slice_id(mid_t mod_id, mid_t ue_id) {
if (!mac_is_present(mod_id)) return -1;
int slice_idx = 0; //RC.mac[mod_id]->UE_list.assoc_dl_slice_idx[ue_id];
int slice_idx = 0; //RC.mac[mod_id]->UE_info.assoc_dl_slice_idx[ue_id];
if (slice_idx >= 0 && slice_idx < RC.mac[mod_id]->slice_info.n_dl)
return RC.mac[mod_id]->slice_info.dl[slice_idx].id;
......@@ -3031,13 +3031,13 @@ void flexran_set_ue_dl_slice_idx(mid_t mod_id, mid_t ue_id, int slice_idx) {
if (!flexran_dl_slice_exists(mod_id, slice_idx)) return;
//RC.mac[mod_id]->UE_list.assoc_dl_slice_idx[ue_id] = slice_idx;
//RC.mac[mod_id]->UE_info.assoc_dl_slice_idx[ue_id] = slice_idx;
}
int flexran_get_ue_ul_slice_id(mid_t mod_id, mid_t ue_id) {
if (!mac_is_present(mod_id)) return -1;
int slice_idx = RC.mac[mod_id]->UE_list.assoc_ul_slice_idx[ue_id];
int slice_idx = RC.mac[mod_id]->UE_info.assoc_ul_slice_idx[ue_id];
if (slice_idx >= 0 && slice_idx < RC.mac[mod_id]->slice_info.n_ul)
return RC.mac[mod_id]->slice_info.ul[slice_idx].id;
......@@ -3052,7 +3052,7 @@ void flexran_set_ue_ul_slice_idx(mid_t mod_id, mid_t ue_id, int slice_idx) {
if (!flexran_ul_slice_exists(mod_id, slice_idx)) return;
RC.mac[mod_id]->UE_list.assoc_ul_slice_idx[ue_id] = slice_idx;
RC.mac[mod_id]->UE_info.assoc_ul_slice_idx[ue_id] = slice_idx;
}
int flexran_dl_slice_exists(mid_t mod_id, int slice_idx) {
......@@ -3103,7 +3103,7 @@ int flexran_remove_dl_slice(mid_t mod_id, int slice_idx) {
memset(&sli->dl[sli->n_dl], 0, sizeof(sli->dl[sli->n_dl]));
/* all UEs that have been in the old slice are put into slice index 0 */
//int *assoc_list = RC.mac[mod_id]->UE_list.assoc_dl_slice_idx;
//int *assoc_list = RC.mac[mod_id]->UE_info.assoc_dl_slice_idx;
//for (int i = 0; i < MAX_MOBILES_PER_ENB; ++i) {
// if (assoc_list[i] == slice_idx)
......@@ -3397,7 +3397,7 @@ int flexran_remove_ul_slice(mid_t mod_id, int slice_idx) {
memset(&sli->ul[sli->n_ul], 0, sizeof(sli->ul[sli->n_ul]));
/* all UEs that have been in the old slice are put into slice index 0 */
int *assoc_list = RC.mac[mod_id]->UE_list.assoc_ul_slice_idx;
int *assoc_list = RC.mac[mod_id]->UE_info.assoc_ul_slice_idx;
for (int i = 0; i < MAX_MOBILES_PER_ENB; ++i) {
if (assoc_list[i] == slice_idx)
......
......@@ -697,7 +697,7 @@ int DU_send_UL_RRC_MESSAGE_TRANSFER(instance_t instance,
break;
}
UE_sched_ctrl_t *UE_scheduling_control = &(RC.mac[instance]->UE_list.UE_sched_ctrl[UE_id_mac]);
UE_sched_ctrl_t *UE_scheduling_control = &(RC.mac[instance]->UE_info.UE_sched_ctrl[UE_id_mac]);
if (UE_scheduling_control->cdrx_waiting_ack == TRUE) {
UE_scheduling_control->cdrx_waiting_ack = FALSE;
......
......@@ -633,9 +633,9 @@ int DU_handle_UE_CONTEXT_RELEASE_COMMAND(instance_t instance,
int UE_out_of_sync = 0;
for (int n = 0; n < MAX_MOBILES_PER_ENB; ++n) {
if (RC.mac[instance]->UE_list.active[n] == TRUE
if (RC.mac[instance]->UE_info.active[n] == TRUE
&& rnti == UE_RNTI(instance, n)) {
UE_out_of_sync = RC.mac[instance]->UE_list.UE_sched_ctrl[n].ul_out_of_sync;
UE_out_of_sync = RC.mac[instance]->UE_info.UE_sched_ctrl[n].ul_out_of_sync;
break;
}
}
......
......@@ -782,7 +782,7 @@ int rrc_mac_config_req_eNB(module_id_t Mod_idP,
int i;
int UE_id = -1;
eNB_MAC_INST *eNB = RC.mac[Mod_idP];
UE_list_t *UE_list= &eNB->UE_list;
UE_info_t *UE_info= &eNB->UE_info;
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_RRC_MAC_CONFIG, VCD_FUNCTION_IN);
LOG_D(MAC, "RC.mac:%p mib:%p\n", RC.mac, mib);
......@@ -892,9 +892,9 @@ int rrc_mac_config_req_eNB(module_id_t Mod_idP,
}
if (logicalChannelConfig) {
UE_list->UE_template[CC_idP][UE_id].lcgidmap[logicalChannelIdentity] = *logicalChannelConfig->ul_SpecificParameters->logicalChannelGroup;
UE_list->UE_template[CC_idP][UE_id].lcgidpriority[logicalChannelIdentity] = logicalChannelConfig->ul_SpecificParameters->priority;
} else UE_list->UE_template[CC_idP][UE_id].lcgidmap[logicalChannelIdentity] = 0;
UE_info->UE_template[CC_idP][UE_id].lcgidmap[logicalChannelIdentity] = *logicalChannelConfig->ul_SpecificParameters->logicalChannelGroup;
UE_info->UE_template[CC_idP][UE_id].lcgidpriority[logicalChannelIdentity] = logicalChannelConfig->ul_SpecificParameters->priority;
} else UE_info->UE_template[CC_idP][UE_id].lcgidmap[logicalChannelIdentity] = 0;
}
if (physicalConfigDedicated != NULL) {
......@@ -905,7 +905,7 @@ int rrc_mac_config_req_eNB(module_id_t Mod_idP,
return(-1);
}
UE_list->UE_template[CC_idP][UE_id].physicalConfigDedicated = physicalConfigDedicated;
UE_info->UE_template[CC_idP][UE_id].physicalConfigDedicated = physicalConfigDedicated;
LOG_I(MAC,"Added physicalConfigDedicated %p for %d.%d\n",physicalConfigDedicated,CC_idP,UE_id);
}
......@@ -1048,15 +1048,11 @@ void eNB_Config_Local_DRX(instance_t Mod_id,
rrc_mac_drx_config_req_t *rrc_mac_drx_config_req)
//-----------------------------------------------------------------------------
{
UE_list_t *UE_list_mac = NULL;
UE_info_t *UE_info_mac = &RC.mac[Mod_id]->UE_info;
UE_sched_ctrl_t *UE_scheduling_control = NULL;
int UE_id = -1;
rnti_t rnti = rrc_mac_drx_config_req->rnti;
LTE_DRX_Config_t *const drx_Configuration = rrc_mac_drx_config_req->drx_Configuration;
UE_list_mac = &(RC.mac[Mod_id]->UE_list);
UE_id = find_UE_id(Mod_id, rnti);
rnti_t rnti = rrc_mac_drx_config_req->rnti;
int UE_id = find_UE_id(Mod_id, rnti);
/* Check UE_id */
if (UE_id == -1) {
......@@ -1065,7 +1061,7 @@ void eNB_Config_Local_DRX(instance_t Mod_id,
}
/* Get struct to modify */
UE_scheduling_control = &(UE_list_mac->UE_sched_ctrl[UE_id]);
UE_scheduling_control = &(UE_info_mac->UE_sched_ctrl[UE_id]);
UE_scheduling_control->cdrx_configured = FALSE; // will be set to true when no error
/* Check drx_Configuration */
......
......@@ -88,7 +88,7 @@ void schedule_SRS(module_id_t module_idP,
const uint16_t deltaTSFCTabType1[15][2] = { {1, 1}, {1, 2}, {2, 2}, {1, 5}, {2, 5}, {4, 5}, {8, 5}, {3, 5}, {12, 5}, {1, 10}, {2, 10}, {4, 10}, {8, 10}, {351, 10}, {383, 10} }; // Table 5.5.3.3-2 3GPP 36.211 FDD
const uint16_t deltaTSFCTabType2[14][2] = { {2, 5}, {6, 5}, {10, 5}, {18, 5}, {14, 5}, {22, 5}, {26, 5}, {30, 5}, {70, 10}, {74, 10}, {194, 10}, {326, 10}, {586, 10}, {210, 10} }; // Table 5.5.3.3-2 3GPP 36.211 TDD
eNB_MAC_INST *eNB = RC.mac[module_idP];
UE_list_t *UE_list = &(eNB->UE_list);
UE_info_t *UE_info = &eNB->UE_info;
nfapi_ul_config_request_body_t *ul_req = NULL;
UE_sched_ctrl_t *UE_scheduling_control = NULL;
COMMON_channels_t *cc = eNB->common_channels;
......@@ -116,7 +116,7 @@ void schedule_SRS(module_id_t module_idP,
if ((1 << tmp) & deltaTSFC) {
/* This is an SRS subframe, loop over UEs */
for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; UE_id++) {
if (!UE_list->active[UE_id]) {
if (!UE_info->active[UE_id]) {
continue;
}
......@@ -125,14 +125,14 @@ void schedule_SRS(module_id_t module_idP,
continue;
}
if(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated == NULL) {
if(UE_info->UE_template[CC_id][UE_id].physicalConfigDedicated == NULL) {
LOG_E(MAC,"physicalConfigDedicated is null for UE %d\n",UE_id);
printf("physicalConfigDedicated is null for UE %d\n",UE_id);
return;
}
/* CDRX condition on Active Time and SRS type-0 report (36.321 5.7) */
UE_scheduling_control = &(UE_list->UE_sched_ctrl[UE_id]);
UE_scheduling_control = &(UE_info->UE_sched_ctrl[UE_id]);
/* Test if Active Time not running since 6+ subframes */
if (UE_scheduling_control->cdrx_configured == TRUE && UE_scheduling_control->in_active_time == FALSE) {
......@@ -144,7 +144,7 @@ void schedule_SRS(module_id_t module_idP,
ul_req = &(eNB->UL_req[CC_id].ul_config_request_body);
if ((soundingRS_UL_ConfigDedicated = UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->soundingRS_UL_ConfigDedicated) != NULL) {
if ((soundingRS_UL_ConfigDedicated = UE_info->UE_template[CC_id][UE_id].physicalConfigDedicated->soundingRS_UL_ConfigDedicated) != NULL) {
if (soundingRS_UL_ConfigDedicated->present == LTE_SoundingRS_UL_ConfigDedicated_PR_setup) {
get_srs_pos(&cc[CC_id],
soundingRS_UL_ConfigDedicated->choice.setup.srs_ConfigIndex,
......@@ -160,7 +160,7 @@ void schedule_SRS(module_id_t module_idP,
ul_config_pdu->pdu_size = 2 + (uint8_t) (2 + sizeof(nfapi_ul_config_srs_pdu));
ul_config_pdu->srs_pdu.srs_pdu_rel8.tl.tag = NFAPI_UL_CONFIG_REQUEST_SRS_PDU_REL8_TAG;
ul_config_pdu->srs_pdu.srs_pdu_rel8.size = (uint8_t)sizeof(nfapi_ul_config_srs_pdu);
ul_config_pdu->srs_pdu.srs_pdu_rel8.rnti = UE_list->UE_template[CC_id][UE_id].rnti;
ul_config_pdu->srs_pdu.srs_pdu_rel8.rnti = UE_info->UE_template[CC_id][UE_id].rnti;
ul_config_pdu->srs_pdu.srs_pdu_rel8.srs_bandwidth = soundingRS_UL_ConfigDedicated->choice.setup.srs_Bandwidth;
ul_config_pdu->srs_pdu.srs_pdu_rel8.frequency_domain_position = soundingRS_UL_ConfigDedicated->choice.setup.freqDomainPosition;
ul_config_pdu->srs_pdu.srs_pdu_rel8.srs_hopping_bandwidth = soundingRS_UL_ConfigDedicated->choice.setup.srs_HoppingBandwidth;
......@@ -172,7 +172,7 @@ void schedule_SRS(module_id_t module_idP,
ul_req->number_of_pdus++;
} // if (((10*frameP+subframeP) % srsPeriodicity) == srsOffset)
} // if (soundingRS_UL_ConfigDedicated->present == SoundingRS_UL_ConfigDedicated_PR_setup)
} // if ((soundingRS_UL_ConfigDedicated = UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->soundingRS_UL_ConfigDedicated)!=NULL)
} // if ((soundingRS_UL_ConfigDedicated = UE_info->UE_template[CC_id][UE_id].physicalConfigDedicated->soundingRS_UL_ConfigDedicated)!=NULL)
} // end for loop on UE_id
} // if((1<<tmp) & deltaTSFC)
} // SRS config not NULL
......@@ -195,7 +195,7 @@ void schedule_CSI(module_id_t module_idP,
uint16_t N_OFFSET_CQI = 0;
struct LTE_CQI_ReportPeriodic *cqi_ReportPeriodic = NULL;
eNB_MAC_INST *eNB = RC.mac[module_idP];
UE_list_t *UE_list = &eNB->UE_list;
UE_info_t *UE_info = &eNB->UE_info;
COMMON_channels_t *cc = NULL;
nfapi_ul_config_request_body_t *ul_req = NULL;
UE_sched_ctrl_t *UE_scheduling_control = NULL;
......@@ -204,7 +204,7 @@ void schedule_CSI(module_id_t module_idP,
cc = &eNB->common_channels[CC_id];
for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; UE_id++) {
if (UE_list->active[UE_id] == FALSE) {
if (UE_info->active[UE_id] == FALSE) {
continue;
}
......@@ -213,7 +213,7 @@ void schedule_CSI(module_id_t module_idP,
continue;
}
AssertFatal(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated != NULL,
AssertFatal(UE_info->UE_template[CC_id][UE_id].physicalConfigDedicated != NULL,
"physicalConfigDedicated is null for UE %d\n",
UE_id);
/*
......@@ -221,7 +221,7 @@ void schedule_CSI(module_id_t module_idP,
* Here we consider classic periodic reports on PUCCH without PUSCH simultaneous transmission condition.
* TODO: add the handling or test on simultaneous PUCCH/PUSCH transmission
*/
UE_scheduling_control = &(UE_list->UE_sched_ctrl[UE_id]);
UE_scheduling_control = &(UE_info->UE_sched_ctrl[UE_id]);
if (UE_scheduling_control->cdrx_configured == TRUE) {
/* Test if CQI masking activated */
......@@ -245,8 +245,8 @@ void schedule_CSI(module_id_t module_idP,
ul_req = &(eNB->UL_req[CC_id].ul_config_request_body);
if (UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->cqi_ReportConfig != NULL) {
cqi_ReportPeriodic = UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->cqi_ReportConfig->cqi_ReportPeriodic;
if (UE_info->UE_template[CC_id][UE_id].physicalConfigDedicated->cqi_ReportConfig != NULL) {
cqi_ReportPeriodic = UE_info->UE_template[CC_id][UE_id].physicalConfigDedicated->cqi_ReportConfig->cqi_ReportPeriodic;
if (cqi_ReportPeriodic != NULL) {
/* Rel8 Periodic CSI (CQI/PMI/RI) reporting */
......@@ -261,10 +261,10 @@ void schedule_CSI(module_id_t module_idP,
ul_config_pdu->pdu_type = NFAPI_UL_CONFIG_UCI_CQI_PDU_TYPE;
ul_config_pdu->pdu_size = 2 + (uint8_t) (2 + sizeof(nfapi_ul_config_uci_cqi_pdu));
ul_config_pdu->uci_cqi_pdu.ue_information.ue_information_rel8.tl.tag = NFAPI_UL_CONFIG_REQUEST_UE_INFORMATION_REL8_TAG;
ul_config_pdu->uci_cqi_pdu.ue_information.ue_information_rel8.rnti = UE_list->UE_template[CC_id][UE_id].rnti;
ul_config_pdu->uci_cqi_pdu.ue_information.ue_information_rel8.rnti = UE_info->UE_template[CC_id][UE_id].rnti;
ul_config_pdu->uci_cqi_pdu.cqi_information.cqi_information_rel8.tl.tag = NFAPI_UL_CONFIG_REQUEST_CQI_INFORMATION_REL8_TAG;
ul_config_pdu->uci_cqi_pdu.cqi_information.cqi_information_rel8.pucch_index = cqi_ReportPeriodic->choice.setup.cqi_PUCCH_ResourceIndex;
ul_config_pdu->uci_cqi_pdu.cqi_information.cqi_information_rel8.dl_cqi_pmi_size = get_rel8_dl_cqi_pmi_size(&UE_list->UE_sched_ctrl[UE_id], CC_id, cc, get_tmode(module_idP, CC_id, UE_id),
ul_config_pdu->uci_cqi_pdu.cqi_information.cqi_information_rel8.dl_cqi_pmi_size = get_rel8_dl_cqi_pmi_size(&UE_info->UE_sched_ctrl[UE_id], CC_id, cc, get_tmode(module_idP, CC_id, UE_id),
cqi_ReportPeriodic);
ul_req->number_of_pdus++;
ul_req->tl.tag = NFAPI_UL_CONFIG_REQUEST_BODY_TAG;
......@@ -278,7 +278,7 @@ void schedule_CSI(module_id_t module_idP,
ul_config_pdu->pdu_type = NFAPI_UL_CONFIG_UCI_CQI_PDU_TYPE;
ul_config_pdu->pdu_size = 2 + (uint8_t) (2 + sizeof(nfapi_ul_config_uci_cqi_pdu));
ul_config_pdu->uci_cqi_pdu.ue_information.ue_information_rel8.tl.tag = NFAPI_UL_CONFIG_REQUEST_UE_INFORMATION_REL8_TAG;
ul_config_pdu->uci_cqi_pdu.ue_information.ue_information_rel8.rnti = UE_list->UE_template[CC_id][UE_id].rnti;
ul_config_pdu->uci_cqi_pdu.ue_information.ue_information_rel8.rnti = UE_info->UE_template[CC_id][UE_id].rnti;
ul_config_pdu->uci_cqi_pdu.cqi_information.cqi_information_rel8.tl.tag = NFAPI_UL_CONFIG_REQUEST_CQI_INFORMATION_REL8_TAG;
ul_config_pdu->uci_cqi_pdu.cqi_information.cqi_information_rel8.pucch_index = cqi_ReportPeriodic->choice.setup.cqi_PUCCH_ResourceIndex;
ul_config_pdu->uci_cqi_pdu.cqi_information.cqi_information_rel8.dl_cqi_pmi_size = (cc->p_eNB == 2) ? 1 : 2;
......@@ -290,7 +290,7 @@ void schedule_CSI(module_id_t module_idP,
} // if CSI Periodic is not release state
} // if (cqi_ReportPeriodic != NULL)
} // if cqi_ReportConfig != NULL
} // for (UE_id=UE_list->head; UE_id>=0; UE_id=UE_list->next[UE_id]) {
} // for (UE_id=UE_info->head; UE_id>=0; UE_id=UE_info->next[UE_id]) {
} // for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
}
......@@ -308,7 +308,7 @@ schedule_SR (module_id_t module_idP,
int is_harq = 0;
int pdu_list_index = 0;
eNB_MAC_INST *eNB = RC.mac[module_idP];
UE_list_t *UE_list = &eNB->UE_list;
UE_info_t *UE_info = &eNB->UE_info;
nfapi_ul_config_request_t *ul_req = NULL;
nfapi_ul_config_request_body_t *ul_req_body = NULL;
LTE_SchedulingRequestConfig_t *SRconfig = NULL;
......@@ -318,13 +318,13 @@ schedule_SR (module_id_t module_idP,
eNB->UL_req[CC_id].sfn_sf = (frameP << 4) + subframeP;
for (int UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; UE_id++) {
if (!UE_list->active[UE_id]) {
if (!UE_info->active[UE_id]) {
continue;
}
if (UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated == NULL) continue;
if (UE_info->UE_template[CC_id][UE_id].physicalConfigDedicated == NULL) continue;
if ((SRconfig = UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->schedulingRequestConfig) != NULL) {
if ((SRconfig = UE_info->UE_template[CC_id][UE_id].physicalConfigDedicated->schedulingRequestConfig) != NULL) {
if (SRconfig->present == LTE_SchedulingRequestConfig_PR_setup) {
if (SRconfig->choice.setup.sr_ConfigIndex <= 4) { // 5 ms SR period
if ((subframeP % 5) != SRconfig->choice.setup.sr_ConfigIndex) continue;
......@@ -338,7 +338,7 @@ schedule_SR (module_id_t module_idP,
if ((10 * (frameP & 7) + subframeP) != (SRconfig->choice.setup.sr_ConfigIndex - 75)) continue;
}
} // SRconfig->present == SchedulingRequestConfig_PR_setup)
} // SRconfig = UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->schedulingRequestConfig)!=NULL)
} // SRconfig = UE_info->UE_template[CC_id][UE_id].physicalConfigDedicated->schedulingRequestConfig)!=NULL)
/* If we get here there is some PUCCH1 reception to schedule for SR */
ul_req = &(eNB->UL_req[CC_id]);
......@@ -353,14 +353,14 @@ schedule_SR (module_id_t module_idP,
(ul_req_body->ul_config_pdu_list[i].pdu_type == NFAPI_UL_CONFIG_ULSCH_HARQ_PDU_TYPE) ||
(ul_req_body->ul_config_pdu_list[i].pdu_type == NFAPI_UL_CONFIG_ULSCH_CQI_RI_PDU_TYPE) ||
(ul_req_body->ul_config_pdu_list[i].pdu_type == NFAPI_UL_CONFIG_ULSCH_CQI_HARQ_RI_PDU_TYPE)) &&
(ul_req_body->ul_config_pdu_list[i].ulsch_pdu.ulsch_pdu_rel8.rnti == UE_list->UE_template[CC_id][UE_id].rnti)) {
(ul_req_body->ul_config_pdu_list[i].ulsch_pdu.ulsch_pdu_rel8.rnti == UE_info->UE_template[CC_id][UE_id].rnti)) {
skip_ue = 1;
pdu_list_index = i;
break;
}
/* If there is already an HARQ pdu, convert to SR_HARQ */
else if ((ul_req_body->ul_config_pdu_list[i].pdu_type == NFAPI_UL_CONFIG_UCI_HARQ_PDU_TYPE) &&
(ul_req_body->ul_config_pdu_list[i].uci_harq_pdu.ue_information.ue_information_rel8.rnti == UE_list->UE_template[CC_id][UE_id].rnti)) {
(ul_req_body->ul_config_pdu_list[i].uci_harq_pdu.ue_information.ue_information_rel8.rnti == UE_info->UE_template[CC_id][UE_id].rnti)) {
is_harq = 1;
pdu_list_index = i;
break;
......@@ -374,20 +374,20 @@ schedule_SR (module_id_t module_idP,
frameP,
subframeP,
UE_id,
UE_list->UE_template[CC_id][UE_id].rnti,
UE_info->UE_template[CC_id][UE_id].rnti,
is_harq);
/* Check Rel10 or Rel8 SR */
if ((UE_list-> UE_template[CC_id][UE_id].physicalConfigDedicated->ext2) &&
(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->ext2->schedulingRequestConfig_v1020) &&
(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->ext2->schedulingRequestConfig_v1020)) {
if ((UE_info-> UE_template[CC_id][UE_id].physicalConfigDedicated->ext2) &&
(UE_info->UE_template[CC_id][UE_id].physicalConfigDedicated->ext2->schedulingRequestConfig_v1020) &&
(UE_info->UE_template[CC_id][UE_id].physicalConfigDedicated->ext2->schedulingRequestConfig_v1020)) {
sr.sr_information_rel10.tl.tag = NFAPI_UL_CONFIG_REQUEST_SR_INFORMATION_REL10_TAG;
sr.sr_information_rel10.number_of_pucch_resources = 1;
sr.sr_information_rel10.pucch_index_p1 = *UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->ext2->schedulingRequestConfig_v1020->sr_PUCCH_ResourceIndexP1_r10;
sr.sr_information_rel10.pucch_index_p1 = *UE_info->UE_template[CC_id][UE_id].physicalConfigDedicated->ext2->schedulingRequestConfig_v1020->sr_PUCCH_ResourceIndexP1_r10;
LOG_D(MAC, "REL10 PUCCH INDEX P1:%d \n", sr.sr_information_rel10.pucch_index_p1);
} else {
sr.sr_information_rel8.tl.tag = NFAPI_UL_CONFIG_REQUEST_SR_INFORMATION_REL8_TAG;
sr.sr_information_rel8.pucch_index = UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->schedulingRequestConfig->choice.setup.sr_PUCCH_ResourceIndex;
sr.sr_information_rel8.pucch_index = UE_info->UE_template[CC_id][UE_id].physicalConfigDedicated->schedulingRequestConfig->choice.setup.sr_PUCCH_ResourceIndex;
LOG_D(MAC, "REL8 PUCCH INDEX:%d\n", sr.sr_information_rel8.pucch_index);
}
......@@ -400,7 +400,7 @@ schedule_SR (module_id_t module_idP,
} else {
ul_req_body->ul_config_pdu_list[ul_req_body->number_of_pdus].pdu_type = NFAPI_UL_CONFIG_UCI_SR_PDU_TYPE;
ul_req_body->ul_config_pdu_list[ul_req_body->number_of_pdus].uci_sr_pdu.ue_information.ue_information_rel8.tl.tag = NFAPI_UL_CONFIG_REQUEST_UE_INFORMATION_REL8_TAG;
ul_req_body->ul_config_pdu_list[ul_req_body->number_of_pdus].uci_sr_pdu.ue_information.ue_information_rel8.rnti = UE_list->UE_template[CC_id][UE_id].rnti;
ul_req_body->ul_config_pdu_list[ul_req_body->number_of_pdus].uci_sr_pdu.ue_information.ue_information_rel8.rnti = UE_info->UE_template[CC_id][UE_id].rnti;
ul_req_body->ul_config_pdu_list[ul_req_body->number_of_pdus].uci_sr_pdu.ue_information.ue_information_rel11.tl.tag = 0;
ul_req_body->ul_config_pdu_list[ul_req_body->number_of_pdus].uci_sr_pdu.ue_information.ue_information_rel13.tl.tag = 0;
ul_req_body->ul_config_pdu_list[ul_req_body->number_of_pdus].uci_sr_pdu.sr_information = sr;
......@@ -415,20 +415,20 @@ schedule_SR (module_id_t module_idP,
void
check_ul_failure(module_id_t module_idP, int CC_id, int UE_id,
frame_t frameP, sub_frame_t subframeP) {
UE_list_t *UE_list = &RC.mac[module_idP]->UE_list;
UE_info_t *UE_info = &RC.mac[module_idP]->UE_info;
nfapi_dl_config_request_t *DL_req = &RC.mac[module_idP]->DL_req[0];
uint16_t rnti = UE_RNTI(module_idP, UE_id);
COMMON_channels_t *cc = RC.mac[module_idP]->common_channels;
// check uplink failure
if ((UE_list->UE_sched_ctrl[UE_id].ul_failure_timer > 0) &&
(UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 0)) {
if (UE_list->UE_sched_ctrl[UE_id].ul_failure_timer == 1)
if ((UE_info->UE_sched_ctrl[UE_id].ul_failure_timer > 0) &&
(UE_info->UE_sched_ctrl[UE_id].ul_out_of_sync == 0)) {
if (UE_info->UE_sched_ctrl[UE_id].ul_failure_timer == 1)
LOG_I(MAC, "UE %d rnti %x: UL Failure timer %d \n", UE_id, rnti,
UE_list->UE_sched_ctrl[UE_id].ul_failure_timer);
UE_info->UE_sched_ctrl[UE_id].ul_failure_timer);
if (UE_list->UE_sched_ctrl[UE_id].ra_pdcch_order_sent == 0) {
UE_list->UE_sched_ctrl[UE_id].ra_pdcch_order_sent = 1;
if (UE_info->UE_sched_ctrl[UE_id].ra_pdcch_order_sent == 0) {
UE_info->UE_sched_ctrl[UE_id].ra_pdcch_order_sent = 1;
// add a format 1A dci for this UE to request an RA procedure (only one UE per subframe)
nfapi_dl_config_request_pdu_t *dl_config_pdu = &DL_req[CC_id].dl_config_request_body.dl_config_pdu_list[DL_req[CC_id].dl_config_request_body.number_pdu];
memset((void *) dl_config_pdu, 0,sizeof(nfapi_dl_config_request_pdu_t));
......@@ -437,7 +437,7 @@ check_ul_failure(module_id_t module_idP, int CC_id, int UE_id,
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tl.tag = NFAPI_DL_CONFIG_REQUEST_DCI_DL_PDU_REL8_TAG;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format = NFAPI_DL_DCI_FORMAT_1A;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level = get_aggregation(get_bw_index(module_idP, CC_id),
UE_list->UE_sched_ctrl[UE_id].
UE_info->UE_sched_ctrl[UE_id].
dl_cqi[CC_id], format1A);
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti = rnti;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti_type = 1; // CRNTI : see Table 4-10 from SCF082 - nFAPI specifications
......@@ -453,22 +453,22 @@ check_ul_failure(module_id_t module_idP, int CC_id, int UE_id,
LOG_D(MAC,
"UE %d rnti %x: sending PDCCH order for RAPROC (failure timer %d), resource_block_coding %d \n",
UE_id, rnti,
UE_list->UE_sched_ctrl[UE_id].ul_failure_timer,
UE_info->UE_sched_ctrl[UE_id].ul_failure_timer,
dl_config_pdu->dci_dl_pdu.
dci_dl_pdu_rel8.resource_block_coding);
} else { // ra_pdcch_sent==1
LOG_D(MAC,
"UE %d rnti %x: sent PDCCH order for RAPROC waiting (failure timer %d) \n",
UE_id, rnti,
UE_list->UE_sched_ctrl[UE_id].ul_failure_timer);
UE_info->UE_sched_ctrl[UE_id].ul_failure_timer);
if ((UE_list->UE_sched_ctrl[UE_id].ul_failure_timer % 80) == 0) UE_list->UE_sched_ctrl[UE_id].ra_pdcch_order_sent = 0; // resend every 8 frames
if ((UE_info->UE_sched_ctrl[UE_id].ul_failure_timer % 80) == 0) UE_info->UE_sched_ctrl[UE_id].ra_pdcch_order_sent = 0; // resend every 8 frames
}
UE_list->UE_sched_ctrl[UE_id].ul_failure_timer++;
UE_info->UE_sched_ctrl[UE_id].ul_failure_timer++;
// check threshold
if (UE_list->UE_sched_ctrl[UE_id].ul_failure_timer > 4000) {
if (UE_info->UE_sched_ctrl[UE_id].ul_failure_timer > 4000) {
// note: probably ul_failure_timer should be less than UE radio link failure time(see T310/N310/N311)
if (NODE_IS_DU(RC.rrc[module_idP]->node_type)) {
MessageDef *m = itti_alloc_new_message(TASK_MAC_ENB, F1AP_UE_CONTEXT_RELEASE_REQ);
......@@ -490,17 +490,17 @@ check_ul_failure(module_id_t module_idP, int CC_id, int UE_id,
rnti);
}
UE_list->UE_sched_ctrl[UE_id].ul_failure_timer = 0;
UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync = 1;
UE_info->UE_sched_ctrl[UE_id].ul_failure_timer = 0;
UE_info->UE_sched_ctrl[UE_id].ul_out_of_sync = 1;
}
} // ul_failure_timer>0
UE_list->UE_sched_ctrl[UE_id].uplane_inactivity_timer++;
UE_info->UE_sched_ctrl[UE_id].uplane_inactivity_timer++;
if((U_PLANE_INACTIVITY_VALUE != 0) && (UE_list->UE_sched_ctrl[UE_id].uplane_inactivity_timer > (U_PLANE_INACTIVITY_VALUE * 10))) {
if((U_PLANE_INACTIVITY_VALUE != 0) && (UE_info->UE_sched_ctrl[UE_id].uplane_inactivity_timer > (U_PLANE_INACTIVITY_VALUE * 10))) {
LOG_D(MAC,"UE %d rnti %x: U-Plane Failure after repeated PDCCH orders: Triggering RRC \n",UE_id,rnti);
mac_eNB_rrc_uplane_failure(module_idP,CC_id,frameP,subframeP,rnti);
UE_list->UE_sched_ctrl[UE_id].uplane_inactivity_timer = 0;
UE_info->UE_sched_ctrl[UE_id].uplane_inactivity_timer = 0;
}// time > 60s
}
......@@ -564,7 +564,7 @@ eNB_dlsch_ulsch_scheduler(module_id_t module_idP,
int CC_id = 0;
int UE_id = -1;
eNB_MAC_INST *eNB = RC.mac[module_idP];
UE_list_t *UE_list = &(eNB->UE_list);
UE_info_t *UE_info = &(eNB->UE_info);
COMMON_channels_t *cc = eNB->common_channels;
UE_sched_ctrl_t *UE_scheduling_control = NULL;
start_meas(&(eNB->eNB_scheduler));
......@@ -583,16 +583,16 @@ eNB_dlsch_ulsch_scheduler(module_id_t module_idP,
/* Refresh UE list based on UEs dropped by PHY in previous subframe */
for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; UE_id++) {
if (UE_list->active[UE_id]) {
if (UE_info->active[UE_id]) {
rnti = UE_RNTI(module_idP, UE_id);
CC_id = UE_PCCID(module_idP, UE_id);
UE_scheduling_control = &(UE_list->UE_sched_ctrl[UE_id]);
UE_scheduling_control = &(UE_info->UE_sched_ctrl[UE_id]);
if (((frameP & 127) == 0) && (subframeP == 0)) {
LOG_I(MAC,"UE rnti %x : %s, PHR %d dB DL CQI %d PUSCH SNR %d PUCCH SNR %d\n",
rnti,
UE_scheduling_control->ul_out_of_sync == 0 ? "in synch" : "out of sync",
UE_list->UE_template[CC_id][UE_id].phr_info,
UE_info->UE_template[CC_id][UE_id].phr_info,
UE_scheduling_control->dl_cqi[CC_id],
(5 * UE_scheduling_control->pusch_snr[CC_id] - 640) / 10,
(5 * UE_scheduling_control->pucch1_snr[CC_id] - 640) / 10);
......@@ -600,7 +600,7 @@ eNB_dlsch_ulsch_scheduler(module_id_t module_idP,
RC.eNB[module_idP][CC_id]->pusch_stats_bsr[UE_id][(frameP * 10) + subframeP] = -63;
if (UE_id == UE_list->head) {
if (UE_id == UE_info->list.head) {
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_UE0_BSR, RC.eNB[module_idP][CC_id]->pusch_stats_bsr[UE_id][(frameP * 10) + subframeP]);
}
......@@ -734,7 +734,7 @@ eNB_dlsch_ulsch_scheduler(module_id_t module_idP,
* here for the current subframe. The variable 'UE_scheduling_control->in_active_time' should be updated
* ONLY here. The variable can then be used for testing the actual state of the UE for scheduling purpose.
*/
UE_template = &(UE_list->UE_template[CC_id][UE_id]);
UE_template = &(UE_info->UE_template[CC_id][UE_id]);
/* (a)synchronous HARQ processes handling for Active Time */
for (int harq_process_id = 0; harq_process_id < 8; harq_process_id++) {
......
......@@ -587,7 +587,7 @@ generate_Msg4(module_id_t module_idP,
{
eNB_MAC_INST *mac = RC.mac[module_idP];
COMMON_channels_t *cc = mac->common_channels;
UE_list_t *UE_list = &(mac->UE_list);
UE_info_t *UE_info = &mac->UE_info;
int16_t rrc_sdu_length = 0;
uint16_t msg4_padding = 0;
uint16_t msg4_post_padding = 0;
......@@ -831,7 +831,7 @@ generate_Msg4(module_id_t module_idP,
dl_req_body->number_pdu++;
ra->state = WAITMSG4ACK;
lcid = 0;
UE_list->UE_sched_ctrl[UE_id].round[CC_idP][ra->harq_pid] = 0;
UE_info->UE_sched_ctrl[UE_id].round[CC_idP][ra->harq_pid] = 0;
msg4_header = 1 + 6 + 1; // CR header, CR CE, SDU header
AssertFatal((ra->msg4_TBsize - ra->msg4_rrc_sdu_length - msg4_header)>=0,
"msg4_TBS %d is too small, change mcs to increase by %d bytes\n",ra->msg4_TBsize,ra->msg4_rrc_sdu_length+msg4_header-ra->msg4_TBsize);
......@@ -848,7 +848,7 @@ generate_Msg4(module_id_t module_idP,
module_idP, CC_idP, frameP, subframeP, ra->msg4_TBsize, ra->msg4_rrc_sdu_length, msg4_header, msg4_padding, msg4_post_padding);
DevAssert (UE_id != UE_INDEX_INVALID); // FIXME not sure how to gracefully return
// CHECK THIS: &cc[CC_idP].CCCH_pdu.payload[0]
offset = generate_dlsch_header ((unsigned char *) mac->UE_list.DLSCH_pdu[CC_idP][0][(unsigned char) UE_id].payload[0], 1, //num_sdus
offset = generate_dlsch_header ((unsigned char *) mac->UE_info.DLSCH_pdu[CC_idP][0][(unsigned char) UE_id].payload[0], 1, //num_sdus
(unsigned short *) &ra->msg4_rrc_sdu_length, //
&lcid, // sdu_lcid
255, // no drx
......@@ -856,7 +856,7 @@ generate_Msg4(module_id_t module_idP,
ra->cont_res_id, // contention res id
msg4_padding, // no padding
msg4_post_padding);
memcpy ((void *) &mac->UE_list.DLSCH_pdu[CC_idP][0][(unsigned char) UE_id].payload[0][(unsigned char) offset], &cc[CC_idP].CCCH_pdu.payload[0], ra->msg4_rrc_sdu_length);
memcpy ((void *) &mac->UE_info.DLSCH_pdu[CC_idP][0][(unsigned char) UE_id].payload[0][(unsigned char) offset], &cc[CC_idP].CCCH_pdu.payload[0], ra->msg4_rrc_sdu_length);
// DL request
mac->TX_req[CC_idP].sfn_sf = (frameP << 4) + subframeP;
TX_req = &mac->TX_req[CC_idP].tx_request_body.tx_pdu_list[mac->TX_req[CC_idP].tx_request_body.number_of_pdus];
......@@ -864,7 +864,7 @@ generate_Msg4(module_id_t module_idP,
TX_req->pdu_index = mac->pdu_index[CC_idP]++;
TX_req->num_segments = 1;
TX_req->segments[0].segment_length = ra->msg4_TBsize;
TX_req->segments[0].segment_data = mac->UE_list.DLSCH_pdu[CC_idP][0][(unsigned char) UE_id].payload[0];
TX_req->segments[0].segment_data = mac->UE_info.DLSCH_pdu[CC_idP][0][(unsigned char) UE_id].payload[0];
mac->TX_req[CC_idP].tx_request_body.number_of_pdus++;
// Program ACK/NAK for Msg4 PDSCH
int absSF = (frameP * 10) + subframeP;
......@@ -899,8 +899,8 @@ generate_Msg4(module_id_t module_idP,
ul_req_body->number_of_pdus++;
T (T_ENB_MAC_UE_DL_PDU_WITH_DATA, T_INT (module_idP), T_INT (CC_idP), T_INT (ra->rnti), T_INT (frameP), T_INT (subframeP),
T_INT (0 /*harq_pid always 0? */ ), T_BUFFER (&mac->UE_list.DLSCH_pdu[CC_idP][0][UE_id].payload[0], ra->msg4_TBsize));
trace_pdu (DIRECTION_DOWNLINK, (uint8_t *) mac->UE_list.DLSCH_pdu[CC_idP][0][(unsigned char) UE_id].payload[0],
T_INT (0 /*harq_pid always 0? */ ), T_BUFFER (&mac->UE_info.DLSCH_pdu[CC_idP][0][UE_id].payload[0], ra->msg4_TBsize));
trace_pdu (DIRECTION_DOWNLINK, (uint8_t *) mac->UE_info.DLSCH_pdu[CC_idP][0][(unsigned char) UE_id].payload[0],
ra->msg4_rrc_sdu_length,
UE_id, 3, UE_RNTI (module_idP, UE_id),
mac->frame, mac->subframe, 0, 0);
......@@ -963,10 +963,10 @@ generate_Msg4(module_id_t module_idP,
1, // tpc, none
getRIV(N_RB_DL, first_rb, 4), // resource_block_coding
ra->msg4_mcs, // mcs
1 - UE_list->UE_template[CC_idP][UE_id].oldNDI[ra->harq_pid],
1 - UE_info->UE_template[CC_idP][UE_id].oldNDI[ra->harq_pid],
0, // rv
0); // vrb_flag
UE_list->UE_template[CC_idP][UE_id].oldNDI[ra->harq_pid] = 1 - UE_list->UE_template[CC_idP][UE_id].oldNDI[ra->harq_pid];
UE_info->UE_template[CC_idP][UE_id].oldNDI[ra->harq_pid] = 1 - UE_info->UE_template[CC_idP][UE_id].oldNDI[ra->harq_pid];
LOG_D(MAC,
"Frame %d, subframe %d: Msg4 DCI pdu_num %d (rnti %x,rnti_type %d,harq_pid %d, resource_block_coding (%p) %d\n",
frameP, subframeP, dl_req_body->number_pdu,
......@@ -1000,7 +1000,7 @@ generate_Msg4(module_id_t module_idP,
lcid = 0;
// put HARQ process round to 0
ra->harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP,subframeP);
UE_list->UE_sched_ctrl[UE_id].round[CC_idP][ra->harq_pid] = 0;
UE_info->UE_sched_ctrl[UE_id].round[CC_idP][ra->harq_pid] = 0;
if ((ra->msg4_TBsize - rrc_sdu_length - msg4_header) <= 2) {
msg4_padding = ra->msg4_TBsize - rrc_sdu_length - msg4_header;
......@@ -1018,7 +1018,7 @@ generate_Msg4(module_id_t module_idP,
DevAssert(UE_id != UE_INDEX_INVALID); // FIXME not sure how to gracefully return
// CHECK THIS: &cc[CC_idP].CCCH_pdu.payload[0]
int num_sdus = rrc_sdu_length > 0 ? 1 : 0;
offset = generate_dlsch_header((unsigned char *) mac->UE_list.DLSCH_pdu[CC_idP][0][(unsigned char) UE_id].payload[0],
offset = generate_dlsch_header((unsigned char *) mac->UE_info.DLSCH_pdu[CC_idP][0][(unsigned char) UE_id].payload[0],
num_sdus, //num_sdus
(unsigned short *) &rrc_sdu_length, //
&lcid, // sdu_lcid
......@@ -1027,7 +1027,7 @@ generate_Msg4(module_id_t module_idP,
ra->cont_res_id, // contention res id
msg4_padding, // no padding
msg4_post_padding);
memcpy((void *) &mac->UE_list.DLSCH_pdu[CC_idP][0][(unsigned char)UE_id].payload[0][(unsigned char)offset],
memcpy((void *) &mac->UE_info.DLSCH_pdu[CC_idP][0][(unsigned char)UE_id].payload[0][(unsigned char)offset],
&cc[CC_idP].CCCH_pdu.payload[0], rrc_sdu_length);
// DLSCH Config
fill_nfapi_dlsch_config(mac, dl_req_body, ra->msg4_TBsize, mac->pdu_index[CC_idP], ra->rnti, 2, // resource_allocation_type : format 1A/1B/1D
......@@ -1058,7 +1058,7 @@ generate_Msg4(module_id_t module_idP,
(frameP * 10) + subframeP,
rrc_sdu_length+offset,
mac->pdu_index[CC_idP],
mac->UE_list.
mac->UE_info.
DLSCH_pdu[CC_idP][0][(unsigned char)UE_id].payload[0]);
mac->pdu_index[CC_idP]++;
dl_req->sfn_sf = mac->TX_req[CC_idP].sfn_sf;
......@@ -1074,11 +1074,11 @@ generate_Msg4(module_id_t module_idP,
T(T_ENB_MAC_UE_DL_PDU_WITH_DATA, T_INT(module_idP),
T_INT(CC_idP), T_INT(ra->rnti), T_INT(frameP),
T_INT(subframeP), T_INT(0 /*harq_pid always 0? */ ),
T_BUFFER(&mac->UE_list.DLSCH_pdu[CC_idP][0][UE_id].
T_BUFFER(&mac->UE_info.DLSCH_pdu[CC_idP][0][UE_id].
payload[0], ra->msg4_TBsize));
trace_pdu(DIRECTION_DOWNLINK,
(uint8_t *) mac->
UE_list.DLSCH_pdu[CC_idP][0][(unsigned char)UE_id].payload[0],
UE_info.DLSCH_pdu[CC_idP][0][(unsigned char)UE_id].payload[0],
rrc_sdu_length, UE_id, WS_C_RNTI,
UE_RNTI(module_idP, UE_id), mac->frame,
mac->subframe, 0, 0);
......@@ -1116,14 +1116,14 @@ check_Msg4_retransmission(module_id_t module_idP, int CC_idP,
int first_rb;
int N_RB_DL;
nfapi_dl_config_request_pdu_t *dl_config_pdu;
UE_list_t *UE_list = &mac->UE_list;
UE_info_t *UE_info = &mac->UE_info;
nfapi_dl_config_request_t *dl_req;
nfapi_dl_config_request_body_t *dl_req_body;
int round;
// check HARQ status and retransmit if necessary
UE_id = find_UE_id(module_idP, ra->rnti);
AssertFatal(UE_id >= 0, "Can't find UE for t-crnti\n");
round = UE_list->UE_sched_ctrl[UE_id].round[CC_idP][ra->harq_pid];
round = UE_info->UE_sched_ctrl[UE_id].round[CC_idP][ra->harq_pid];
vrb_map = cc[CC_idP].vrb_map;
dl_req = &mac->DL_req[CC_idP];
dl_req_body = &dl_req->dl_config_request_body;
......@@ -1156,7 +1156,7 @@ check_Msg4_retransmission(module_id_t module_idP, int CC_idP,
1, // tpc, none
getRIV(N_RB_DL, first_rb, 4), // resource_block_coding
ra->msg4_mcs, // mcs
UE_list->UE_template[CC_idP][UE_id].oldNDI[ra->harq_pid],
UE_info->UE_template[CC_idP][UE_id].oldNDI[ra->harq_pid],
round & 3, // rv
0); // vrb_flag
......@@ -1226,8 +1226,8 @@ check_Msg4_retransmission(module_id_t module_idP, int CC_idP,
LOG_D(MAC,"[eNB %d][RAPROC] Frame %d, Subframe %d: state:IDLE\n", module_idP, frameP, subframeP);
UE_id = find_UE_id(module_idP, ra->rnti);
DevAssert(UE_id != -1);
mac->UE_list.UE_template[UE_PCCID(module_idP, UE_id)][UE_id].configured = TRUE;
mac->UE_list.UE_template[UE_PCCID(module_idP, UE_id)][UE_id].pusch_repetition_levels=ra->pusch_repetition_levels;
mac->UE_info.UE_template[UE_PCCID(module_idP, UE_id)][UE_id].configured = TRUE;
mac->UE_info.UE_template[UE_PCCID(module_idP, UE_id)][UE_id].pusch_repetition_levels=ra->pusch_repetition_levels;
cancel_ra_proc(module_idP, CC_idP, frameP, ra->rnti);
}
}
......
......@@ -94,7 +94,7 @@ schedule_next_dlue(module_id_t module_idP,
//------------------------------------------------------------------------------
{
int next_ue;
UE_list_t *UE_list = &RC.mac[module_idP]->UE_list;
UE_list_t *UE_list = &RC.mac[module_idP]->UE_info.list;
for (next_ue = UE_list->head; next_ue >= 0; next_ue = UE_list->next[next_ue]) {
if (eNB_dlsch_info[module_idP][CC_id][next_ue].status == S_DL_WAITING) {
......@@ -317,12 +317,12 @@ set_ul_DAI(int module_idP,
//------------------------------------------------------------------------------
{
eNB_MAC_INST *eNB = RC.mac[module_idP];
UE_list_t *UE_list = &eNB->UE_list;
UE_info_t *UE_info = &eNB->UE_info;
unsigned char DAI;
COMMON_channels_t *cc = &eNB->common_channels[CC_idP];
if (cc->tdd_Config != NULL) { //TDD
DAI = (UE_list->UE_template[CC_idP][UE_idP].DAI - 1) & 3;
DAI = (UE_info->UE_template[CC_idP][UE_idP].DAI - 1) & 3;
LOG_D(MAC, "[eNB %d] CC_id %d Frame %d, subframe %d: DAI %d for UE %d\n",
module_idP,
CC_idP,
......@@ -341,20 +341,20 @@ set_ul_DAI(int module_idP,
switch (subframeP) {
case 0:
case 1:
UE_list->UE_template[CC_idP][UE_idP].DAI_ul[7] = DAI;
UE_info->UE_template[CC_idP][UE_idP].DAI_ul[7] = DAI;
break;
case 4:
UE_list->UE_template[CC_idP][UE_idP].DAI_ul[8] = DAI;
UE_info->UE_template[CC_idP][UE_idP].DAI_ul[8] = DAI;
break;
case 5:
case 6:
UE_list->UE_template[CC_idP][UE_idP].DAI_ul[2] = DAI;
UE_info->UE_template[CC_idP][UE_idP].DAI_ul[2] = DAI;
break;
case 9:
UE_list->UE_template[CC_idP][UE_idP].DAI_ul[3] = DAI;
UE_info->UE_template[CC_idP][UE_idP].DAI_ul[3] = DAI;
break;
}
......@@ -362,30 +362,30 @@ set_ul_DAI(int module_idP,
case 2:
// if ((subframeP==3)||(subframeP==8))
// UE_list->UE_template[CC_idP][UE_idP].DAI_ul = DAI;
// UE_info->UE_template[CC_idP][UE_idP].DAI_ul = DAI;
break;
case 3:
//if ((subframeP==6)||(subframeP==8)||(subframeP==0)) {
// LOG_D(MAC,"schedule_ue_spec: setting UL DAI to %d for subframeP %d => %d\n",DAI,subframeP, ((subframeP+8)%10)>>1);
// UE_list->UE_template[CC_idP][UE_idP].DAI_ul[((subframeP+8)%10)>>1] = DAI;
// UE_info->UE_template[CC_idP][UE_idP].DAI_ul[((subframeP+8)%10)>>1] = DAI;
//}
switch (subframeP) {
case 5:
case 6:
case 1:
UE_list->UE_template[CC_idP][UE_idP].DAI_ul[2] = DAI;
UE_info->UE_template[CC_idP][UE_idP].DAI_ul[2] = DAI;
break;
case 7:
case 8:
UE_list->UE_template[CC_idP][UE_idP].DAI_ul[3] = DAI;
UE_info->UE_template[CC_idP][UE_idP].DAI_ul[3] = DAI;
break;
case 9:
case 0:
UE_list->UE_template[CC_idP][UE_idP].DAI_ul[4] = DAI;
UE_info->UE_template[CC_idP][UE_idP].DAI_ul[4] = DAI;
break;
default:
......@@ -396,17 +396,17 @@ set_ul_DAI(int module_idP,
case 4:
// if ((subframeP==8)||(subframeP==9))
// UE_list->UE_template[CC_idP][UE_idP].DAI_ul = DAI;
// UE_info->UE_template[CC_idP][UE_idP].DAI_ul = DAI;
break;
case 5:
// if (subframeP==8)
// UE_list->UE_template[CC_idP][UE_idP].DAI_ul = DAI;
// UE_info->UE_template[CC_idP][UE_idP].DAI_ul = DAI;
break;
case 6:
// if ((subframeP==1)||(subframeP==4)||(subframeP==6)||(subframeP==9))
// UE_list->UE_template[CC_idP][UE_idP].DAI_ul = DAI;
// UE_info->UE_template[CC_idP][UE_idP].DAI_ul = DAI;
break;
default:
......@@ -474,7 +474,7 @@ schedule_ue_spec(module_id_t module_idP,
int sdu_length_total = 0;
eNB_MAC_INST *eNB = RC.mac[module_idP];
COMMON_channels_t *cc = eNB->common_channels;
UE_list_t *UE_list = &eNB->UE_list;
UE_info_t *UE_info = &eNB->UE_info;
int continue_flag = 0;
int32_t snr, target_snr;
int tpc = 1;
......@@ -558,7 +558,7 @@ schedule_ue_spec(module_id_t module_idP,
total_nb_available_rb--;
// store the global enb stats:
eNB->eNB_stats[CC_id].num_dlactive_UEs = UE_list->num_UEs;
eNB->eNB_stats[CC_id].num_dlactive_UEs = UE_info->num_UEs;
eNB->eNB_stats[CC_id].available_prbs = total_nb_available_rb;
eNB->eNB_stats[CC_id].total_available_prbs += total_nb_available_rb;
eNB->eNB_stats[CC_id].dlsch_bytes_tx = 0;
......@@ -581,24 +581,24 @@ schedule_ue_spec(module_id_t module_idP,
CC_id);
dl_req = &eNB->DL_req[CC_id].dl_config_request_body;
for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
for (UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
LOG_D(MAC, "doing schedule_ue_spec for CC_id %d UE %d\n",
CC_id,
UE_id);
continue_flag = 0; // reset the flag to allow allocation for the remaining UEs
rnti = UE_RNTI(module_idP, UE_id);
ue_sched_ctrl = &UE_list->UE_sched_ctrl[UE_id];
ue_template = &UE_list->UE_template[CC_id][UE_id];
ue_sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
ue_template = &UE_info->UE_template[CC_id][UE_id];
if (ue_template->rach_resource_type > 0) {
continue_flag = 1;
}
if (&(UE_list->eNB_UE_stats[CC_id][UE_id]) == NULL) {
if (&(UE_info->eNB_UE_stats[CC_id][UE_id]) == NULL) {
LOG_D(MAC, "[eNB] Cannot find eNB_UE_stats\n");
continue_flag = 1;
} else {
eNB_UE_stats = &(UE_list->eNB_UE_stats[CC_id][UE_id]);
eNB_UE_stats = &(UE_info->eNB_UE_stats[CC_id][UE_id]);
}
if (continue_flag != 1) {
......@@ -650,7 +650,7 @@ schedule_ue_spec(module_id_t module_idP,
UE_id,
CC_id,
cc[CC_id].tdd_Config->subframeAssignment,
UE_list);
UE_info);
// update UL DAI after DLSCH scheduling
set_ul_DAI(module_idP,
UE_id,
......@@ -1316,7 +1316,7 @@ schedule_ue_spec(module_id_t module_idP,
post_padding = 1;
}
offset = generate_dlsch_header((unsigned char *) UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0],
offset = generate_dlsch_header((unsigned char *) UE_info->DLSCH_pdu[CC_id][0][UE_id].payload[0],
num_sdus, //num_sdus
sdu_lengths, //
sdu_lcids,
......@@ -1360,7 +1360,7 @@ schedule_ue_spec(module_id_t module_idP,
LOG_T(MAC, "\n");
#endif
// cycle through SDUs and place in dlsch_buffer
dlsch_pdu = &UE_list->DLSCH_pdu[CC_id][0][UE_id];
dlsch_pdu = &UE_info->DLSCH_pdu[CC_id][0][UE_id];
memcpy(&dlsch_pdu->payload[0][offset],
dlsch_buffer,
sdu_length_total);
......@@ -1654,7 +1654,7 @@ schedule_ue_spec_br(module_id_t module_idP,
unsigned char dlsch_buffer[MAX_DLSCH_PAYLOAD_BYTES];
eNB_MAC_INST *mac = RC.mac[module_idP];
COMMON_channels_t *cc = mac->common_channels;
UE_list_t *UE_list = &mac->UE_list;
UE_info_t *UE_info = &mac->UE_info;
UE_TEMPLATE *UE_template = NULL;
UE_sched_ctrl_t *ue_sched_ctl = NULL;
nfapi_dl_config_request_pdu_t *dl_config_pdu = NULL;
......@@ -1719,7 +1719,7 @@ schedule_ue_spec_br(module_id_t module_idP,
}
}
for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
for (UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
int harq_pid = 0;
rnti = UE_RNTI(module_idP, UE_id);
......@@ -1727,8 +1727,8 @@ schedule_ue_spec_br(module_id_t module_idP,
continue;
}
ue_sched_ctl = &(UE_list->UE_sched_ctrl[UE_id]);
UE_template = &(UE_list->UE_template[CC_id][UE_id]);
ue_sched_ctl = &UE_info->UE_sched_ctrl[UE_id];
UE_template = &UE_info->UE_template[CC_id][UE_id];
if (UE_template->rach_resource_type == 0) {
continue;
......@@ -1836,8 +1836,8 @@ schedule_ue_spec_br(module_id_t module_idP,
sdu_lengths[0]);
sdu_length_total = sdu_lengths[0];
sdu_lcids[0] = DCCH;
UE_list->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[DCCH]+=1;
UE_list->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[DCCH]+=sdu_lengths[0];
UE_info->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[DCCH]+=1;
UE_info->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[DCCH]+=sdu_lengths[0];
num_sdus = 1;
} else {
header_len_dcch = 0;
......@@ -1889,8 +1889,8 @@ schedule_ue_spec_br(module_id_t module_idP,
sdu_lcids[num_sdus] = DCCH1;
sdu_length_total += sdu_lengths[num_sdus];
header_len_dcch += 2;
UE_list->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[DCCH1] += 1;
UE_list->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[DCCH1] += sdu_lengths[num_sdus];
UE_info->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[DCCH1] += 1;
UE_info->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[DCCH1] += sdu_lengths[num_sdus];
num_sdus++;
}
}
......@@ -2042,7 +2042,7 @@ schedule_ue_spec_br(module_id_t module_idP,
post_padding = TBS - sdu_length_total - header_len_dcch - header_len_dtch - ta_len; // 1 is for the postpadding header
}
offset = generate_dlsch_header((unsigned char *)UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0],
offset = generate_dlsch_header((unsigned char *)UE_info->DLSCH_pdu[CC_id][0][UE_id].payload[0],
num_sdus, //num_sdus
sdu_lengths, //
sdu_lcids,
......@@ -2075,15 +2075,15 @@ schedule_ue_spec_br(module_id_t module_idP,
}
/* Cycle through SDUs and place in dlsch_buffer */
memcpy(&UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][offset], dlsch_buffer, sdu_length_total);
memcpy(&UE_info->DLSCH_pdu[CC_id][0][UE_id].payload[0][offset], dlsch_buffer, sdu_length_total);
/* Fill remainder of DLSCH with random data */
for (j = 0; j < (TBS - sdu_length_total - offset); j++) {
UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][offset + sdu_length_total + j] = (char)(taus()&0xff);
UE_info->DLSCH_pdu[CC_id][0][UE_id].payload[0][offset + sdu_length_total + j] = (char)(taus()&0xff);
}
trace_pdu(DIRECTION_DOWNLINK,
(uint8_t *)UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0],
(uint8_t *)UE_info->DLSCH_pdu[CC_id][0][UE_id].payload[0],
TBS,
module_idP,
3,
......@@ -2099,7 +2099,7 @@ schedule_ue_spec_br(module_id_t module_idP,
T_INT(frameP),
T_INT(subframeP),
T_INT(harq_pid),
T_BUFFER(UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0], TBS));
T_BUFFER(UE_info->DLSCH_pdu[CC_id][0][UE_id].payload[0], TBS));
/* Do PUCCH power control */
/* This is the snr */
/* unit is not dBm, it's special from nfapi, convert to dBm */
......@@ -2107,15 +2107,15 @@ schedule_ue_spec_br(module_id_t module_idP,
target_snr = mac->puCch10xSnr / 10;
/* This assumes accumulated tpc */
/* Make sure that we are only sending a tpc update once a frame, otherwise the control loop will freak out */
int32_t framex10psubframe = UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_frame * 10 + UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_subframe;
int32_t framex10psubframe = UE_info->UE_template[CC_id][UE_id].pucch_tpc_tx_frame * 10 + UE_info->UE_template[CC_id][UE_id].pucch_tpc_tx_subframe;
if (((framex10psubframe + 10) <= (frameP * 10 + subframeP)) || // normal case
((framex10psubframe > (frameP * 10 + subframeP)) &&
(((10240 - framex10psubframe +frameP * 10 + subframeP) >= 10)))) { // frame wrap-around
if (ue_sched_ctl->pucch1_cqi_update[CC_id] == 1) {
ue_sched_ctl->pucch1_cqi_update[CC_id] = 0;
UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_frame = frameP;
UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_subframe = subframeP;
UE_info->UE_template[CC_id][UE_id].pucch_tpc_tx_frame = frameP;
UE_info->UE_template[CC_id][UE_id].pucch_tpc_tx_subframe = subframeP;
if (snr > target_snr + 4) {
tpc = 0; //-1
......@@ -2249,7 +2249,7 @@ schedule_ue_spec_br(module_id_t module_idP,
TX_req->pdu_index = mac->pdu_index[CC_id]++;
TX_req->num_segments = 1;
TX_req->segments[0].segment_length = TX_req->pdu_length;
TX_req->segments[0].segment_data = mac->UE_list.DLSCH_pdu[CC_id][0][(unsigned char) UE_id].payload[0];
TX_req->segments[0].segment_data = mac->UE_info.DLSCH_pdu[CC_id][0][(unsigned char) UE_id].payload[0];
mac->TX_req[CC_id].tx_request_body.number_of_pdus++;
ackNAK_absSF = absSF + 4;
ul_req = &mac->UL_req_tmp[CC_id][ackNAK_absSF % 10].ul_config_request_body;
......@@ -2283,9 +2283,9 @@ schedule_ue_spec_br(module_id_t module_idP,
T_INT (frameP),
T_INT (subframeP),
T_INT (0 /* harq_pid always 0? */ ),
T_BUFFER (&mac->UE_list.DLSCH_pdu[CC_id][0][UE_id].payload[0], TX_req->pdu_length));
T_BUFFER (&mac->UE_info.DLSCH_pdu[CC_id][0][UE_id].payload[0], TX_req->pdu_length));
trace_pdu(1,
(uint8_t *) mac->UE_list.DLSCH_pdu[CC_id][0][(unsigned char) UE_id].payload[0],
(uint8_t *) mac->UE_info.DLSCH_pdu[CC_id][0][(unsigned char) UE_id].payload[0],
TX_req->pdu_length,
UE_id,
3,
......@@ -2317,7 +2317,7 @@ fill_DLSCH_dci(module_id_t module_idP,
int i;
int CC_id;
eNB_MAC_INST *eNB = RC.mac[module_idP];
UE_list_t *UE_list = &eNB->UE_list;
UE_info_t *UE_info = &eNB->UE_info;
int N_RBG;
int N_RB_DL;
COMMON_channels_t *cc;
......@@ -2336,7 +2336,7 @@ fill_DLSCH_dci(module_id_t module_idP,
N_RB_DL = to_prb(cc->mib->message.dl_Bandwidth);
// UE specific DCIs
for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
for (UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
dlsch_info = &eNB_dlsch_info[module_idP][CC_id][UE_id];
LOG_T(MAC, "CC_id %d, UE_id: %d => status %d\n",
CC_id,
......@@ -2349,7 +2349,7 @@ fill_DLSCH_dci(module_id_t module_idP,
rnti = UE_RNTI(module_idP, UE_id);
harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP,
subframeP);
ue_template = &UE_list->UE_template[CC_id][UE_id];
ue_template = &UE_info->UE_template[CC_id][UE_id];
nb_rb = ue_template->nb_rb[harq_pid];
/// Synchronizing rballoc with rballoc_sub
......@@ -2425,7 +2425,7 @@ unsigned char *get_dlsch_sdu(module_id_t module_idP,
CC_id,
rntiP,
UE_id);
return ((unsigned char *) &eNB->UE_list.DLSCH_pdu[CC_id][TBindex][UE_id].payload[0]);
return ((unsigned char *) &eNB->UE_info.DLSCH_pdu[CC_id][TBindex][UE_id].payload[0]);
}
LOG_E(MAC, "[eNB %d] Frame %d: CC_id %d UE with RNTI %x does not exist\n",
......@@ -2470,61 +2470,61 @@ set_ue_dai(sub_frame_t subframeP,
int UE_id,
uint8_t CC_id,
uint8_t tdd_config,
UE_list_t *UE_list)
UE_info_t *UE_info)
//------------------------------------------------------------------------------
{
switch (tdd_config) {
case 0:
if (subframeP == 0 || subframeP == 1 || subframeP == 3 || subframeP == 5 || subframeP == 6 || subframeP == 8) {
UE_list->UE_template[CC_id][UE_id].DAI = 0;
UE_info->UE_template[CC_id][UE_id].DAI = 0;
}
break;
case 1:
if (subframeP == 0 || subframeP == 4 || subframeP == 5 || subframeP == 9) {
UE_list->UE_template[CC_id][UE_id].DAI = 0;
UE_info->UE_template[CC_id][UE_id].DAI = 0;
}
break;
case 2:
if (subframeP == 4 || subframeP == 5) {
UE_list->UE_template[CC_id][UE_id].DAI = 0;
UE_info->UE_template[CC_id][UE_id].DAI = 0;
}
break;
case 3:
if (subframeP == 5 || subframeP == 7 || subframeP == 9) {
UE_list->UE_template[CC_id][UE_id].DAI = 0;
UE_info->UE_template[CC_id][UE_id].DAI = 0;
}
break;
case 4:
if (subframeP == 0 || subframeP == 6) {
UE_list->UE_template[CC_id][UE_id].DAI = 0;
UE_info->UE_template[CC_id][UE_id].DAI = 0;
}
break;
case 5:
if (subframeP == 9) {
UE_list->UE_template[CC_id][UE_id].DAI = 0;
UE_info->UE_template[CC_id][UE_id].DAI = 0;
}
break;
case 6:
if (subframeP == 0 || subframeP == 1 || subframeP == 5 || subframeP == 6 || subframeP == 9) {
UE_list->UE_template[CC_id][UE_id].DAI = 0;
UE_info->UE_template[CC_id][UE_id].DAI = 0;
}
break;
default:
UE_list->UE_template[CC_id][UE_id].DAI = 0;
UE_info->UE_template[CC_id][UE_id].DAI = 0;
LOG_I(MAC, "unknown TDD config %d\n",
tdd_config);
break;
......
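set_ue_dai() above resets the per-UE downlink assignment index (DAI) at the start of each TDD DL-association window; which subframes do so depends on the subframe assignment. The same mapping can be written as a per-configuration bitmask — an illustrative reformulation only, not what this commit does (the original also logs unknown configurations):

    /* bit n set => DAI is reset in subframe n (same subframes as the switch above) */
    static const uint16_t dai_reset_sf[7] = {
      /* cfg 0 */ 0x16B,  /* subframes 0,1,3,5,6,8 */
      /* cfg 1 */ 0x231,  /* subframes 0,4,5,9     */
      /* cfg 2 */ 0x030,  /* subframes 4,5         */
      /* cfg 3 */ 0x2A0,  /* subframes 5,7,9       */
      /* cfg 4 */ 0x041,  /* subframes 0,6         */
      /* cfg 5 */ 0x200,  /* subframe  9           */
      /* cfg 6 */ 0x263,  /* subframes 0,1,5,6,9   */
    };
    if (tdd_config > 6 || ((dai_reset_sf[tdd_config] >> subframeP) & 1))
      UE_info->UE_template[CC_id][UE_id].DAI = 0;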
......@@ -120,7 +120,7 @@ void pre_scd_nb_rbs_required( module_id_t module_idP,
if(N_RB_DL==100) step_size=4;
memset(nb_rbs_required, 0, sizeof(uint16_t)*MAX_NUM_CCs*NUMBER_OF_UE_MAX);
UE_list_t *UE_list = &RC.mac[module_idP]->UE_list;
UE_info_t *UE_info = &RC.mac[module_idP]->UE_info;
for (UE_id = 0; UE_id <NUMBER_OF_UE_MAX; UE_id++) {
if (pre_scd_activeUE[UE_id] != TRUE)
......@@ -144,7 +144,7 @@ void pre_scd_nb_rbs_required( module_id_t module_idP,
// Calculate the number of RBs required by each UE on the basis of logical channel's buffer
//update CQI information across component carriers
eNB_UE_stats = &pre_scd_eNB_UE_stats[CC_id][UE_id];
eNB_UE_stats->dlsch_mcs1 = cqi_to_mcs[UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]];
eNB_UE_stats->dlsch_mcs1 = cqi_to_mcs[UE_info->UE_sched_ctrl[UE_id].dl_cqi[CC_id]];
if (UE_template.dl_buffer_total > 0) {
nb_rbs_required[CC_id][UE_id] = search_rbs_required(eNB_UE_stats->dlsch_mcs1, UE_template.dl_buffer_total, N_RB_DL, step_size);
......@@ -175,7 +175,7 @@ void dlsch_scheduler_pre_ue_select_fairRR(
DLSCH_UE_SELECT dlsch_ue_select[MAX_NUM_CCs]) {
eNB_MAC_INST *eNB = RC.mac[module_idP];
COMMON_channels_t *cc = eNB->common_channels;
UE_list_t *UE_list = &eNB->UE_list;
UE_info_t *UE_info = &eNB->UE_info;
UE_sched_ctrl_t *ue_sched_ctl;
uint8_t CC_id;
int UE_id;
......@@ -208,7 +208,7 @@ void dlsch_scheduler_pre_ue_select_fairRR(
DL_req = &eNB->DL_req[CC_id].dl_config_request_body;
for (UE_id = 0; UE_id < NUMBER_OF_UE_MAX; UE_id++) {
if (UE_list->active[UE_id] == FALSE) {
if (UE_info->active[UE_id] == FALSE) {
continue;
}
......@@ -222,12 +222,12 @@ void dlsch_scheduler_pre_ue_select_fairRR(
continue;
}
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
ue_sched_ctl = &UE_info->UE_sched_ctrl[UE_id];
harq_pid = frame_subframe2_dl_harq_pid(cc[CC_id].tdd_Config,frameP,subframeP);
round = ue_sched_ctl->round[CC_id][harq_pid];
if (round != 8) { // retransmission
if(UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid] == 0) {
if(UE_info->UE_template[CC_id][UE_id].nb_rb[harq_pid] == 0) {
continue;
}
......@@ -266,7 +266,7 @@ void dlsch_scheduler_pre_ue_select_fairRR(
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti_type = (format_flag == 0)?2:1;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level = aggregation;
DL_req->number_pdu++;
nb_rbs_required[CC_id][UE_id] = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid];
nb_rbs_required[CC_id][UE_id] = UE_info->UE_template[CC_id][UE_id].nb_rb[harq_pid];
// Insert DLSCH(retransmission) UE into selected UE list
dlsch_ue_select[CC_id].list[dlsch_ue_select[CC_id].ue_num].UE_id = UE_id;
dlsch_ue_select[CC_id].list[dlsch_ue_select[CC_id].ue_num].ue_priority = SCH_DL_RETRANS;
......@@ -284,7 +284,7 @@ void dlsch_scheduler_pre_ue_select_fairRR(
UE_id,
CC_id,
cc[CC_id].tdd_Config->subframeAssignment,
UE_list);
UE_info);
// update UL DAI after DLSCH scheduling
set_ul_DAI(module_idP,UE_id,CC_id,frameP,subframeP);
}
......@@ -324,7 +324,7 @@ void dlsch_scheduler_pre_ue_select_fairRR(
break;
}
if (UE_list->active[UE_id] == FALSE) {
if (UE_info->active[UE_id] == FALSE) {
continue;
}
......@@ -337,7 +337,7 @@ void dlsch_scheduler_pre_ue_select_fairRR(
continue;
}
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
ue_sched_ctl = &UE_info->UE_sched_ctrl[UE_id];
for(i = 0; i<dlsch_ue_select[CC_id].ue_num; i++) {
if(dlsch_ue_select[CC_id].list[i].UE_id == UE_id) {
......@@ -408,7 +408,7 @@ void dlsch_scheduler_pre_ue_select_fairRR(
UE_id,
CC_id,
cc[CC_id].tdd_Config->subframeAssignment,
UE_list);
UE_info);
// update UL DAI after DLSCH scheduling
set_ul_DAI(module_idP,UE_id,CC_id,frameP,subframeP);
}
......@@ -448,7 +448,7 @@ void dlsch_scheduler_pre_ue_select_fairRR(
break;
}
if (UE_list->active[UE_id] == FALSE) {
if (UE_info->active[UE_id] == FALSE) {
continue;
}
......@@ -461,7 +461,7 @@ void dlsch_scheduler_pre_ue_select_fairRR(
continue;
}
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
ue_sched_ctl = &UE_info->UE_sched_ctrl[UE_id];
for(i = 0; i<dlsch_ue_select[CC_id].ue_num; i++) {
if(dlsch_ue_select[CC_id].list[i].UE_id == UE_id) {
......@@ -532,7 +532,7 @@ void dlsch_scheduler_pre_ue_select_fairRR(
UE_id,
CC_id,
cc[CC_id].tdd_Config->subframeAssignment,
UE_list);
UE_info);
// update UL DAI after DLSCH scheduling
set_ul_DAI(module_idP,UE_id,CC_id,frameP,subframeP);
}
......@@ -569,7 +569,7 @@ void dlsch_scheduler_pre_processor_reset_fairRR(
int UE_id;
uint8_t CC_id;
int i, j;
UE_list_t *UE_list;
UE_info_t *UE_info;
UE_sched_ctrl_t *ue_sched_ctl;
int N_RB_DL, RBGsize, RBGsize_last;
int N_RBG[NFAPI_CC_MAX];
......@@ -585,14 +585,14 @@ void dlsch_scheduler_pre_processor_reset_fairRR(
min_rb_unit[CC_id] = get_min_rb_unit(module_idP, CC_id);
for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; ++UE_id) {
UE_list = &RC.mac[module_idP]->UE_list;
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
UE_info = &RC.mac[module_idP]->UE_info;
ue_sched_ctl = &UE_info->UE_sched_ctrl[UE_id];
rnti = UE_RNTI(module_idP, UE_id);
if (rnti == NOT_A_RNTI)
continue;
if (UE_list->active[UE_id] != TRUE)
if (UE_info->active[UE_id] != TRUE)
continue;
LOG_D(MAC, "Running preprocessor for UE %d (%x)\n", UE_id, rnti);
......@@ -734,48 +734,48 @@ void assign_rbs_required_fairRR(
uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]) {
uint16_t TBS = 0;
int UE_id, n, i, j, CC_id, pCCid, tmp;
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
UE_info_t *UE_info = &RC.mac[Mod_id]->UE_info;
eNB_UE_STATS *eNB_UE_stats, *eNB_UE_stats_i, *eNB_UE_stats_j;
int N_RB_DL;
// clear rb allocations across all CC_id
for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; UE_id++) {
if (UE_list->active[UE_id] != TRUE)
if (UE_info->active[UE_id] != TRUE)
continue;
pCCid = UE_PCCID(Mod_id, UE_id);
// update CQI information across component carriers
for (n = 0; n < UE_list->numactiveCCs[UE_id]; n++) {
CC_id = UE_list->ordered_CCids[n][UE_id];
eNB_UE_stats = &UE_list->eNB_UE_stats[CC_id][UE_id];
eNB_UE_stats->dlsch_mcs1 = cqi_to_mcs[UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]];
for (n = 0; n < UE_info->numactiveCCs[UE_id]; n++) {
CC_id = UE_info->ordered_CCids[n][UE_id];
eNB_UE_stats = &UE_info->eNB_UE_stats[CC_id][UE_id];
eNB_UE_stats->dlsch_mcs1 = cqi_to_mcs[UE_info->UE_sched_ctrl[UE_id].dl_cqi[CC_id]];
}
// provide the list of CCs sorted according to MCS
for (i = 0; i < UE_list->numactiveCCs[UE_id]; ++i) {
for (i = 0; i < UE_info->numactiveCCs[UE_id]; ++i) {
eNB_UE_stats_i =
&UE_list->eNB_UE_stats[UE_list->ordered_CCids[i][UE_id]][UE_id];
&UE_info->eNB_UE_stats[UE_info->ordered_CCids[i][UE_id]][UE_id];
for (j = i + 1; j < UE_list->numactiveCCs[UE_id]; j++) {
for (j = i + 1; j < UE_info->numactiveCCs[UE_id]; j++) {
DevAssert(j < NFAPI_CC_MAX);
eNB_UE_stats_j =
&UE_list->eNB_UE_stats[UE_list->ordered_CCids[j][UE_id]][UE_id];
&UE_info->eNB_UE_stats[UE_info->ordered_CCids[j][UE_id]][UE_id];
if (eNB_UE_stats_j->dlsch_mcs1 > eNB_UE_stats_i->dlsch_mcs1) {
tmp = UE_list->ordered_CCids[i][UE_id];
UE_list->ordered_CCids[i][UE_id] = UE_list->ordered_CCids[j][UE_id];
UE_list->ordered_CCids[j][UE_id] = tmp;
tmp = UE_info->ordered_CCids[i][UE_id];
UE_info->ordered_CCids[i][UE_id] = UE_info->ordered_CCids[j][UE_id];
UE_info->ordered_CCids[j][UE_id] = tmp;
}
}
}
if (UE_list->UE_template[pCCid][UE_id].dl_buffer_total > 0) {
if (UE_info->UE_template[pCCid][UE_id].dl_buffer_total > 0) {
LOG_D(MAC, "[preprocessor] assign RB for UE %d\n", UE_id);
for (i = 0; i < UE_list->numactiveCCs[UE_id]; i++) {
CC_id = UE_list->ordered_CCids[i][UE_id];
eNB_UE_stats = &UE_list->eNB_UE_stats[CC_id][UE_id];
for (i = 0; i < UE_info->numactiveCCs[UE_id]; i++) {
CC_id = UE_info->ordered_CCids[i][UE_id];
eNB_UE_stats = &UE_info->eNB_UE_stats[CC_id][UE_id];
const int min_rb_unit = get_min_rb_unit(Mod_id, CC_id);
if (eNB_UE_stats->dlsch_mcs1 == 0) {
......@@ -790,14 +790,14 @@ void assign_rbs_required_fairRR(
"buffer %d (RB unit %d, MCS %d, TBS %d) \n",
UE_id,
CC_id,
UE_list->UE_template[pCCid][UE_id].dl_buffer_total,
UE_info->UE_template[pCCid][UE_id].dl_buffer_total,
nb_rbs_required[CC_id][UE_id],
eNB_UE_stats->dlsch_mcs1,
TBS);
N_RB_DL = to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth);
/* calculating required number of RBs for each UE */
while (TBS < UE_list->UE_template[pCCid][UE_id].dl_buffer_total) {
while (TBS < UE_info->UE_template[pCCid][UE_id].dl_buffer_total) {
nb_rbs_required[CC_id][UE_id] += min_rb_unit;
if (nb_rbs_required[CC_id][UE_id] > N_RB_DL) {
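assign_rbs_required_fairRR() above sizes each UE's DL allocation by growing it in min_rb_unit steps until the transport block covers the buffered bytes, capped at the carrier bandwidth. A condensed sketch of that loop; the TBS recomputation inside the loop is not visible in this hunk and is assumed to use get_TBS_DL() with the UE's current MCS:

    uint16_t nb_rb = 0, TBS = 0;
    const int buffered = UE_info->UE_template[pCCid][UE_id].dl_buffer_total;
    while (TBS < buffered) {
      nb_rb += min_rb_unit;
      if (nb_rb > N_RB_DL) {          /* cannot exceed the carrier bandwidth */
        nb_rb = N_RB_DL;
        break;
      }
      TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, nb_rb);  /* assumed recompute */
    }
    nb_rbs_required[CC_id][UE_id] = nb_rb;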
......@@ -835,8 +835,8 @@ void dlsch_scheduler_pre_processor_allocate_fairRR(
uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX]) {
int i;
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
UE_sched_ctrl_t *ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
UE_info_t *UE_info = &RC.mac[Mod_id]->UE_info;
UE_sched_ctrl_t *ue_sched_ctl = &UE_info->UE_sched_ctrl[UE_id];
int N_RB_DL =
to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth);
const int min_rb_unit = get_min_rb_unit(Mod_id, CC_id);
......@@ -904,7 +904,7 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id,
int min_rb_unit[MAX_NUM_CCs];
// uint16_t r1=0;
uint8_t CC_id;
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
UE_info_t *UE_info = &RC.mac[Mod_id]->UE_info;
int N_RB_DL;
UE_sched_ctrl_t *ue_sched_ctl;
// int rrc_status = RRC_IDLE;
......@@ -930,7 +930,7 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id,
min_rb_unit[CC_id] = get_min_rb_unit(Mod_id, CC_id);
for (i = 0; i < NUMBER_OF_UE_MAX; i++) {
if (UE_list->active[i] != TRUE)
if (UE_info->active[i] != TRUE)
continue;
UE_id = i;
......@@ -999,7 +999,7 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id,
}
rnti = dlsch_ue_select[CC_id].list[i].rnti;
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
ue_sched_ctl = &UE_info->UE_sched_ctrl[UE_id];
harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP,subframeP);
Round = ue_sched_ctl->round[CC_id][harq_pid];
......@@ -1095,7 +1095,7 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id,
}
UE_id = dlsch_ue_select[CC_id].list[i].UE_id;
ue_sched_ctl = &RC.mac[Mod_id]->UE_list.UE_sched_ctrl[UE_id];
ue_sched_ctl = &RC.mac[Mod_id]->UE_info.UE_sched_ctrl[UE_id];
//PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].dl_pow_off = dl_pow_off[UE_id];
if (ue_sched_ctl->pre_nb_available_rbs[CC_id] > 0) {
......@@ -1148,7 +1148,7 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
uint16_t sdu_length_total = 0;
eNB_MAC_INST *eNB = RC.mac[module_idP];
COMMON_channels_t *cc = eNB->common_channels;
UE_list_t *UE_list = &eNB->UE_list;
UE_info_t *UE_info = &eNB->UE_info;
// int continue_flag = 0;
int32_t snr, target_snr;
int32_t tpc = 1;
......@@ -1246,7 +1246,7 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
N_RBG[CC_id] = to_rbg(cc[CC_id].mib->message.dl_Bandwidth);
// store the global enb stats:
eNB->eNB_stats[CC_id].num_dlactive_UEs = UE_list->num_UEs;
eNB->eNB_stats[CC_id].num_dlactive_UEs = UE_info->num_UEs;
eNB->eNB_stats[CC_id].available_prbs =
total_nb_available_rb[CC_id];
eNB->eNB_stats[CC_id].total_available_prbs +=
......@@ -1286,12 +1286,12 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
rnti = UE_RNTI(module_idP,UE_id);
if (rnti==NOT_A_RNTI) {
LOG_E(MAC,"Cannot find rnti for UE_id %d (num_UEs %d)\n",UE_id,UE_list->num_UEs);
LOG_E(MAC,"Cannot find rnti for UE_id %d (num_UEs %d)\n",UE_id,UE_info->num_UEs);
continue;
}
eNB_UE_stats = &UE_list->eNB_UE_stats[CC_id][UE_id];
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
eNB_UE_stats = &UE_info->eNB_UE_stats[CC_id][UE_id];
ue_sched_ctl = &UE_info->UE_sched_ctrl[UE_id];
/*
switch(get_tmode(module_idP,CC_id,UE_id)){
......@@ -1318,7 +1318,7 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
UE_id,
CC_id,
cc[CC_id].tdd_Config->subframeAssignment,
UE_list);
UE_info);
// update UL DAI after DLSCH scheduling
set_ul_DAI(module_idP,UE_id,CC_id,frameP,subframeP);
}
......@@ -1326,18 +1326,16 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
nb_available_rb = ue_sched_ctl->pre_nb_available_rbs[CC_id];
harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP,subframeP);
round = ue_sched_ctl->round[CC_id][harq_pid];
UE_list->eNB_UE_stats[CC_id][UE_id].crnti = rnti;
UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status =
mac_eNB_get_rrc_status(module_idP, rnti);
UE_list->eNB_UE_stats[CC_id][UE_id].harq_pid = harq_pid;
UE_list->eNB_UE_stats[CC_id][UE_id].harq_round = round;
UE_info->eNB_UE_stats[CC_id][UE_id].crnti = rnti;
UE_info->eNB_UE_stats[CC_id][UE_id].rrc_status = mac_eNB_get_rrc_status(module_idP, rnti);
UE_info->eNB_UE_stats[CC_id][UE_id].harq_pid = harq_pid;
UE_info->eNB_UE_stats[CC_id][UE_id].harq_round = round;
if (UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status < RRC_RECONFIGURED) {
UE_list->UE_sched_ctrl[UE_id].uplane_inactivity_timer = 0;
if (UE_info->eNB_UE_stats[CC_id][UE_id].rrc_status < RRC_RECONFIGURED) {
UE_info->UE_sched_ctrl[UE_id].uplane_inactivity_timer = 0;
}
if (UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status <
RRC_CONNECTED)
if (UE_info->eNB_UE_stats[CC_id][UE_id].rrc_status < RRC_CONNECTED)
continue;
sdu_length_total = 0;
......@@ -1356,13 +1354,11 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
//eNB_UE_stats->dlsch_mcs1 = cmin(eNB_UE_stats->dlsch_mcs1, openair_daq_vars.target_ue_dl_mcs);
// store stats
//UE_list->eNB_UE_stats[CC_id][UE_id].dl_cqi= eNB_UE_stats->dl_cqi;
//UE_info->eNB_UE_stats[CC_id][UE_id].dl_cqi= eNB_UE_stats->dl_cqi;
// initializing the rb allocation indicator for each UE
for (j = 0; j < N_RBG[CC_id]; j++) {
UE_list->
UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j]
= 0;
UE_info->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = 0;
}
LOG_D(MAC,
......@@ -1370,33 +1366,31 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
module_idP, frameP, UE_id, CC_id, rnti, harq_pid, round,
nb_available_rb, ue_sched_ctl->dl_cqi[CC_id],
eNB_UE_stats->dlsch_mcs1,
UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status);
UE_info->eNB_UE_stats[CC_id][UE_id].rrc_status);
/* process retransmission */
if (round != 8) {
// get freq_allocation
nb_rb = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid];
nb_rb = UE_info->UE_template[CC_id][UE_id].nb_rb[harq_pid];
TBS =
get_TBS_DL(UE_list->
UE_template[CC_id][UE_id].oldmcs1[harq_pid],
get_TBS_DL(UE_info->UE_template[CC_id][UE_id].oldmcs1[harq_pid],
nb_rb);
if (nb_rb <= nb_available_rb) {
if (cc[CC_id].tdd_Config != NULL) {
UE_list->UE_template[CC_id][UE_id].DAI++;
UE_info->UE_template[CC_id][UE_id].DAI++;
update_ul_dci(module_idP, CC_id, rnti,
UE_list->UE_template[CC_id][UE_id].
DAI,subframeP);
UE_info->UE_template[CC_id][UE_id].DAI,subframeP);
LOG_D(MAC,
"DAI update: CC_id %d subframeP %d: UE %d, DAI %d\n",
CC_id, subframeP, UE_id,
UE_list->UE_template[CC_id][UE_id].DAI);
UE_info->UE_template[CC_id][UE_id].DAI);
}
if (nb_rb == ue_sched_ctl->pre_nb_available_rbs[CC_id]) {
for (j = 0; j < N_RBG[CC_id]; j++) { // for indicating the rballoc for each sub-band
UE_list->UE_template[CC_id][UE_id].
UE_info->UE_template[CC_id][UE_id].
rballoc_subband[harq_pid][j] =
ue_sched_ctl->rballoc_sub_UE[CC_id][j];
}
......@@ -1407,16 +1401,10 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
while ((nb_rb_temp > 0) && (j < N_RBG[CC_id])) {
if (ue_sched_ctl->rballoc_sub_UE[CC_id][j] ==
1) {
if (UE_list->
UE_template[CC_id]
[UE_id].rballoc_subband[harq_pid][j])
printf
("WARN: rballoc_subband not free for retrans?\n");
if (UE_info->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j])
LOG_W(MAC, "WARN: rballoc_subband not free for retrans?\n");
UE_list->
UE_template[CC_id]
[UE_id].rballoc_subband[harq_pid][j] =
ue_sched_ctl->rballoc_sub_UE[CC_id][j];
UE_info->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j];
if ((j == N_RBG[CC_id] - 1) &&
((N_RB_DL[CC_id] == 25) ||
......@@ -1440,7 +1428,7 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
eNB->mu_mimo_mode[UE_id].dl_pow_off = ue_sched_ctl->dl_pow_off[CC_id];
for(j=0; j<N_RBG[CC_id]; j++) {
eNB->mu_mimo_mode[UE_id].rballoc_sub[j] = UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j];
eNB->mu_mimo_mode[UE_id].rballoc_sub[j] = UE_info->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j];
}
*/
......@@ -1477,36 +1465,23 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
harq_process = harq_pid;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tpc = 1; // dont adjust power when retransmitting
dl_config_pdu->dci_dl_pdu.
dci_dl_pdu_rel8.new_data_indicator_1 =
UE_list->UE_template[CC_id][UE_id].
oldNDI[harq_pid];
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 =
UE_list->UE_template[CC_id][UE_id].
oldmcs1[harq_pid];
dl_config_pdu->dci_dl_pdu.
dci_dl_pdu_rel8.redundancy_version_1 =
round & 3;
dci_dl_pdu_rel8.new_data_indicator_1 = UE_info->UE_template[CC_id][UE_id].oldNDI[harq_pid];
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = UE_info->UE_template[CC_id][UE_id].oldmcs1[harq_pid];
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = round & 3;
if (cc[CC_id].tdd_Config != NULL) { //TDD
dl_config_pdu->dci_dl_pdu.
dci_dl_pdu_rel8.downlink_assignment_index =
(UE_list->UE_template[CC_id][UE_id].DAI -
1) & 3;
dci_dl_pdu_rel8.downlink_assignment_index = (UE_info->UE_template[CC_id][UE_id].DAI - 1) & 3;
LOG_D(MAC,
"[eNB %d] Retransmission CC_id %d : harq_pid %d, round %d, dai %d, mcs %d\n",
module_idP, CC_id, harq_pid, round,
(UE_list->UE_template[CC_id][UE_id].DAI -
1),
UE_list->
UE_template[CC_id][UE_id].oldmcs1
[harq_pid]);
UE_info->UE_template[CC_id][UE_id].DAI - 1,
UE_info->UE_template[CC_id][UE_id].oldmcs1[harq_pid]);
} else {
LOG_D(MAC,
"[eNB %d] Retransmission CC_id %d : harq_pid %d, round %d, mcs %d\n",
module_idP, CC_id, harq_pid, round,
UE_list->
UE_template[CC_id][UE_id].oldmcs1
[harq_pid]);
UE_info->UE_template[CC_id][UE_id].oldmcs1[harq_pid]);
}
if (!CCE_allocation_infeasible
......@@ -1523,7 +1498,7 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
, rnti, 0, // type 0 allocation from 7.1.6 in 36.213
0, // virtual_resource_block_assignment_flag, unused here
0, // resource_block_coding, to be filled in later
getQm(UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid]), round & 3, // redundancy version
getQm(UE_info->UE_template[CC_id][UE_id].oldmcs1[harq_pid]), round & 3, // redundancy version
1, // transport blocks
0, // transport block to codeword swap flag
cc[CC_id].p_eNB == 1 ? 0 : 1, // transmission_scheme
......@@ -1531,7 +1506,7 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
1, // number of subbands
// uint8_t codebook_index,
4, // UE category capacity
UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated->p_a, 0, // delta_power_offset for TM5
UE_info->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated->p_a, 0, // delta_power_offset for TM5
0, // ngap
0, // nprb
cc[CC_id].p_eNB == 1 ? 1 : 2, // transmission mode
......@@ -1561,16 +1536,11 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
S_DL_SCHEDULED,
rnti);
//eNB_UE_stats->dlsch_trials[round]++;
UE_list->eNB_UE_stats[CC_id][UE_id].
num_retransmission += 1;
UE_list->eNB_UE_stats[CC_id][UE_id].rbs_used_retx =
nb_rb;
UE_list->eNB_UE_stats[CC_id][UE_id].
total_rbs_used_retx += nb_rb;
UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs1 =
eNB_UE_stats->dlsch_mcs1;
UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs2 =
eNB_UE_stats->dlsch_mcs1;
UE_info->eNB_UE_stats[CC_id][UE_id].num_retransmission += 1;
UE_info->eNB_UE_stats[CC_id][UE_id].rbs_used_retx = nb_rb;
UE_info->eNB_UE_stats[CC_id][UE_id].total_rbs_used_retx += nb_rb;
UE_info->eNB_UE_stats[CC_id][UE_id].dlsch_mcs1 = eNB_UE_stats->dlsch_mcs1;
UE_info->eNB_UE_stats[CC_id][UE_id].dlsch_mcs2 = eNB_UE_stats->dlsch_mcs1;
} else {
LOG_D(MAC,
"[eNB %d] Frame %d CC_id %d : don't schedule UE %d, its retransmission takes more resources than we have\n",
......@@ -1682,11 +1652,8 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
module_idP, CC_id, frameP, subframeP, UE_id, rnti, sdu_lengths[0],rlc_status.bytes_in_buffer,rrc_release_info.num_UEs);
sdu_length_total = sdu_lengths[0];
sdu_lcids[0] = DCCH;
UE_list->eNB_UE_stats[CC_id][UE_id].
num_pdu_tx[DCCH] += 1;
UE_list->
eNB_UE_stats[CC_id][UE_id].num_bytes_tx[DCCH]
+= sdu_lengths[0];
UE_info->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[DCCH] += 1;
UE_info->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[DCCH] += sdu_lengths[0];
num_sdus = 1;
#ifdef DEBUG_eNB_SCHEDULER
LOG_T(MAC,
......@@ -1727,11 +1694,8 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
sdu_lcids[num_sdus] = DCCH1;
sdu_length_total += sdu_lengths[num_sdus];
header_len_dcch += 2;
UE_list->eNB_UE_stats[CC_id][UE_id].
num_pdu_tx[DCCH1] += 1;
UE_list->
eNB_UE_stats[CC_id][UE_id].num_bytes_tx[DCCH1]
+= sdu_lengths[num_sdus];
UE_info->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[DCCH1] += 1;
UE_info->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[DCCH1] += sdu_lengths[num_sdus];
num_sdus++;
#ifdef DEBUG_eNB_SCHEDULER
LOG_T(MAC,
......@@ -1808,8 +1772,8 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
lcid);
sdu_lcids[num_sdus] = lcid;
sdu_length_total += sdu_lengths[num_sdus];
UE_list->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[lcid] += 1;
UE_list->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[lcid] += sdu_lengths[num_sdus];
UE_info->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[lcid] += 1;
UE_info->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[lcid] += sdu_lengths[num_sdus];
if (sdu_lengths[num_sdus] < 128) {
header_len_dtch--;
......@@ -1817,7 +1781,7 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
}
num_sdus++;
UE_list->UE_sched_ctrl[UE_id].uplane_inactivity_timer = 0;
UE_info->UE_sched_ctrl[UE_id].uplane_inactivity_timer = 0;
} else { // no data for this LCID
header_len_dtch -= 3;
}
......@@ -1870,9 +1834,7 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
if (nb_rb == ue_sched_ctl->pre_nb_available_rbs[CC_id]) {
for (j = 0; j < N_RBG[CC_id]; j++) { // for indicating the rballoc for each sub-band
UE_list->UE_template[CC_id][UE_id].
rballoc_subband[harq_pid][j] =
ue_sched_ctl->rballoc_sub_UE[CC_id][j];
UE_info->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j];
}
} else {
nb_rb_temp = nb_rb;
......@@ -1881,10 +1843,7 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
while ((nb_rb_temp > 0) && (j < N_RBG[CC_id])) {
if (ue_sched_ctl->rballoc_sub_UE[CC_id][j] ==
1) {
UE_list->
UE_template[CC_id]
[UE_id].rballoc_subband[harq_pid][j] =
ue_sched_ctl->rballoc_sub_UE[CC_id][j];
UE_info->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j];
if ((j == N_RBG[CC_id] - 1) &&
((N_RB_DL[CC_id] == 25) ||
......@@ -1963,7 +1922,7 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
}
#endif
offset = generate_dlsch_header((unsigned char *) UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0], num_sdus, //num_sdus
offset = generate_dlsch_header((unsigned char *) UE_info->DLSCH_pdu[CC_id][0][UE_id].payload[0], num_sdus, //num_sdus
sdu_lengths, //
sdu_lcids, 255, // no drx
ta_update, // timing advance
......@@ -1992,20 +1951,20 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
LOG_T(MAC, "\n");
#endif
// cycle through SDUs and place in dlsch_buffer
memcpy(&UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][offset],dlsch_buffer,sdu_length_total);
memcpy(&UE_info->DLSCH_pdu[CC_id][0][UE_id].payload[0][offset],dlsch_buffer,sdu_length_total);
// memcpy(RC.mac[0].DLSCH_pdu[0][0].payload[0][offset],dcch_buffer,sdu_lengths[0]);
// fill remainder of DLSCH with random data
for (j=0; j<(TBS-sdu_length_total-offset); j++) {
UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][offset+sdu_length_total+j] = (char)(taus()&0xff);
UE_info->DLSCH_pdu[CC_id][0][UE_id].payload[0][offset+sdu_length_total+j] = (char)(taus()&0xff);
}
trace_pdu(DIRECTION_DOWNLINK, (uint8_t *)UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0],
trace_pdu(DIRECTION_DOWNLINK, (uint8_t *)UE_info->DLSCH_pdu[CC_id][0][UE_id].payload[0],
TBS, module_idP, WS_RA_RNTI, UE_RNTI(module_idP, UE_id),
eNB->frame, eNB->subframe,0,0);
T(T_ENB_MAC_UE_DL_PDU_WITH_DATA, T_INT(module_idP), T_INT(CC_id), T_INT(rnti), T_INT(frameP), T_INT(subframeP),
T_INT(harq_pid), T_BUFFER(UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0], TBS));
UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid] = nb_rb;
T_INT(harq_pid), T_BUFFER(UE_info->DLSCH_pdu[CC_id][0][UE_id].payload[0], TBS));
UE_info->UE_template[CC_id][UE_id].nb_rb[harq_pid] = nb_rb;
add_ue_dlsch_info(module_idP,
CC_id,
UE_id,
......@@ -2015,37 +1974,37 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
// store stats
eNB->eNB_stats[CC_id].dlsch_bytes_tx+=sdu_length_total;
eNB->eNB_stats[CC_id].dlsch_pdus_tx+=1;
UE_list->eNB_UE_stats[CC_id][UE_id].rbs_used = nb_rb;
UE_list->eNB_UE_stats[CC_id][UE_id].total_rbs_used += nb_rb;
UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs1=eNB_UE_stats->dlsch_mcs1;
UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs2=mcs;
UE_list->eNB_UE_stats[CC_id][UE_id].TBS = TBS;
UE_list->eNB_UE_stats[CC_id][UE_id].overhead_bytes= TBS- sdu_length_total;
UE_list->eNB_UE_stats[CC_id][UE_id].total_sdu_bytes+= sdu_length_total;
UE_list->eNB_UE_stats[CC_id][UE_id].total_pdu_bytes+= TBS;
UE_list->eNB_UE_stats[CC_id][UE_id].total_num_pdus+=1;
UE_info->eNB_UE_stats[CC_id][UE_id].rbs_used = nb_rb;
UE_info->eNB_UE_stats[CC_id][UE_id].total_rbs_used += nb_rb;
UE_info->eNB_UE_stats[CC_id][UE_id].dlsch_mcs1=eNB_UE_stats->dlsch_mcs1;
UE_info->eNB_UE_stats[CC_id][UE_id].dlsch_mcs2=mcs;
UE_info->eNB_UE_stats[CC_id][UE_id].TBS = TBS;
UE_info->eNB_UE_stats[CC_id][UE_id].overhead_bytes= TBS- sdu_length_total;
UE_info->eNB_UE_stats[CC_id][UE_id].total_sdu_bytes+= sdu_length_total;
UE_info->eNB_UE_stats[CC_id][UE_id].total_pdu_bytes+= TBS;
UE_info->eNB_UE_stats[CC_id][UE_id].total_num_pdus+=1;
if (cc[CC_id].tdd_Config != NULL) { // TDD
UE_list->UE_template[CC_id][UE_id].DAI++;
update_ul_dci(module_idP,CC_id,rnti,UE_list->UE_template[CC_id][UE_id].DAI,subframeP);
UE_info->UE_template[CC_id][UE_id].DAI++;
update_ul_dci(module_idP,CC_id,rnti,UE_info->UE_template[CC_id][UE_id].DAI,subframeP);
}
// do PUCCH power control
// this is the snr
eNB_UE_stats = &UE_list->eNB_UE_stats[CC_id][UE_id];
eNB_UE_stats = &UE_info->eNB_UE_stats[CC_id][UE_id];
/* Unit is not dBm, it's special from nfapi */
snr = (5 * ue_sched_ctl->pucch1_snr[CC_id] - 640) / 10;
target_snr = eNB->puCch10xSnr / 10;
// this assumes accumulated tpc
// make sure that we are only sending a tpc update once a frame, otherwise the control loop will freak out
int32_t framex10psubframe = UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_frame*10+UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_subframe;
int32_t framex10psubframe = UE_info->UE_template[CC_id][UE_id].pucch_tpc_tx_frame*10+UE_info->UE_template[CC_id][UE_id].pucch_tpc_tx_subframe;
if (((framex10psubframe+10)<=(frameP*10+subframeP)) || //normal case
((framex10psubframe>(frameP*10+subframeP)) && (((10240-framex10psubframe+frameP*10+subframeP)>=10)))) //frame wrap-around
if (ue_sched_ctl->pucch1_cqi_update[CC_id] == 1) {
ue_sched_ctl->pucch1_cqi_update[CC_id] = 0;
UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_frame=frameP;
UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_subframe=subframeP;
UE_info->UE_template[CC_id][UE_id].pucch_tpc_tx_frame=frameP;
UE_info->UE_template[CC_id][UE_id].pucch_tpc_tx_subframe=subframeP;
if (snr > target_snr + 4) {
tpc = 0; //-1
......@@ -2080,7 +2039,7 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.transmission_power = 6000; // equal to RS power
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.harq_process = harq_pid;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tpc = tpc; // dont adjust power when retransmitting
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = 1-UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid];
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = 1-UE_info->UE_template[CC_id][UE_id].oldNDI[harq_pid];
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = mcs;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = 0;
//deactivate second codeword
......@@ -2088,10 +2047,10 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_2 = 1;
if (cc[CC_id].tdd_Config != NULL) { //TDD
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.downlink_assignment_index = (UE_list->UE_template[CC_id][UE_id].DAI-1)&3;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.downlink_assignment_index = (UE_info->UE_template[CC_id][UE_id].DAI-1)&3;
LOG_D(MAC,"[eNB %d] Initial transmission CC_id %d : harq_pid %d, dai %d, mcs %d\n",
module_idP,CC_id,harq_pid,
(UE_list->UE_template[CC_id][UE_id].DAI-1),
(UE_info->UE_template[CC_id][UE_id].DAI-1),
mcs);
} else {
LOG_D(MAC,"[eNB %d] Initial transmission CC_id %d : harq_pid %d, mcs %d\n",
......@@ -2110,12 +2069,12 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
// Toggle NDI for next time
LOG_D(MAC,"CC_id %d Frame %d, subframeP %d: Toggling Format1 NDI for UE %d (rnti %x/%d) oldNDI %d\n",
CC_id, frameP,subframeP,UE_id,
rnti,harq_pid,UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid]);
UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid]=1-UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid];
UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid] = mcs;
UE_list->UE_template[CC_id][UE_id].oldmcs2[harq_pid] = 0;
AssertFatal(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated!=NULL,"physicalConfigDedicated is NULL\n");
AssertFatal(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated!=NULL,"physicalConfigDedicated->pdsch_ConfigDedicated is NULL\n");
rnti,harq_pid,UE_info->UE_template[CC_id][UE_id].oldNDI[harq_pid]);
UE_info->UE_template[CC_id][UE_id].oldNDI[harq_pid]=1-UE_info->UE_template[CC_id][UE_id].oldNDI[harq_pid];
UE_info->UE_template[CC_id][UE_id].oldmcs1[harq_pid] = mcs;
UE_info->UE_template[CC_id][UE_id].oldmcs2[harq_pid] = 0;
AssertFatal(UE_info->UE_template[CC_id][UE_id].physicalConfigDedicated!=NULL,"physicalConfigDedicated is NULL\n");
AssertFatal(UE_info->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated!=NULL,"physicalConfigDedicated->pdsch_ConfigDedicated is NULL\n");
fill_nfapi_dlsch_config(eNB,dl_req,
TBS,
eNB->pdu_index[CC_id],
......@@ -2132,7 +2091,7 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
1, // number of subbands
// uint8_t codebook_index,
4, // UE category capacity
UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated->p_a,
UE_info->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated->p_a,
0, // delta_power_offset for TM5
0, // ngap
0, // nprb
......@@ -2144,7 +2103,7 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
(frameP*10)+subframeP,
TBS,
eNB->pdu_index[CC_id],
eNB->UE_list.DLSCH_pdu[CC_id][0][(unsigned char)UE_id].payload[0]);
eNB->UE_info.DLSCH_pdu[CC_id][0][(unsigned char)UE_id].payload[0]);
LOG_D(MAC,"Filled NFAPI configuration for DCI/DLSCH/TXREQ %d, new SDU\n",eNB->pdu_index[CC_id]);
eNB->pdu_index[CC_id]++;
program_dlsch_acknak(module_idP,CC_id,UE_id,frameP,subframeP,dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.cce_idx);
......@@ -2189,7 +2148,7 @@ fill_DLSCH_dci_fairRR(
int i;
int CC_id;
eNB_MAC_INST *eNB =RC.mac[module_idP];
UE_list_t *UE_list = &eNB->UE_list;
UE_info_t *UE_info = &eNB->UE_info;
int N_RBG;
int N_RB_DL;
COMMON_channels_t *cc;
......@@ -2225,11 +2184,11 @@ fill_DLSCH_dci_fairRR(
eNB_dlsch_info[module_idP][CC_id][UE_id].status = S_DL_WAITING;
rnti = UE_RNTI(module_idP,UE_id);
harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP,subframeP);
nb_rb = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid];
nb_rb = UE_info->UE_template[CC_id][UE_id].nb_rb[harq_pid];
/// Synchronizing rballoc with rballoc_sub
for(i=0; i<N_RBG; i++) {
rballoc_sub[i] = UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][i];
rballoc_sub[i] = UE_info->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][i];
}
nfapi_dl_config_request_t *DL_req = &RC.mac[module_idP]->DL_req[0];
......@@ -2283,7 +2242,7 @@ void ulsch_scheduler_pre_ue_select_fairRR(
UE_sched_ctrl_t *UE_sched_ctl = NULL;
uint8_t cc_id_flag[MAX_NUM_CCs];
uint8_t harq_pid = 0,round = 0;
UE_list_t *UE_list= &eNB->UE_list;
UE_info_t *UE_info= &eNB->UE_info;
uint8_t aggregation;
int format_flag;
nfapi_hi_dci0_request_body_t *HI_DCI0_req;
......@@ -2302,7 +2261,7 @@ void ulsch_scheduler_pre_ue_select_fairRR(
// UE round >0
for ( UE_id = 0; UE_id < NUMBER_OF_UE_MAX; UE_id++ ) {
if (UE_list->active[UE_id] == FALSE)
if (UE_info->active[UE_id] == FALSE)
continue;
rnti = UE_RNTI(module_idP,UE_id);
......@@ -2312,10 +2271,10 @@ void ulsch_scheduler_pre_ue_select_fairRR(
CC_id = UE_PCCID(module_idP,UE_id);
if (UE_list->UE_template[CC_id][UE_id].configured == FALSE)
if (UE_info->UE_template[CC_id][UE_id].configured == FALSE)
continue;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1)
if (UE_info->UE_sched_ctrl[UE_id].ul_out_of_sync == 1)
continue;
// UL DCI
......@@ -2339,12 +2298,12 @@ void ulsch_scheduler_pre_ue_select_fairRR(
//harq_pid
harq_pid = subframe2harqpid(cc,(frameP+(sched_subframeP<subframeP ? 1 : 0)),sched_subframeP);
//round
round = UE_list->UE_sched_ctrl[UE_id].round_UL[CC_id][harq_pid];
round = UE_info->UE_sched_ctrl[UE_id].round_UL[CC_id][harq_pid];
if ( round > 0 ) {
hi_dci0_pdu = &HI_DCI0_req->hi_dci0_pdu_list[HI_DCI0_req->number_of_dci+HI_DCI0_req->number_of_hi];
format_flag = 2;
aggregation=get_aggregation(get_bw_index(module_idP,CC_id),UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id],format0);
aggregation=get_aggregation(get_bw_index(module_idP,CC_id),UE_info->UE_sched_ctrl[UE_id].dl_cqi[CC_id],format0);
if (CCE_allocation_infeasible(module_idP,CC_id,format_flag,subframeP,aggregation,rnti) == 1) {
cc_id_flag[CC_id] = 1;
......@@ -2355,8 +2314,8 @@ void ulsch_scheduler_pre_ue_select_fairRR(
hi_dci0_pdu->dci_pdu.dci_pdu_rel8.aggregation_level = aggregation;
HI_DCI0_req->number_of_dci++;
ulsch_ue_select[CC_id].list[ulsch_ue_select[CC_id].ue_num].ue_priority = SCH_UL_RETRANS;
ulsch_ue_select[CC_id].list[ulsch_ue_select[CC_id].ue_num].start_rb = eNB->UE_list.UE_template[CC_id][UE_id].first_rb_ul[harq_pid];
ulsch_ue_select[CC_id].list[ulsch_ue_select[CC_id].ue_num].nb_rb = eNB->UE_list.UE_template[CC_id][UE_id].nb_rb_ul[harq_pid];
ulsch_ue_select[CC_id].list[ulsch_ue_select[CC_id].ue_num].start_rb = eNB->UE_info.UE_template[CC_id][UE_id].first_rb_ul[harq_pid];
ulsch_ue_select[CC_id].list[ulsch_ue_select[CC_id].ue_num].nb_rb = eNB->UE_info.UE_template[CC_id][UE_id].nb_rb_ul[harq_pid];
ulsch_ue_select[CC_id].list[ulsch_ue_select[CC_id].ue_num].UE_id = UE_id;
ulsch_ue_select[CC_id].ue_num++;
continue;
......@@ -2364,7 +2323,7 @@ void ulsch_scheduler_pre_ue_select_fairRR(
}
//
int bytes_to_schedule = UE_list->UE_template[CC_id][UE_id].estimated_ul_buffer - UE_list->UE_template[CC_id][UE_id].scheduled_ul_bytes;
int bytes_to_schedule = UE_info->UE_template[CC_id][UE_id].estimated_ul_buffer - UE_info->UE_template[CC_id][UE_id].scheduled_ul_bytes;
if (bytes_to_schedule < 0) bytes_to_schedule = 0;
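The amount of uplink data still worth granting is computed the same way in several places in this selector: the estimated buffer report minus the bytes already scheduled, clamped at zero. As a hypothetical helper (not part of this commit) it would read:

    /* hypothetical helper, not in the tree */
    static inline int ul_bytes_to_schedule(const UE_info_t *UE_info, int CC_id, int UE_id) {
      const UE_TEMPLATE *t = &UE_info->UE_template[CC_id][UE_id];
      int bytes = t->estimated_ul_buffer - t->scheduled_ul_bytes;
      return bytes > 0 ? bytes : 0;
    }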
......@@ -2376,14 +2335,14 @@ void ulsch_scheduler_pre_ue_select_fairRR(
continue;
}
if ( UE_list->UE_template[CC_id][UE_id].ul_SR > 0 ) {
if ( UE_info->UE_template[CC_id][UE_id].ul_SR > 0 ) {
first_ue_id[CC_id][ue_first_num[CC_id]]= UE_id;
first_ue_total[CC_id] [ue_first_num[CC_id]] = 0;
ue_first_num[CC_id]++;
continue;
}
UE_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
UE_sched_ctl = &UE_info->UE_sched_ctrl[UE_id];
rrc_status = mac_eNB_get_rrc_status(module_idP, rnti);
if ( ((UE_sched_ctl->ul_inactivity_timer>20)&&(UE_sched_ctl->ul_scheduled==0)) ||
......@@ -2396,7 +2355,7 @@ void ulsch_scheduler_pre_ue_select_fairRR(
}
/*if ( (ulsch_ue_select[CC_id].ue_num+ul_inactivity_num[CC_id] ) < ulsch_ue_max_num[CC_id] ) {
UE_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
UE_sched_ctl = &UE_info->UE_sched_ctrl[UE_id];
uint8_t ul_period = 0;
if (cc->tdd_Config) {
ul_period = 50;
......@@ -2426,7 +2385,7 @@ void ulsch_scheduler_pre_ue_select_fairRR(
hi_dci0_pdu = &HI_DCI0_req->hi_dci0_pdu_list[HI_DCI0_req->number_of_dci+HI_DCI0_req->number_of_hi];
format_flag = 2;
rnti = UE_RNTI(module_idP,first_ue_id[CC_id][temp]);
aggregation=get_aggregation(get_bw_index(module_idP,CC_id),UE_list->UE_sched_ctrl[first_ue_id[CC_id][temp]].dl_cqi[CC_id],format0);
aggregation=get_aggregation(get_bw_index(module_idP,CC_id),UE_info->UE_sched_ctrl[first_ue_id[CC_id][temp]].dl_cqi[CC_id],format0);
if (CCE_allocation_infeasible(module_idP,CC_id,format_flag,subframeP,aggregation,rnti) == 1) {
cc_id_flag[CC_id] = 1;
......@@ -2445,7 +2404,7 @@ void ulsch_scheduler_pre_ue_select_fairRR(
}
for ( UE_id = 0; UE_id < NUMBER_OF_UE_MAX; UE_id++ ) {
if (UE_list->active[UE_id] == FALSE)
if (UE_info->active[UE_id] == FALSE)
continue;
rnti = UE_RNTI(module_idP,UE_id);
......@@ -2458,10 +2417,10 @@ void ulsch_scheduler_pre_ue_select_fairRR(
if (UE_id > last_ulsch_ue_id[CC_id])
continue;
if (UE_list->UE_template[CC_id][UE_id].configured == FALSE)
if (UE_info->UE_template[CC_id][UE_id].configured == FALSE)
continue;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1)
if (UE_info->UE_sched_ctrl[UE_id].ul_out_of_sync == 1)
continue;
if ( (ulsch_ue_select[CC_id].ue_num >= ulsch_ue_max_num[CC_id]) || (cc_id_flag[CC_id] == 1) ) {
......@@ -2489,19 +2448,19 @@ void ulsch_scheduler_pre_ue_select_fairRR(
HI_DCI0_req = &eNB->HI_DCI0_req[CC_id][subframeP].hi_dci0_request_body;
//SR BSR
UE_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
int bytes_to_schedule = UE_list->UE_template[CC_id][UE_id].estimated_ul_buffer - UE_list->UE_template[CC_id][UE_id].scheduled_ul_bytes;
UE_sched_ctl = &UE_info->UE_sched_ctrl[UE_id];
int bytes_to_schedule = UE_info->UE_template[CC_id][UE_id].estimated_ul_buffer - UE_info->UE_template[CC_id][UE_id].scheduled_ul_bytes;
if (bytes_to_schedule < 0) bytes_to_schedule = 0;
rrc_status = mac_eNB_get_rrc_status(module_idP, rnti);
if ( (bytes_to_schedule > 0) || (UE_list->UE_template[CC_id][UE_id].ul_SR > 0) ||
if ( (bytes_to_schedule > 0) || (UE_info->UE_template[CC_id][UE_id].ul_SR > 0) ||
((UE_sched_ctl->ul_inactivity_timer>20)&&(UE_sched_ctl->ul_scheduled==0)) ||
((UE_sched_ctl->ul_inactivity_timer>10)&&(UE_sched_ctl->ul_scheduled==0)&&(rrc_status < RRC_CONNECTED)) ||
((UE_sched_ctl->cqi_req_timer>300)&&((rrc_status >= RRC_CONNECTED))) ) {
hi_dci0_pdu = &HI_DCI0_req->hi_dci0_pdu_list[HI_DCI0_req->number_of_dci+HI_DCI0_req->number_of_hi];
format_flag = 2;
aggregation=get_aggregation(get_bw_index(module_idP,CC_id),UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id],format0);
aggregation=get_aggregation(get_bw_index(module_idP,CC_id),UE_info->UE_sched_ctrl[UE_id].dl_cqi[CC_id],format0);
if (CCE_allocation_infeasible(module_idP,CC_id,format_flag,subframeP,aggregation,rnti) == 1) {
cc_id_flag[CC_id] = 1;
......@@ -2515,7 +2474,7 @@ void ulsch_scheduler_pre_ue_select_fairRR(
if(bytes_to_schedule > 0)
ulsch_ue_select[CC_id].list[ulsch_ue_select[CC_id].ue_num].ul_total_buffer = bytes_to_schedule;
else if(UE_list->UE_template[CC_id][UE_id].ul_SR > 0)
else if(UE_info->UE_template[CC_id][UE_id].ul_SR > 0)
ulsch_ue_select[CC_id].list[ulsch_ue_select[CC_id].ue_num].ul_total_buffer = 0;
ulsch_ue_select[CC_id].list[ulsch_ue_select[CC_id].ue_num].UE_id = UE_id;
......@@ -2526,7 +2485,7 @@ void ulsch_scheduler_pre_ue_select_fairRR(
//inactivity UE
/* if ( (ulsch_ue_select[CC_id].ue_num+ul_inactivity_num[CC_id]) < ulsch_ue_max_num[CC_id] ) {
UE_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
UE_sched_ctl = &UE_info->UE_sched_ctrl[UE_id];
uint8_t ul_period = 0;
if (cc->tdd_Config) {
ul_period = 50;
......@@ -2555,7 +2514,7 @@ void ulsch_scheduler_pre_ue_select_fairRR(
hi_dci0_pdu = &HI_DCI0_req->hi_dci0_pdu_list[HI_DCI0_req->number_of_dci+HI_DCI0_req->number_of_hi];
format_flag = 2;
rnti = UE_RNTI(module_idP,ul_inactivity_id[CC_id][temp]);
aggregation=get_aggregation(get_bw_index(module_idP,CC_id),UE_list->UE_sched_ctrl[ul_inactivity_id[CC_id][temp]].dl_cqi[CC_id],format0);
aggregation=get_aggregation(get_bw_index(module_idP,CC_id),UE_info->UE_sched_ctrl[ul_inactivity_id[CC_id][temp]].dl_cqi[CC_id],format0);
if (CCE_allocation_infeasible(module_idP,CC_id,format_flag,subframeP,aggregation,rnti) == 1) {
cc_id_flag[CC_id] = 1;
......@@ -2597,7 +2556,7 @@ void ulsch_scheduler_pre_processor_fairRR(module_id_t module_idP,
ULSCH_UE_SELECT ulsch_ue_select[MAX_NUM_CCs]) {
int CC_id,ulsch_ue_num;
eNB_MAC_INST *eNB = RC.mac[module_idP];
UE_list_t *UE_list= &eNB->UE_list;
UE_info_t *UE_info= &eNB->UE_info;
UE_TEMPLATE *UE_template = NULL;
LTE_DL_FRAME_PARMS *frame_parms = NULL;
uint8_t ue_num_temp;
......@@ -2696,9 +2655,9 @@ void ulsch_scheduler_pre_processor_fairRR(module_id_t module_idP,
}
}
} else {
UE_template = &UE_list->UE_template[CC_id][UE_id];
UE_template = &UE_info->UE_template[CC_id][UE_id];
if ( UE_list->UE_sched_ctrl[UE_id].phr_received == 1 ) {
if ( UE_info->UE_sched_ctrl[UE_id].phr_received == 1 ) {
mcs = 20;
} else {
mcs = 10;
......@@ -2734,9 +2693,9 @@ void ulsch_scheduler_pre_processor_fairRR(module_id_t module_idP,
if ( rb_table[rb_table_index] <= average_rbs ) {
// assign RBs (nb_rb)
first_rb[CC_id] = first_rb[CC_id] + rb_table[rb_table_index];
UE_list->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[0] = rb_table[rb_table_index];
UE_list->UE_template[CC_id][UE_id].pre_allocated_rb_table_index_ul = rb_table_index;
UE_list->UE_template[CC_id][UE_id].pre_assigned_mcs_ul = mcs;
UE_info->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[0] = rb_table[rb_table_index];
UE_info->UE_template[CC_id][UE_id].pre_allocated_rb_table_index_ul = rb_table_index;
UE_info->UE_template[CC_id][UE_id].pre_assigned_mcs_ul = mcs;
}
if ( rb_table[rb_table_index] > average_rbs ) {
......@@ -2750,31 +2709,31 @@ void ulsch_scheduler_pre_processor_fairRR(module_id_t module_idP,
}
first_rb[CC_id] = first_rb[CC_id] + rb_table[rb_table_index];
UE_list->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[0] = rb_table[rb_table_index];
UE_list->UE_template[CC_id][UE_id].pre_allocated_rb_table_index_ul = rb_table_index;
UE_list->UE_template[CC_id][UE_id].pre_assigned_mcs_ul = mcs;
UE_info->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[0] = rb_table[rb_table_index];
UE_info->UE_template[CC_id][UE_id].pre_allocated_rb_table_index_ul = rb_table_index;
UE_info->UE_template[CC_id][UE_id].pre_assigned_mcs_ul = mcs;
}
} else {
if (mac_eNB_get_rrc_status(module_idP,UE_RNTI(module_idP, UE_id)) < RRC_CONNECTED) {
// assign 6 RBs
first_rb[CC_id] = first_rb[CC_id] + 6;
UE_list->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[0] = 6;
UE_list->UE_template[CC_id][UE_id].pre_allocated_rb_table_index_ul = 5;
UE_list->UE_template[CC_id][UE_id].pre_assigned_mcs_ul = 10;
UE_info->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[0] = 6;
UE_info->UE_template[CC_id][UE_id].pre_allocated_rb_table_index_ul = 5;
UE_info->UE_template[CC_id][UE_id].pre_assigned_mcs_ul = 10;
} else {
// assign 3 RBs
first_rb[CC_id] = first_rb[CC_id] + 3;
UE_list->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[0] = 3;
UE_list->UE_template[CC_id][UE_id].pre_allocated_rb_table_index_ul = 2;
UE_list->UE_template[CC_id][UE_id].pre_assigned_mcs_ul = 10;
UE_info->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[0] = 3;
UE_info->UE_template[CC_id][UE_id].pre_allocated_rb_table_index_ul = 2;
UE_info->UE_template[CC_id][UE_id].pre_assigned_mcs_ul = 10;
}
}
} else if ( ulsch_ue_select[CC_id].list[ulsch_ue_num].ue_priority == SCH_UL_INACTIVE ) {
// assign 3 RBs
first_rb[CC_id] = first_rb[CC_id] + 3;
UE_list->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[0] = 3;
UE_list->UE_template[CC_id][UE_id].pre_allocated_rb_table_index_ul = 2;
UE_list->UE_template[CC_id][UE_id].pre_assigned_mcs_ul = 10;
UE_info->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[0] = 3;
UE_info->UE_template[CC_id][UE_id].pre_allocated_rb_table_index_ul = 2;
UE_info->UE_template[CC_id][UE_id].pre_assigned_mcs_ul = 10;
}
}
......@@ -2954,7 +2913,7 @@ void schedule_ulsch_rnti_fairRR(module_id_t module_idP,
int N_RB_UL;
eNB_MAC_INST *eNB = RC.mac[module_idP];
COMMON_channels_t *cc;
UE_list_t *UE_list=&eNB->UE_list;
UE_info_t *UE_info=&eNB->UE_info;
UE_TEMPLATE *UE_template;
UE_sched_ctrl_t *UE_sched_ctrl;
int sched_frame=frameP;
......@@ -3063,8 +3022,8 @@ void schedule_ulsch_rnti_fairRR(module_id_t module_idP,
continue;
}
UE_template = &UE_list->UE_template[CC_id][UE_id];
UE_sched_ctrl = &UE_list->UE_sched_ctrl[UE_id];
UE_template = &UE_info->UE_template[CC_id][UE_id];
UE_sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
harq_pid = subframe2harqpid(cc,sched_frame,sched_subframeP);
rnti = UE_RNTI(CC_id,UE_id);
aggregation=get_aggregation(get_bw_index(module_idP,CC_id),UE_sched_ctrl[UE_id].dl_cqi[CC_id],format0);
......@@ -3157,9 +3116,9 @@ void schedule_ulsch_rnti_fairRR(module_id_t module_idP,
UE_sched_ctrl->cqi_req_timer);
ndi = 1-UE_template->oldNDI_UL[harq_pid];
UE_template->oldNDI_UL[harq_pid]=ndi;
UE_list->eNB_UE_stats[CC_id][UE_id].snr = snr;
UE_list->eNB_UE_stats[CC_id][UE_id].target_snr = target_snr;
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs1=UE_template->pre_assigned_mcs_ul;
UE_info->eNB_UE_stats[CC_id][UE_id].snr = snr;
UE_info->eNB_UE_stats[CC_id][UE_id].target_snr = target_snr;
UE_info->eNB_UE_stats[CC_id][UE_id].ulsch_mcs1=UE_template->pre_assigned_mcs_ul;
UE_template->mcs_UL[harq_pid] = UE_template->pre_assigned_mcs_ul;//cmin (UE_template->pre_assigned_mcs_ul, openair_daq_vars.target_ue_ul_mcs); // adjust, based on user-defined MCS
if (UE_template->pre_allocated_rb_table_index_ul >=0) {
......@@ -3169,10 +3128,10 @@ void schedule_ulsch_rnti_fairRR(module_id_t module_idP,
rb_table_index=5; // for PHR
}
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs2=UE_template->mcs_UL[harq_pid];
UE_info->eNB_UE_stats[CC_id][UE_id].ulsch_mcs2=UE_template->mcs_UL[harq_pid];
UE_template->TBS_UL[harq_pid] = get_TBS_UL(UE_template->mcs_UL[harq_pid],rb_table[rb_table_index]);
UE_list->eNB_UE_stats[CC_id][UE_id].total_rbs_used_rx+=rb_table[rb_table_index];
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_TBS=UE_template->TBS_UL[harq_pid];
UE_info->eNB_UE_stats[CC_id][UE_id].total_rbs_used_rx+=rb_table[rb_table_index];
UE_info->eNB_UE_stats[CC_id][UE_id].ulsch_TBS=UE_template->TBS_UL[harq_pid];
T(T_ENB_MAC_UE_UL_SCHEDULE, T_INT(module_idP), T_INT(CC_id), T_INT(rnti), T_INT(frameP),
T_INT(subframeP), T_INT(harq_pid), T_INT(UE_template->mcs_UL[harq_pid]), T_INT(first_rb[CC_id]), T_INT(rb_table[rb_table_index]),
T_INT(UE_template->TBS_UL[harq_pid]), T_INT(ndi));
......@@ -3190,7 +3149,7 @@ void schedule_ulsch_rnti_fairRR(module_id_t module_idP,
UE_template->cqi_req[harq_pid] = cqi_req;
UE_sched_ctrl->ul_scheduled |= (1<<harq_pid);
if (UE_id == UE_list->head)
if (UE_id == UE_info->list.head)
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_UE0_SCHEDULED,UE_sched_ctrl->ul_scheduled);
// adjust total UL buffer status by TBS, wait for UL sdus to do final update
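Once the grant is built, the scheduler marks the HARQ process as uplink-scheduled with a per-process bitmask (ul_scheduled |= 1 << harq_pid) and, for the head-of-list UE, mirrors the mask into the VCD trace. A sketch of the bookkeeping; the clear on reception is an assumption about the counterpart code, which is not shown in this diff:

    UE_sched_ctrl->ul_scheduled |=  (1 << harq_pid);   /* grant sent (from the hunk above) */
    /* on UL SDU reception / final HARQ outcome (assumed counterpart, not in this diff):   */
    UE_sched_ctrl->ul_scheduled &= ~(1 << harq_pid);
    /* test whether a given HARQ process still has an outstanding grant:                   */
    int outstanding = (UE_sched_ctrl->ul_scheduled >> harq_pid) & 1;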
......@@ -3332,8 +3291,8 @@ void schedule_ulsch_rnti_fairRR(module_id_t module_idP,
T(T_ENB_MAC_UE_UL_SCHEDULE_RETRANSMISSION, T_INT(module_idP), T_INT(CC_id), T_INT(rnti), T_INT(frameP),
T_INT(subframeP), T_INT(harq_pid), T_INT(UE_template->mcs_UL[harq_pid]), T_INT(ulsch_ue_select[CC_id].list[ulsch_ue_num].start_rb), T_INT(ulsch_ue_select[CC_id].list[ulsch_ue_num].nb_rb),
T_INT(round));
UE_list->eNB_UE_stats[CC_id][UE_id].snr = snr;
UE_list->eNB_UE_stats[CC_id][UE_id].target_snr = target_snr;
UE_info->eNB_UE_stats[CC_id][UE_id].snr = snr;
UE_info->eNB_UE_stats[CC_id][UE_id].target_snr = target_snr;
uint8_t mcs_rv = 0;
if(rvidx_tab[round&3]==1) {
......@@ -3345,7 +3304,7 @@ void schedule_ulsch_rnti_fairRR(module_id_t module_idP,
}
UE_template->TBS_UL[harq_pid] = get_TBS_UL(UE_template->mcs_UL[harq_pid],ulsch_ue_select[CC_id].list[ulsch_ue_num].nb_rb);
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_TBS=UE_template->TBS_UL[harq_pid];
UE_info->eNB_UE_stats[CC_id][UE_id].ulsch_TBS=UE_template->TBS_UL[harq_pid];
if (mac_eNB_get_rrc_status(module_idP,rnti) < RRC_CONNECTED)
LOG_D(MAC,"[eNB %d][PUSCH %d/%x] CC_id %d Frame %d subframeP %d Scheduled UE %d (mcs %d, first rb %d, nb_rb %d, TBS %d, harq_pid %d)\n",
......
......@@ -133,13 +133,13 @@ schedule_ue_spec_phy_test(
/*
LOG_D(MAC,"CC_id %d Frame %d, subframeP %d: Toggling Format1 NDI for UE %d (rnti %x/%d) oldNDI %d\n",
CC_id, frameP,subframeP,UE_id,
rnti,harq_pid,UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid]);
rnti,harq_pid,UE_info->UE_template[CC_id][UE_id].oldNDI[harq_pid]);
UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid]=1-UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid];
UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid] = mcs;
UE_list->UE_template[CC_id][UE_id].oldmcs2[harq_pid] = 0;
AssertFatal(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated!=NULL,"physicalConfigDedicated is NULL\n");
AssertFatal(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated!=NULL,"physicalConfigDedicated->pdsch_ConfigDedicated is NULL\n");
UE_info->UE_template[CC_id][UE_id].oldNDI[harq_pid]=1-UE_info->UE_template[CC_id][UE_id].oldNDI[harq_pid];
UE_info->UE_template[CC_id][UE_id].oldmcs1[harq_pid] = mcs;
UE_info->UE_template[CC_id][UE_id].oldmcs2[harq_pid] = 0;
AssertFatal(UE_info->UE_template[CC_id][UE_id].physicalConfigDedicated!=NULL,"physicalConfigDedicated is NULL\n");
AssertFatal(UE_info->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated!=NULL,"physicalConfigDedicated->pdsch_ConfigDedicated is NULL\n");
*/
fill_nfapi_dlsch_config(eNB,
dl_req,
......@@ -170,7 +170,7 @@ schedule_ue_spec_phy_test(
(frameP*10)+subframeP,
TBS,
eNB->pdu_index[CC_id],
eNB->UE_list.DLSCH_pdu[CC_id][0][(unsigned char)UE_id].payload[0]);
eNB->UE_info.DLSCH_pdu[CC_id][0][(unsigned char)UE_id].payload[0]);
} else {
LOG_W(MAC,"[eNB_scheduler_phytest] DCI allocation infeasible!\n");
}
......@@ -192,7 +192,7 @@ void schedule_ulsch_phy_test(module_id_t module_idP,frame_t frameP,sub_frame_t s
int N_RB_UL;
eNB_MAC_INST *mac = RC.mac[module_idP];
COMMON_channels_t *cc = &mac->common_channels[0];
UE_list_t *UE_list=&mac->UE_list;
UE_info_t *UE_info=&mac->UE_info;
UE_TEMPLATE *UE_template;
UE_sched_ctrl_t *UE_sched_ctrl;
int sched_frame=frameP;
......@@ -231,8 +231,8 @@ void schedule_ulsch_phy_test(module_id_t module_idP,frame_t frameP,sub_frame_t s
first_rb[CC_id] = 1;
// loop over all active UEs
// if (eNB_UE_stats->mode == PUSCH) { // ue has a ulsch channel
UE_template = &UE_list->UE_template[CC_id][UE_id];
UE_sched_ctrl = &UE_list->UE_sched_ctrl[UE_id];
UE_template = &UE_info->UE_template[CC_id][UE_id];
UE_sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
harq_pid = subframe2harqpid(&cc[CC_id],sched_frame,sched_subframe);
RC.eNB[module_idP][CC_id]->pusch_stats_BO[UE_id][(frameP*10)+subframeP] = UE_template->TBS_UL[harq_pid];
//power control
......@@ -242,15 +242,15 @@ void schedule_ulsch_phy_test(module_id_t module_idP,frame_t frameP,sub_frame_t s
// new transmission
ndi = 1-UE_template->oldNDI_UL[harq_pid];
UE_template->oldNDI_UL[harq_pid]=ndi;
UE_list->eNB_UE_stats[CC_id][UE_id].snr = snr;
UE_list->eNB_UE_stats[CC_id][UE_id].target_snr = target_snr;
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs1 = mcs;
UE_info->eNB_UE_stats[CC_id][UE_id].snr = snr;
UE_info->eNB_UE_stats[CC_id][UE_id].target_snr = target_snr;
UE_info->eNB_UE_stats[CC_id][UE_id].ulsch_mcs1 = mcs;
UE_template->mcs_UL[harq_pid] = mcs;//cmin (UE_template->pre_assigned_mcs_ul, openair_daq_vars.target_ue_ul_mcs); // adjust, based on user-defined MCS
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs2 = mcs;
UE_info->eNB_UE_stats[CC_id][UE_id].ulsch_mcs2 = mcs;
// buffer_occupancy = UE_template->ul_total_buffer;
UE_template->TBS_UL[harq_pid] = get_TBS_UL(mcs,nb_rb);
UE_list->eNB_UE_stats[CC_id][UE_id].total_rbs_used_rx += nb_rb;
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_TBS = get_TBS_UL(mcs,nb_rb);
UE_info->eNB_UE_stats[CC_id][UE_id].total_rbs_used_rx += nb_rb;
UE_info->eNB_UE_stats[CC_id][UE_id].ulsch_TBS = get_TBS_UL(mcs,nb_rb);
// buffer_occupancy -= TBS;
// bad indices : 20 (40 PRB), 21 (45 PRB), 22 (48 PRB)
//store for possible retransmission
......
......@@ -1169,14 +1169,14 @@ program_dlsch_acknak(module_id_t module_idP,
{
eNB_MAC_INST *eNB = RC.mac[module_idP];
COMMON_channels_t *cc = eNB->common_channels;
UE_list_t *UE_list = &eNB->UE_list;
UE_info_t *UE_info = &eNB->UE_info;
rnti_t rnti = UE_RNTI(module_idP, UE_idP);
nfapi_ul_config_request_body_t *ul_req;
nfapi_ul_config_request_pdu_t *ul_config_pdu;
int use_simultaneous_pucch_pusch = 0;
nfapi_ul_config_ulsch_harq_information *ulsch_harq_information = NULL;
nfapi_ul_config_harq_information *harq_information = NULL;
struct LTE_PhysicalConfigDedicated__ext2 *ext2 = UE_list->UE_template[CC_idP][UE_idP].physicalConfigDedicated->ext2;
struct LTE_PhysicalConfigDedicated__ext2 *ext2 = UE_info->UE_template[CC_idP][UE_idP].physicalConfigDedicated->ext2;
if (ext2 &&
ext2->pucch_ConfigDedicated_v1020 &&
......@@ -1363,12 +1363,12 @@ fill_nfapi_ulsch_harq_information(module_id_t module_
{
eNB_MAC_INST *eNB = RC.mac[module_idP];
COMMON_channels_t *cc = &eNB->common_channels[CC_idP];
UE_list_t *UE_list = &eNB->UE_list;
UE_info_t *UE_info = &eNB->UE_info;
int UE_id = find_UE_id(module_idP, rntiP);
nfapi_ul_config_ulsch_harq_information_rel10_t *harq_information_rel10 = &harq_information->harq_information_rel10;
AssertFatal(UE_id >= 0, "UE_id cannot be found, impossible\n");
AssertFatal(UE_list != NULL, "UE_list is null\n");
LTE_PhysicalConfigDedicated_t *physicalConfigDedicated = UE_list->UE_template[CC_idP][UE_id].physicalConfigDedicated;
AssertFatal(UE_info != NULL, "UE_info is null\n");
LTE_PhysicalConfigDedicated_t *physicalConfigDedicated = UE_info->UE_template[CC_idP][UE_id].physicalConfigDedicated;
AssertFatal(physicalConfigDedicated != NULL, "physicalConfigDedicated for rnti %x is null\n",
rntiP);
struct LTE_PUSCH_ConfigDedicated *puschConfigDedicated = physicalConfigDedicated->pusch_ConfigDedicated;
......@@ -1468,14 +1468,14 @@ fill_nfapi_harq_information(module_id_t module_idP,
{
eNB_MAC_INST *eNB = RC.mac[module_idP];
COMMON_channels_t *cc = &eNB->common_channels[CC_idP];
UE_list_t *UE_list = &eNB->UE_list;
UE_info_t *UE_info = &eNB->UE_info;
int UE_id = find_UE_id(module_idP,
rntiP);
AssertFatal(UE_id >= 0, "UE_id cannot be found, impossible\n");
AssertFatal(UE_list != NULL, "UE_list is null\n");
AssertFatal(UE_info != NULL, "UE_info is null\n");
harq_information->harq_information_rel11.tl.tag = NFAPI_UL_CONFIG_REQUEST_HARQ_INFORMATION_REL11_TAG;
harq_information->harq_information_rel11.num_ant_ports = 1;
LTE_PhysicalConfigDedicated_t *physicalConfigDedicated = UE_list->UE_template[CC_idP][UE_id].physicalConfigDedicated;
LTE_PhysicalConfigDedicated_t *physicalConfigDedicated = UE_info->UE_template[CC_idP][UE_id].physicalConfigDedicated;
struct LTE_PUCCH_ConfigDedicated *pucch_ConfigDedicated = NULL;
if (physicalConfigDedicated != NULL) pucch_ConfigDedicated = physicalConfigDedicated->pucch_ConfigDedicated;
......@@ -1489,7 +1489,7 @@ fill_nfapi_harq_information(module_id_t module_idP,
case 6:
case 7:
if (cc->tdd_Config != NULL) {
// AssertFatal(UE_list->UE_template[CC_idP][UE_id].physicalConfigDedicated->pucch_ConfigDedicated != NULL,
// AssertFatal(UE_info->UE_template[CC_idP][UE_id].physicalConfigDedicated->pucch_ConfigDedicated != NULL,
// "pucch_ConfigDedicated is null for TDD!\n");
if (physicalConfigDedicated != NULL && pucch_ConfigDedicated != NULL &&
pucch_ConfigDedicated->tdd_AckNackFeedbackMode != NULL &&
......@@ -1879,8 +1879,7 @@ mpdcch_sf_condition(eNB_MAC_INST *eNB,
break;
case TYPEUESPEC:
epdcch_setconfig_r11 =
eNB->UE_list.UE_template[CC_id][UE_id].physicalConfigDedicated->ext4->epdcch_Config_r11->config_r11.choice.setup.setConfigToAddModList_r11->list.array[0];
epdcch_setconfig_r11 = eNB->UE_info.UE_template[CC_id][UE_id].physicalConfigDedicated->ext4->epdcch_Config_r11->config_r11.choice.setup.setConfigToAddModList_r11->list.array[0];
AssertFatal(epdcch_setconfig_r11 != NULL, " epdcch_setconfig_r11 is null for UE specific \n");
AssertFatal(epdcch_setconfig_r11->ext2 != NULL, " ext2 doesn't exist in epdcch config ' \n");
......@@ -1988,11 +1987,11 @@ find_UE_id(module_id_t mod_idP,
//------------------------------------------------------------------------------
{
int UE_id;
UE_list_t *UE_list = &RC.mac[mod_idP]->UE_list;
UE_info_t *UE_info = &RC.mac[mod_idP]->UE_info;
for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; UE_id++) {
if (UE_list->active[UE_id] == TRUE) {
if (UE_list->UE_template[UE_PCCID(mod_idP, UE_id)][UE_id].rnti == rntiP) {
if (UE_info->active[UE_id] == TRUE) {
if (UE_info->UE_template[UE_PCCID(mod_idP, UE_id)][UE_id].rnti == rntiP) {
return UE_id;
}
}
......@@ -2027,7 +2026,7 @@ find_RA_id(module_id_t mod_idP,
//------------------------------------------------------------------------------
int
UE_num_active_CC(UE_list_t *listP,
UE_num_active_CC(UE_info_t *listP,
int ue_idP)
//------------------------------------------------------------------------------
{
......@@ -2040,7 +2039,7 @@ UE_PCCID(module_id_t mod_idP,
int ue_idP)
//------------------------------------------------------------------------------
{
return (RC.mac[mod_idP]->UE_list.pCC_id[ue_idP]);
return (RC.mac[mod_idP]->UE_info.pCC_id[ue_idP]);
}
//------------------------------------------------------------------------------
......@@ -2051,7 +2050,7 @@ UE_RNTI(module_id_t mod_idP,
{
if (!RC.mac || !RC.mac[mod_idP]) return 0;
rnti_t rnti = RC.mac[mod_idP]->UE_list.UE_template[UE_PCCID(mod_idP,
rnti_t rnti = RC.mac[mod_idP]->UE_info.UE_template[UE_PCCID(mod_idP,
ue_idP)][ue_idP].rnti;
if (rnti > 0) {
......@@ -2069,7 +2068,7 @@ is_UE_active(module_id_t mod_idP,
int ue_idP)
//------------------------------------------------------------------------------
{
return (RC.mac[mod_idP]->UE_list.active[ue_idP]);
return (RC.mac[mod_idP]->UE_info.active[ue_idP]);
}
//------------------------------------------------------------------------------
......@@ -2122,59 +2121,46 @@ get_aggregation(uint8_t bw_index,
//------------------------------------------------------------------------------
/*
* Dump the UL or DL UE_list into LOG_T(MAC)
* Dump the UE_list into LOG_T(MAC)
*/
void
dump_ue_list(UE_list_t *listP,
int ul_flag)
dump_ue_list(UE_list_t *listP) {
for (int j = listP->head; j >= 0; j = listP->next[j])
LOG_T(MAC, "DL list node %d => %d\n", j, listP->next[j]);
}
//------------------------------------------------------------------------------
{
if (ul_flag == 0) {
for (int j = listP->head; j >= 0; j = listP->next[j]) {
LOG_T(MAC, "DL list node %d => %d\n",
j,
listP->next[j]);
}
/*
* Add a UE to UE_list listP
*/
inline void add_ue_list(UE_list_t *listP, int UE_id) {
if (listP->head == -1) {
listP->head = UE_id;
listP->next[UE_id] = -1;
} else {
for (int j = listP->head_ul; j >= 0; j = listP->next_ul[j]) {
LOG_T(MAC, "UL list node %d => %d\n",
j,
listP->next_ul[j]);
}
int i = listP->head;
while (listP->next[i] >= 0)
i = listP->next[i];
listP->next[i] = UE_id;
listP->next[UE_id] = -1;
}
return;
}
//------------------------------------------------------------------------------
/*
* Add a UE to the UL or DL UE_list listP
* Remove a UE from the UE_list listP, return the previous element
*/
void
add_ue_list(UE_list_t *listP, int UE_id, int ul_flag) {
if (ul_flag == 0) {
if (listP->head == -1) {
listP->head = UE_id;
listP->next[UE_id] = -1;
} else {
int i = listP->head;
while (listP->next[i] >= 0)
i = listP->next[i];
listP->next[i] = UE_id;
listP->next[UE_id] = -1;
}
} else {
if (listP->head_ul == -1) {
listP->head_ul = UE_id;
listP->next_ul[UE_id] = -1;
} else {
int i = listP->head;
while (listP->next_ul[i] >= 0)
i = listP->next[i];
listP->next_ul[i] = UE_id;
listP->next_ul[UE_id] = -1;
}
inline int remove_ue_list(UE_list_t *listP, int UE_id) {
/* unlink UE_id from the list *before* clearing its next pointer; clearing it
 * first would truncate the list behind the removed node */
if (listP->head == UE_id) {
listP->head = listP->next[UE_id];
listP->next[UE_id] = -1;
return -1;
}
int previous = prev(listP, UE_id);
if (previous != -1)
listP->next[previous] = listP->next[UE_id];
listP->next[UE_id] = -1;
return previous;
}
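As an illustration (not part of the patch): one way a caller can use the returned previous element is to keep walking the list after dropping the current node. The list_t type and the helpers below are local stand-ins that mirror prev() and remove_ue_list() above.

#include <stdio.h>

#define MAX_UE 8

typedef struct { int head; int next[MAX_UE]; } list_t;

/* mirrors prev(): predecessor of "node", -1 if node is the head */
static int list_prev(const list_t *l, int node) {
  if (node == l->head) return -1;
  for (int j = l->head; j >= 0; j = l->next[j])
    if (l->next[j] == node) return j;
  return -1;
}

/* mirrors remove_ue_list(): unlink UE_id, return its former predecessor */
static int list_remove(list_t *l, int UE_id) {
  if (l->head == UE_id) {
    l->head = l->next[UE_id];
    l->next[UE_id] = -1;
    return -1;
  }
  int previous = list_prev(l, UE_id);
  if (previous != -1)
    l->next[previous] = l->next[UE_id];
  l->next[UE_id] = -1;
  return previous;
}

int main(void) {
  /* list 0 -> 1 -> 2 -> 3 */
  list_t l = { .head = 0, .next = { 1, 2, 3, -1, -1, -1, -1, -1 } };

  for (int UE_id = l.head; UE_id >= 0; ) {
    if (UE_id == 2) {                         /* drop UE 2 while iterating */
      int p = list_remove(&l, UE_id);
      UE_id = (p == -1) ? l.head : l.next[p]; /* resume after the removed node */
    } else {
      printf("keep UE %d\n", UE_id);
      UE_id = l.next[UE_id];
    }
  }
  return 0;
}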
//------------------------------------------------------------------------------
......@@ -2189,52 +2175,49 @@ add_new_ue(module_id_t mod_idP,
{
int UE_id;
int i, j;
UE_list_t *UE_list = &RC.mac[mod_idP]->UE_list;
LOG_D(MAC, "[eNB %d, CC_id %d] Adding UE with rnti %x (next avail %d, num_UEs %d)\n",
UE_info_t *UE_info = &RC.mac[mod_idP]->UE_info;
LOG_D(MAC, "[eNB %d, CC_id %d] Adding UE with rnti %x (prev. num_UEs %d)\n",
mod_idP,
cc_idP,
rntiP,
UE_list->avail,
UE_list->num_UEs);
UE_info->num_UEs);
for (i = 0; i < MAX_MOBILES_PER_ENB; i++) {
if (UE_list->active[i] == TRUE)
if (UE_info->active[i] == TRUE)
continue;
UE_id = i;
memset(&UE_list->UE_template[cc_idP][UE_id], 0, sizeof(UE_TEMPLATE));
UE_list->UE_template[cc_idP][UE_id].rnti = rntiP;
UE_list->UE_template[cc_idP][UE_id].configured = FALSE;
UE_list->numactiveCCs[UE_id] = 1;
UE_list->numactiveULCCs[UE_id] = 1;
UE_list->pCC_id[UE_id] = cc_idP;
UE_list->ordered_CCids[0][UE_id] = cc_idP;
UE_list->ordered_ULCCids[0][UE_id] = cc_idP;
UE_list->num_UEs++;
UE_list->active[UE_id] = TRUE;
add_ue_list(UE_list, UE_id, 0);
dump_ue_list(UE_list, 0);
add_ue_list(UE_list, UE_id, 1);
dump_ue_list(UE_list, 1);
memset(&UE_info->UE_template[cc_idP][UE_id], 0, sizeof(UE_TEMPLATE));
UE_info->UE_template[cc_idP][UE_id].rnti = rntiP;
UE_info->UE_template[cc_idP][UE_id].configured = FALSE;
UE_info->numactiveCCs[UE_id] = 1;
UE_info->numactiveULCCs[UE_id] = 1;
UE_info->pCC_id[UE_id] = cc_idP;
UE_info->ordered_CCids[0][UE_id] = cc_idP;
UE_info->ordered_ULCCids[0][UE_id] = cc_idP;
UE_info->num_UEs++;
UE_info->active[UE_id] = TRUE;
add_ue_list(&UE_info->list, UE_id);
dump_ue_list(&UE_info->list);
if (IS_SOFTMODEM_IQPLAYER)// not specific to record/playback ?
UE_list->UE_template[cc_idP][UE_id].pre_assigned_mcs_ul = 0;
UE_list->UE_template[cc_idP][UE_id].rach_resource_type = rach_resource_type;
memset((void *) &UE_list->UE_sched_ctrl[UE_id],
UE_info->UE_template[cc_idP][UE_id].pre_assigned_mcs_ul = 0;
UE_info->UE_template[cc_idP][UE_id].rach_resource_type = rach_resource_type;
memset((void *) &UE_info->UE_sched_ctrl[UE_id],
0,
sizeof(UE_sched_ctrl_t));
memset((void *) &UE_list->eNB_UE_stats[cc_idP][UE_id],
memset((void *) &UE_info->eNB_UE_stats[cc_idP][UE_id],
0,
sizeof(eNB_UE_STATS));
UE_list->UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer = 0;
UE_info->UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer = 0;
/* default slice in case there was something different */
UE_list->assoc_ul_slice_idx[UE_id] = 0;
UE_list->UE_sched_ctrl[UE_id].ta_update = 31;
UE_info->assoc_ul_slice_idx[UE_id] = 0;
UE_info->UE_sched_ctrl[UE_id].ta_update = 31;
for (j = 0; j < 8; j++) {
UE_list->UE_template[cc_idP][UE_id].oldNDI[j] = 0;
UE_list->UE_template[cc_idP][UE_id].oldNDI_UL[j] = 0;
UE_list->UE_sched_ctrl[UE_id].round[cc_idP][j] = 8;
UE_list->UE_sched_ctrl[UE_id].round_UL[cc_idP][j] = 0;
UE_info->UE_template[cc_idP][UE_id].oldNDI[j] = 0;
UE_info->UE_template[cc_idP][UE_id].oldNDI_UL[j] = 0;
UE_info->UE_sched_ctrl[UE_id].round[cc_idP][j] = 8;
UE_info->UE_sched_ctrl[UE_id].round_UL[cc_idP][j] = 0;
}
eNB_ulsch_info[mod_idP][cc_idP][UE_id].status = S_UL_WAITING;
......@@ -2247,10 +2230,8 @@ add_new_ue(module_id_t mod_idP,
return (UE_id);
}
// printf("MAC: cannot add new UE for rnti %x\n", rntiP);
LOG_E(MAC, "error in add_new_ue(), could not find space in UE_list, Dumping UE list\n");
dump_ue_list(UE_list,
0);
dump_ue_list(&UE_info->list);
return -1;
}
......@@ -2263,7 +2244,7 @@ rrc_mac_remove_ue(module_id_t mod_idP,
rnti_t rntiP)
//------------------------------------------------------------------------------
{
UE_list_t *UE_list = &RC.mac[mod_idP]->UE_list;
UE_info_t *UE_info = &RC.mac[mod_idP]->UE_info;
int UE_id = find_UE_id(mod_idP, rntiP);
eNB_UE_STATS *ue_stats = NULL;
int pCC_id = -1;
......@@ -2279,38 +2260,14 @@ rrc_mac_remove_ue(module_id_t mod_idP,
UE_id,
pCC_id,
rntiP);
UE_list->active[UE_id] = FALSE;
UE_list->num_UEs--;
UE_list->next[UE_id] = -1;
UE_list->next_ul[UE_id] = -1;
/* If present, remove UE from DL list */
if (UE_list->head == UE_id) {
UE_list->head = UE_list->next[UE_id];
} else {
int previous = prev(UE_list, UE_id, 0);
UE_info->active[UE_id] = FALSE;
UE_info->num_UEs--;
if (previous != -1) {
UE_list->next[previous] = UE_list->next[UE_id];
}
}
/* If present, remove UE from UL list */
if (UE_list->head_ul == UE_id) {
UE_list->head_ul = UE_list->next_ul[UE_id];
} else {
int previous = prev(UE_list, UE_id, 1);
if (previous != -1) {
UE_list->next_ul[previous] = UE_list->next_ul[UE_id];
}
}
remove_ue_list(&UE_info->list, UE_id);
/* Clear all remaining pending transmissions */
memset(&UE_list->UE_template[pCC_id][UE_id],
0,
sizeof(UE_TEMPLATE));
ue_stats = &UE_list->eNB_UE_stats[pCC_id][UE_id];
memset(&UE_info->UE_template[pCC_id][UE_id], 0, sizeof(UE_TEMPLATE));
ue_stats = &UE_info->eNB_UE_stats[pCC_id][UE_id];
ue_stats->total_rbs_used = 0;
ue_stats->total_rbs_used_retx = 0;
......@@ -2387,39 +2344,16 @@ rrc_mac_remove_ue(module_id_t mod_idP,
/*
* Returns the previous UE_id in the scheduling list listP
*/
int
prev(UE_list_t *listP,
int nodeP,
int ul_flag)
//------------------------------------------------------------------------------
{
if (ul_flag == 0) {
if (nodeP == listP->head) {
return nodeP;
}
inline int prev(UE_list_t *listP, int nodeP) {
if (nodeP == listP->head)
return -1; /* there is no previous of the head */
for (int j = listP->head; j >= 0; j = listP->next[j]) {
if (listP->next[j] == nodeP) {
return j;
}
}
} else {
if (nodeP == listP->head_ul) {
return nodeP;
}
for (int j = listP->head_ul; j >= 0; j = listP->next_ul[j]) {
if (listP->next_ul[j] == nodeP) {
return j;
}
}
}
for (int j = listP->head; j >= 0; j = listP->next[j])
if (listP->next[j] == nodeP)
return j;
LOG_E(MAC, "error in prev(), could not find previous to %d in UE_list %s, should never happen, Dumping UE list\n",
nodeP,
(ul_flag == 0) ? "DL" : "UL");
dump_ue_list(listP,
ul_flag);
LOG_E(MAC, "%s(): could not find previous to %d in UE_list\n", __func__, nodeP);
dump_ue_list(listP);
return -1;
}
......@@ -2431,8 +2365,8 @@ UE_is_to_be_scheduled(module_id_t module_idP,
uint8_t UE_id)
//------------------------------------------------------------------------------
{
UE_TEMPLATE *UE_template = &RC.mac[module_idP]->UE_list.UE_template[CC_id][UE_id];
UE_sched_ctrl_t *UE_sched_ctl = &RC.mac[module_idP]->UE_list.UE_sched_ctrl[UE_id];
UE_TEMPLATE *UE_template = &RC.mac[module_idP]->UE_info.UE_template[CC_id][UE_id];
UE_sched_ctrl_t *UE_sched_ctl = &RC.mac[module_idP]->UE_info.UE_sched_ctrl[UE_id];
int rrc_status;
// do not schedule UE if UL is not working
......@@ -2474,7 +2408,7 @@ get_tmode(module_id_t module_idP,
{
eNB_MAC_INST *eNB = RC.mac[module_idP];
COMMON_channels_t *cc = &eNB->common_channels[CC_idP];
struct LTE_PhysicalConfigDedicated *physicalConfigDedicated = eNB->UE_list.UE_template[CC_idP][UE_idP].physicalConfigDedicated;
struct LTE_PhysicalConfigDedicated *physicalConfigDedicated = eNB->UE_info.UE_template[CC_idP][UE_idP].physicalConfigDedicated;
if (physicalConfigDedicated == NULL) { // RRCConnectionSetup not received by UE yet
AssertFatal(cc->p_eNB <= 2, "p_eNB is %d, should be <2\n",
......@@ -3841,15 +3775,15 @@ extract_harq(module_id_t mod_idP,
//------------------------------------------------------------------------------
{
eNB_MAC_INST *eNB = RC.mac[mod_idP];
UE_list_t *UE_list = &eNB->UE_list;
UE_sched_ctrl_t *sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
UE_info_t *UE_info = &eNB->UE_info;
UE_sched_ctrl_t *sched_ctl = &UE_info->UE_sched_ctrl[UE_id];
rnti_t rnti = UE_RNTI(mod_idP, UE_id);
COMMON_channels_t *cc = &eNB->common_channels[CC_idP];
nfapi_harq_indication_fdd_rel13_t *harq_indication_fdd;
nfapi_harq_indication_tdd_rel13_t *harq_indication_tdd;
uint16_t num_ack_nak;
int numCC = UE_list->numactiveCCs[UE_id];
int pCCid = UE_list->pCC_id[UE_id];
int numCC = UE_info->numactiveCCs[UE_id];
int pCCid = UE_info->pCC_id[UE_id];
int spatial_bundling = 0;
int tmode[5];
int i, j, m;
......@@ -3857,7 +3791,7 @@ extract_harq(module_id_t mod_idP,
sub_frame_t subframe_tx;
int frame_tx;
uint8_t harq_pid;
LTE_PhysicalConfigDedicated_t *physicalConfigDedicated = UE_list->UE_template[pCCid][UE_id].physicalConfigDedicated;
LTE_PhysicalConfigDedicated_t *physicalConfigDedicated = UE_info->UE_template[pCCid][UE_id].physicalConfigDedicated;
if (physicalConfigDedicated != NULL && physicalConfigDedicated->pucch_ConfigDedicated != NULL &&
physicalConfigDedicated->ext7 != NULL && physicalConfigDedicated->ext7->pucch_ConfigDedicated_r13 != NULL &&
......@@ -3991,7 +3925,7 @@ extract_harq(module_id_t mod_idP,
sched_ctl->round[CC_idP][harq_pid]);
// use 1 HARQ process of BL/CE UE for now
if (UE_list->UE_template[pCCid][UE_id].rach_resource_type > 0) harq_pid = 0;
if (UE_info->UE_template[pCCid][UE_id].rach_resource_type > 0) harq_pid = 0;
switch (harq_indication_fdd->mode) {
case 0: // Format 1a/b (10.1.2.1)
......@@ -4477,18 +4411,18 @@ extract_pucch_csi(module_id_t mod_idP,
uint8_t length)
//------------------------------------------------------------------------------
{
UE_list_t *UE_list = &RC.mac[mod_idP]->UE_list;
UE_sched_ctrl_t *sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
UE_info_t *UE_info = &RC.mac[mod_idP]->UE_info;
UE_sched_ctrl_t *sched_ctl = &UE_info->UE_sched_ctrl[UE_id];
COMMON_channels_t *cc = &RC.mac[mod_idP]->common_channels[CC_idP];
int no_pmi;
uint8_t Ltab[6] = { 0, 2, 4, 4, 4, 4 };
uint8_t Jtab[6] = { 0, 2, 2, 3, 4, 4 };
int feedback_cnt;
AssertFatal(UE_list->UE_template[CC_idP][UE_id].physicalConfigDedicated != NULL, "physicalConfigDedicated is null for UE %d\n",
AssertFatal(UE_info->UE_template[CC_idP][UE_id].physicalConfigDedicated != NULL, "physicalConfigDedicated is null for UE %d\n",
UE_id);
AssertFatal(UE_list->UE_template[CC_idP][UE_id].physicalConfigDedicated->cqi_ReportConfig != NULL, "cqi_ReportConfig is null for UE %d\n",
AssertFatal(UE_info->UE_template[CC_idP][UE_id].physicalConfigDedicated->cqi_ReportConfig != NULL, "cqi_ReportConfig is null for UE %d\n",
UE_id);
struct LTE_CQI_ReportPeriodic *cqi_ReportPeriodic = UE_list->UE_template[CC_idP][UE_id].physicalConfigDedicated->cqi_ReportConfig->cqi_ReportPeriodic;
struct LTE_CQI_ReportPeriodic *cqi_ReportPeriodic = UE_info->UE_template[CC_idP][UE_id].physicalConfigDedicated->cqi_ReportConfig->cqi_ReportPeriodic;
AssertFatal(cqi_ReportPeriodic != NULL, "cqi_ReportPeriodic is null for UE %d\n",
UE_id);
// determine feedback mode
......@@ -4588,9 +4522,9 @@ extract_pusch_csi(module_id_t mod_idP,
uint8_t length)
//------------------------------------------------------------------------------
{
UE_list_t *UE_list = &RC.mac[mod_idP]->UE_list;
UE_info_t *UE_info = &RC.mac[mod_idP]->UE_info;
COMMON_channels_t *cc = &RC.mac[mod_idP]->common_channels[CC_idP];
UE_sched_ctrl_t *sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
UE_sched_ctrl_t *sched_ctl = &UE_info->UE_sched_ctrl[UE_id];
int Ntab[6] = { 0, 4, 7, 9, 10, 13 };
int Ntab_uesel[6] = { 0, 8, 13, 17, 19, 25 };
int Ltab_uesel[6] = { 0, 6, 9, 13, 15, 18 };
......@@ -4599,12 +4533,12 @@ extract_pusch_csi(module_id_t mod_idP,
int i;
uint64_t p = *(uint64_t *) pdu;
int curbyte, curbit;
AssertFatal(UE_list->UE_template[CC_idP][UE_id].physicalConfigDedicated != NULL, "physicalConfigDedicated is null for UE %d\n",
AssertFatal(UE_info->UE_template[CC_idP][UE_id].physicalConfigDedicated != NULL, "physicalConfigDedicated is null for UE %d\n",
UE_id);
AssertFatal(UE_list->UE_template[CC_idP][UE_id].physicalConfigDedicated->cqi_ReportConfig != NULL, "cqi_ReportConfig is null for UE %d\n",
AssertFatal(UE_info->UE_template[CC_idP][UE_id].physicalConfigDedicated->cqi_ReportConfig != NULL, "cqi_ReportConfig is null for UE %d\n",
UE_id);
LTE_CQI_ReportModeAperiodic_t *cqi_ReportModeAperiodic
= UE_list->UE_template[CC_idP][UE_id].physicalConfigDedicated->cqi_ReportConfig->cqi_ReportModeAperiodic;
= UE_info->UE_template[CC_idP][UE_id].physicalConfigDedicated->cqi_ReportConfig->cqi_ReportModeAperiodic;
AssertFatal(cqi_ReportModeAperiodic != NULL, "cqi_ReportModeAperiodic is null for UE %d\n",
UE_id);
int N = Ntab[cc->mib->message.dl_Bandwidth];
......@@ -4874,14 +4808,14 @@ cqi_indication(module_id_t mod_idP,
//------------------------------------------------------------------------------
{
int UE_id = find_UE_id(mod_idP, rntiP);
UE_list_t *UE_list = &RC.mac[mod_idP]->UE_list;
UE_info_t *UE_info = &RC.mac[mod_idP]->UE_info;
if (UE_id == -1) {
LOG_W(MAC, "cqi_indication: UE %x not found\n", rntiP);
return;
}
UE_sched_ctrl_t *sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
UE_sched_ctrl_t *sched_ctl = &UE_info->UE_sched_ctrl[UE_id];
if (UE_id >= 0) {
LOG_D(MAC,"%s() UE_id:%d channel:%d cqi:%d\n",
......@@ -4947,11 +4881,11 @@ SR_indication(module_id_t mod_idP,
T_INT(subframeP),
T_INT(rntiP));
int UE_id = find_UE_id(mod_idP, rntiP);
UE_list_t *UE_list = &RC.mac[mod_idP]->UE_list;
UE_info_t *UE_info = &RC.mac[mod_idP]->UE_info;
UE_sched_ctrl_t *UE_scheduling_ctrl = NULL;
if (UE_id != -1) {
UE_scheduling_ctrl = &(UE_list->UE_sched_ctrl[UE_id]);
UE_scheduling_ctrl = &(UE_info->UE_sched_ctrl[UE_id]);
if ((UE_scheduling_ctrl->cdrx_configured == TRUE) &&
(UE_scheduling_ctrl->dci0_ongoing_timer > 0) &&
......@@ -4975,8 +4909,8 @@ SR_indication(module_id_t mod_idP,
cc_idP);
}
UE_list->UE_template[cc_idP][UE_id].ul_SR = 1;
UE_list->UE_template[cc_idP][UE_id].ul_active = TRUE;
UE_info->UE_template[cc_idP][UE_id].ul_SR = 1;
UE_info->UE_template[cc_idP][UE_id].ul_active = TRUE;
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_SR_INDICATION, 1);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_SR_INDICATION, 0);
}
......@@ -5003,7 +4937,7 @@ UL_failure_indication(module_id_t mod_idP,
//------------------------------------------------------------------------------
{
int UE_id = find_UE_id(mod_idP, rntiP);
UE_list_t *UE_list = &RC.mac[mod_idP]->UE_list;
UE_info_t *UE_info = &RC.mac[mod_idP]->UE_info;
if (UE_id != -1) {
LOG_D(MAC, "[eNB %d][UE %d/%x] Frame %d subframeP %d Signaling UL Failure for UE %d on CC_id %d (timer %d)\n",
......@@ -5014,9 +4948,9 @@ UL_failure_indication(module_id_t mod_idP,
subframeP,
UE_id,
cc_idP,
UE_list->UE_sched_ctrl[UE_id].ul_failure_timer);
UE_info->UE_sched_ctrl[UE_id].ul_failure_timer);
if (UE_list->UE_sched_ctrl[UE_id].ul_failure_timer == 0) UE_list->UE_sched_ctrl[UE_id].ul_failure_timer = 1;
if (UE_info->UE_sched_ctrl[UE_id].ul_failure_timer == 0) UE_info->UE_sched_ctrl[UE_id].ul_failure_timer = 1;
} else {
// AssertFatal(0, "find_UE_id(%u,rnti %d) not found", enb_mod_idP, rntiP);
// AssertError(0, 0, "Frame %d: find_UE_id(%u,rnti %d) not found\n", frameP, enb_mod_idP, rntiP);
......@@ -5079,8 +5013,8 @@ harq_indication(module_id_t mod_idP,
return;
}
UE_list_t *UE_list = &RC.mac[mod_idP]->UE_list;
UE_sched_ctrl_t *sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
UE_info_t *UE_info = &RC.mac[mod_idP]->UE_info;
UE_sched_ctrl_t *sched_ctl = &UE_info->UE_sched_ctrl[UE_id];
COMMON_channels_t *cc = &RC.mac[mod_idP]->common_channels[CC_idP];
// extract HARQ Information
......@@ -5138,5 +5072,5 @@ ue_ul_slice_membership(module_id_t mod_id,
return 0;
}
return eNB->UE_list.active[UE_id] == TRUE && eNB->UE_list.assoc_ul_slice_idx[UE_id] == slice_idx;
return eNB->UE_info.active[UE_id] == TRUE && eNB->UE_info.assoc_ul_slice_idx[UE_id] == slice_idx;
}
......@@ -123,17 +123,15 @@ rx_sdu(const module_id_t enb_mod_idP,
unsigned short rx_lengths[NB_RB_MAX];
uint8_t lcgid = 0;
int lcgid_updated[4] = {0, 0, 0, 0};
eNB_MAC_INST *mac = NULL;
UE_list_t *UE_list = NULL;
eNB_MAC_INST *mac = RC.mac[enb_mod_idP];
UE_info_t *UE_info = &mac->UE_info;
rrc_eNB_ue_context_t *ue_contextP = NULL;
UE_sched_ctrl_t *UE_scheduling_control = NULL;
UE_TEMPLATE *UE_template_ptr = NULL;
/* Init */
current_rnti = rntiP;
UE_id = find_UE_id(enb_mod_idP, current_rnti);
mac = RC.mac[enb_mod_idP];
harq_pid = subframe2harqpid(&mac->common_channels[CC_idP], frameP, subframeP);
UE_list = &mac->UE_list;
memset(rx_ces, 0, MAX_NUM_CE * sizeof(unsigned char));
memset(rx_lcids, 0, NB_RB_MAX * sizeof(unsigned char));
memset(rx_lengths, 0, NB_RB_MAX * sizeof(unsigned short));
......@@ -142,8 +140,8 @@ rx_sdu(const module_id_t enb_mod_idP,
trace_pdu(DIRECTION_UPLINK, sduP, sdu_lenP, 0, WS_C_RNTI, current_rnti, frameP, subframeP, 0, 0);
if (UE_id != -1) {
UE_scheduling_control = &(UE_list->UE_sched_ctrl[UE_id]);
UE_template_ptr = &(UE_list->UE_template[CC_idP][UE_id]);
UE_scheduling_control = &UE_info->UE_sched_ctrl[UE_id];
UE_template_ptr = &UE_info->UE_template[CC_idP][UE_id];
LOG_D(MAC, "[eNB %d][PUSCH %d] CC_id %d %d.%d Received ULSCH sdu round %d from PHY (rnti %x, UE_id %d) ul_cqi %d\n",
enb_mod_idP,
harq_pid,
......@@ -413,8 +411,8 @@ rx_sdu(const module_id_t enb_mod_idP,
UE_id = old_UE_id;
current_rnti = old_rnti;
/* Clear timer */
UE_scheduling_control = &(UE_list->UE_sched_ctrl[UE_id]);
UE_template_ptr = &(UE_list->UE_template[CC_idP][UE_id]);
UE_scheduling_control = &UE_info->UE_sched_ctrl[UE_id];
UE_template_ptr = &UE_info->UE_template[CC_idP][UE_id];
UE_scheduling_control->uplane_inactivity_timer = 0;
UE_scheduling_control->ul_inactivity_timer = 0;
UE_scheduling_control->ul_failure_timer = 0;
......@@ -430,7 +428,7 @@ rx_sdu(const module_id_t enb_mod_idP,
UE_template_ptr->ul_SR = 1;
UE_scheduling_control->crnti_reconfigurationcomplete_flag = 1;
UE_list->UE_template[UE_PCCID(enb_mod_idP, UE_id)][UE_id].configured = 1;
UE_info->UE_template[UE_PCCID(enb_mod_idP, UE_id)][UE_id].configured = 1;
cancel_ra_proc(enb_mod_idP,
CC_idP,
frameP,
......@@ -480,8 +478,8 @@ rx_sdu(const module_id_t enb_mod_idP,
ra->crnti_rrc_mui = rrc_eNB_mui-1;
ra->crnti_harq_pid = -1;
/* Clear timer */
UE_scheduling_control = &(UE_list->UE_sched_ctrl[UE_id]);
UE_template_ptr = &(UE_list->UE_template[CC_idP][UE_id]);
UE_scheduling_control = &UE_info->UE_sched_ctrl[UE_id];
UE_template_ptr = &UE_info->UE_template[CC_idP][UE_id];
UE_scheduling_control->uplane_inactivity_timer = 0;
UE_scheduling_control->ul_inactivity_timer = 0;
UE_scheduling_control->ul_failure_timer = 0;
......@@ -532,7 +530,7 @@ rx_sdu(const module_id_t enb_mod_idP,
UE_template_ptr->ul_buffer_info[LCGID3];
RC.eNB[enb_mod_idP][CC_idP]->pusch_stats_bsr[UE_id][(frameP * 10) + subframeP] = (payload_ptr[0] & 0x3f);
if (UE_id == UE_list->head) {
if (UE_id == UE_info->list.head) {
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_UE0_BSR, (payload_ptr[0] & 0x3f));
}
......@@ -689,8 +687,8 @@ rx_sdu(const module_id_t enb_mod_idP,
frameP,
ra->rnti,
UE_id);
UE_scheduling_control = &(UE_list->UE_sched_ctrl[UE_id]);
UE_template_ptr = &(UE_list->UE_template[CC_idP][UE_id]);
UE_scheduling_control = &UE_info->UE_sched_ctrl[UE_id];
UE_template_ptr = &UE_info->UE_template[CC_idP][UE_id];
}
} else {
LOG_D(MAC, "[eNB %d][RAPROC] CC_id %d Frame %d CCCH: Received Msg3 from already registered UE %d: length %d, offset %ld\n",
......@@ -779,11 +777,11 @@ rx_sdu(const module_id_t enb_mod_idP,
enb_mod_idP, CC_idP, frameP, rx_lengths[i], UE_id,
rx_lcids[i]);
mac_rlc_data_ind(enb_mod_idP, current_rnti, enb_mod_idP, frameP, ENB_FLAG_YES, MBMS_FLAG_NO, rx_lcids[i], (char *) payload_ptr, rx_lengths[i], 1, NULL); //(unsigned int*)crc_status);
UE_list->eNB_UE_stats[CC_idP][UE_id].num_pdu_rx[rx_lcids[i]] += 1;
UE_list->eNB_UE_stats[CC_idP][UE_id].num_bytes_rx[rx_lcids[i]] += rx_lengths[i];
UE_info->eNB_UE_stats[CC_idP][UE_id].num_pdu_rx[rx_lcids[i]] += 1;
UE_info->eNB_UE_stats[CC_idP][UE_id].num_bytes_rx[rx_lcids[i]] += rx_lengths[i];
if (mac_eNB_get_rrc_status(enb_mod_idP, current_rnti) < RRC_RECONFIGURED) {
UE_list->UE_sched_ctrl[UE_id].uplane_inactivity_timer = 0;
UE_info->UE_sched_ctrl[UE_id].uplane_inactivity_timer = 0;
}
}
......@@ -840,8 +838,8 @@ rx_sdu(const module_id_t enb_mod_idP,
if ((rx_lengths[i] < SCH_PAYLOAD_SIZE_MAX) && (rx_lengths[i] > 0)) { // MAX SIZE OF transport block
mac_rlc_data_ind(enb_mod_idP, current_rnti, enb_mod_idP, frameP, ENB_FLAG_YES, MBMS_FLAG_NO, rx_lcids[i], (char *) payload_ptr, rx_lengths[i], 1, NULL);
UE_list->eNB_UE_stats[CC_idP][UE_id].num_pdu_rx[rx_lcids[i]] += 1;
UE_list->eNB_UE_stats[CC_idP][UE_id].num_bytes_rx[rx_lcids[i]] += rx_lengths[i];
UE_info->eNB_UE_stats[CC_idP][UE_id].num_pdu_rx[rx_lcids[i]] += 1;
UE_info->eNB_UE_stats[CC_idP][UE_id].num_bytes_rx[rx_lcids[i]] += rx_lengths[i];
/* Clear uplane_inactivity_timer */
UE_scheduling_control->uplane_inactivity_timer = 0;
/* Reset RRC inactivity timer after uplane activity */
......@@ -856,7 +854,7 @@ rx_sdu(const module_id_t enb_mod_idP,
current_rnti);
}
} else { /* rx_length[i] Max size */
UE_list->eNB_UE_stats[CC_idP][UE_id].num_errors_rx += 1;
UE_info->eNB_UE_stats[CC_idP][UE_id].num_errors_rx += 1;
LOG_E(MAC, "[eNB %d] CC_id %d Frame %d : Max size of transport block reached LCID %d from UE %d ",
enb_mod_idP,
CC_idP,
......@@ -921,12 +919,12 @@ rx_sdu(const module_id_t enb_mod_idP,
/* NN--> FK: we could either check the payload, or use a phy helper to detect a false msg3 */
if ((num_sdu == 0) && (num_ce == 0)) {
if (UE_id != -1)
UE_list->eNB_UE_stats[CC_idP][UE_id].total_num_errors_rx += 1;
UE_info->eNB_UE_stats[CC_idP][UE_id].total_num_errors_rx += 1;
} else {
if (UE_id != -1) {
UE_list->eNB_UE_stats[CC_idP][UE_id].pdu_bytes_rx = sdu_lenP;
UE_list->eNB_UE_stats[CC_idP][UE_id].total_pdu_bytes_rx += sdu_lenP;
UE_list->eNB_UE_stats[CC_idP][UE_id].total_num_pdus_rx += 1;
UE_info->eNB_UE_stats[CC_idP][UE_id].pdu_bytes_rx = sdu_lenP;
UE_info->eNB_UE_stats[CC_idP][UE_id].total_pdu_bytes_rx += sdu_lenP;
UE_info->eNB_UE_stats[CC_idP][UE_id].total_num_pdus_rx += 1;
}
}
......@@ -1307,20 +1305,16 @@ schedule_ulsch_rnti(module_id_t module_idP,
static int32_t tpc_accumulated = 0;
int sched_frame = 0;
int CC_id = 0;
eNB_MAC_INST *mac = NULL;
COMMON_channels_t *cc = NULL;
UE_list_t *UE_list = NULL;
slice_info_t *sli = NULL;
eNB_MAC_INST *mac = RC.mac[module_idP];
COMMON_channels_t *cc = mac->common_channels;
UE_info_t *UE_info = &mac->UE_info;
slice_info_t *sli = &mac->slice_info;
UE_TEMPLATE *UE_template_ptr = NULL;
UE_sched_ctrl_t *UE_sched_ctrl_ptr = NULL;
int rvidx_tab[4] = {0, 2, 3, 1};
int first_rb_slice[NFAPI_CC_MAX];
int n_rb_ul_tab[NFAPI_CC_MAX];
/* Init */
mac = RC.mac[module_idP];
cc = mac->common_channels;
UE_list = &(mac->UE_list);
sli = &(mac->slice_info);
memset(first_rb_slice, 0, NFAPI_CC_MAX * sizeof(int));
memset(n_rb_ul_tab, 0, NFAPI_CC_MAX * sizeof(int));
sched_frame = frameP;
......@@ -1370,7 +1364,7 @@ schedule_ulsch_rnti(module_id_t module_idP,
exit(1);
}
UE_list->first_rb_offset[CC_id][slice_idx] = cmin(n_rb_ul_tab[CC_id], sli->ul[slice_idx].first_rb);
UE_info->first_rb_offset[CC_id][slice_idx] = cmin(n_rb_ul_tab[CC_id], sli->ul[slice_idx].first_rb);
}
/*
......@@ -1382,19 +1376,19 @@ schedule_ulsch_rnti(module_id_t module_idP,
ulsch_scheduler_pre_processor(module_idP, slice_idx, frameP, subframeP, sched_frame, sched_subframeP, first_rb);
for (CC_id = 0; CC_id < RC.nb_mac_CC[module_idP]; CC_id++) {
first_rb_slice[CC_id] = first_rb[CC_id] + UE_list->first_rb_offset[CC_id][slice_idx];
first_rb_slice[CC_id] = first_rb[CC_id] + UE_info->first_rb_offset[CC_id][slice_idx];
}
// loop over all active UEs until end of function
for (int UE_id = UE_list->head_ul; UE_id >= 0; UE_id = UE_list->next_ul[UE_id]) {
for (int UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
if (!ue_ul_slice_membership(module_idP, UE_id, slice_idx)) {
continue;
}
if (UE_list->UE_template[UE_PCCID(module_idP, UE_id)][UE_id].rach_resource_type > 0) continue;
if (UE_info->UE_template[UE_PCCID(module_idP, UE_id)][UE_id].rach_resource_type > 0) continue;
// don't schedule if Msg5 is not received yet
if (UE_list->UE_template[UE_PCCID(module_idP, UE_id)][UE_id].configured == FALSE) {
if (UE_info->UE_template[UE_PCCID(module_idP, UE_id)][UE_id].configured == FALSE) {
LOG_D(MAC, "[eNB %d] frame %d, subframe %d, UE %d: not configured, skipping UE scheduling \n",
module_idP,
frameP,
......@@ -1415,11 +1409,11 @@ schedule_ulsch_rnti(module_id_t module_idP,
}
// loop over all active UL CC_ids for this UE until end of function
for (int n = 0; n < UE_list->numactiveULCCs[UE_id]; n++) {
for (int n = 0; n < UE_info->numactiveULCCs[UE_id]; n++) {
/* This is the actual CC_id in the list */
CC_id = UE_list->ordered_ULCCids[n][UE_id];
UE_template_ptr = &(UE_list->UE_template[CC_id][UE_id]);
UE_sched_ctrl_ptr = &(UE_list->UE_sched_ctrl[UE_id]);
CC_id = UE_info->ordered_ULCCids[n][UE_id];
UE_template_ptr = &UE_info->UE_template[CC_id][UE_id];
UE_sched_ctrl_ptr = &UE_info->UE_sched_ctrl[UE_id];
harq_pid = subframe2harqpid(&cc[CC_id], sched_frame, sched_subframeP);
round_index = UE_sched_ctrl_ptr->round_UL[CC_id][harq_pid];
AssertFatal(round_index < 8, "round %d > 7 for UE %d/%x\n",
......@@ -1578,10 +1572,10 @@ schedule_ulsch_rnti(module_id_t module_idP,
ndi = 1 - UE_template_ptr->oldNDI_UL[harq_pid]; // NDI: new data indicator
UE_template_ptr->oldNDI_UL[harq_pid] = ndi;
UE_list->eNB_UE_stats[CC_id][UE_id].snr = snr;
UE_list->eNB_UE_stats[CC_id][UE_id].target_snr = target_snr;
UE_info->eNB_UE_stats[CC_id][UE_id].snr = snr;
UE_info->eNB_UE_stats[CC_id][UE_id].target_snr = target_snr;
UE_template_ptr->mcs_UL[harq_pid] = cmin(UE_template_ptr->pre_assigned_mcs_ul, sli->ul[slice_idx].maxmcs);
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs1= UE_template_ptr->mcs_UL[harq_pid];
UE_info->eNB_UE_stats[CC_id][UE_id].ulsch_mcs1= UE_template_ptr->mcs_UL[harq_pid];
/* CDRX */
if (UE_sched_ctrl_ptr->cdrx_configured) {
......@@ -1598,7 +1592,7 @@ schedule_ulsch_rnti(module_id_t module_idP,
rb_table_index = 5; // for PHR
}
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs2 = UE_template_ptr->mcs_UL[harq_pid];
UE_info->eNB_UE_stats[CC_id][UE_id].ulsch_mcs2 = UE_template_ptr->mcs_UL[harq_pid];
while (((rb_table[rb_table_index] > (n_rb_ul_tab[CC_id] - first_rb_slice[CC_id])) ||
(rb_table[rb_table_index] > 45)) && (rb_table_index > 0)) {
......@@ -1606,9 +1600,9 @@ schedule_ulsch_rnti(module_id_t module_idP,
}
UE_template_ptr->TBS_UL[harq_pid] = get_TBS_UL(UE_template_ptr->mcs_UL[harq_pid], rb_table[rb_table_index]);
UE_list->eNB_UE_stats[CC_id][UE_id].total_rbs_used_rx += rb_table[rb_table_index];
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_TBS = UE_template_ptr->TBS_UL[harq_pid];
UE_list->eNB_UE_stats[CC_id][UE_id].total_ulsch_TBS += UE_template_ptr->TBS_UL[harq_pid];
UE_info->eNB_UE_stats[CC_id][UE_id].total_rbs_used_rx += rb_table[rb_table_index];
UE_info->eNB_UE_stats[CC_id][UE_id].ulsch_TBS = UE_template_ptr->TBS_UL[harq_pid];
UE_info->eNB_UE_stats[CC_id][UE_id].total_ulsch_TBS += UE_template_ptr->TBS_UL[harq_pid];
T(T_ENB_MAC_UE_UL_SCHEDULE,
T_INT(module_idP),
T_INT(CC_id),
......@@ -1627,7 +1621,7 @@ schedule_ulsch_rnti(module_id_t module_idP,
UE_template_ptr->cqi_req[harq_pid] = cqi_req;
UE_sched_ctrl_ptr->ul_scheduled |= (1 << harq_pid);
if (UE_id == UE_list->head) {
if (UE_id == UE_info->list.head) {
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_UE0_SCHEDULED, UE_sched_ctrl_ptr->ul_scheduled);
}
......@@ -1926,7 +1920,7 @@ void schedule_ulsch_rnti_emtc(module_id_t module_idP,
eNB_MAC_INST *eNB = RC.mac[module_idP];
eNB_RRC_INST *rrc = RC.rrc[module_idP];
COMMON_channels_t *cc = eNB->common_channels;
UE_list_t *UE_list = &(eNB->UE_list);
UE_info_t *UE_info = &eNB->UE_info;
UE_TEMPLATE *UE_template = NULL;
UE_sched_ctrl_t *UE_sched_ctrl = NULL;
uint8_t Total_Num_Rep_ULSCH,pusch_maxNumRepetitionCEmodeA_r13;
......@@ -1944,8 +1938,8 @@ void schedule_ulsch_rnti_emtc(module_id_t module_idP,
nfapi_ul_config_request_pdu_t *ul_config_pdu_Rep;
/* Loop over all active UEs */
for (UE_id = UE_list->head_ul; UE_id >= 0; UE_id = UE_list->next_ul[UE_id]) {
UE_template = &(UE_list->UE_template[UE_PCCID(module_idP, UE_id)][UE_id]);
for (UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
UE_template = &UE_info->UE_template[UE_PCCID(module_idP, UE_id)][UE_id];
/* LTE-M device */
if (UE_template->rach_resource_type == 0) {
......@@ -1974,12 +1968,12 @@ void schedule_ulsch_rnti_emtc(module_id_t module_idP,
}
/* Loop over all active UL CC_ids for this UE */
for (n = 0; n < UE_list->numactiveULCCs[UE_id]; n++) {
for (n = 0; n < UE_info->numactiveULCCs[UE_id]; n++) {
/* This is the actual CC_id in the list */
CC_id = UE_list->ordered_ULCCids[n][UE_id];
CC_id = UE_info->ordered_ULCCids[n][UE_id];
N_RB_UL = to_prb(cc[CC_id].ul_Bandwidth);
UE_template = &(UE_list->UE_template[CC_id][UE_id]);
UE_sched_ctrl = &UE_list->UE_sched_ctrl[UE_id];
UE_template = &UE_info->UE_template[CC_id][UE_id];
UE_sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
harq_pid = 0;
round_UL = UE_sched_ctrl->round_UL[CC_id][harq_pid];
AssertFatal(round_UL < 8,"round_UL %d > 7 for UE %d/%x\n",
......@@ -2074,12 +2068,12 @@ void schedule_ulsch_rnti_emtc(module_id_t module_idP,
UE_template->oldNDI_UL[harq_pid] = ndi;
UE_template->mcs_UL[harq_pid] = 4;
UE_template->TBS_UL[harq_pid] = get_TBS_UL(UE_template->mcs_UL[harq_pid], 6);
UE_list->eNB_UE_stats[CC_id][UE_id].snr = snr;
UE_list->eNB_UE_stats[CC_id][UE_id].target_snr = target_snr;
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs1 = 4;
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs2 = UE_template->mcs_UL[harq_pid];
UE_list->eNB_UE_stats[CC_id][UE_id].total_rbs_used_rx += 6;
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_TBS = UE_template->TBS_UL[harq_pid];
UE_info->eNB_UE_stats[CC_id][UE_id].snr = snr;
UE_info->eNB_UE_stats[CC_id][UE_id].target_snr = target_snr;
UE_info->eNB_UE_stats[CC_id][UE_id].ulsch_mcs1 = 4;
UE_info->eNB_UE_stats[CC_id][UE_id].ulsch_mcs2 = UE_template->mcs_UL[harq_pid];
UE_info->eNB_UE_stats[CC_id][UE_id].total_rbs_used_rx += 6;
UE_info->eNB_UE_stats[CC_id][UE_id].ulsch_TBS = UE_template->TBS_UL[harq_pid];
T(T_ENB_MAC_UE_UL_SCHEDULE,
T_INT(module_idP),
T_INT(CC_id),
......@@ -2096,7 +2090,7 @@ void schedule_ulsch_rnti_emtc(module_id_t module_idP,
UE_template->nb_rb_ul[harq_pid] = 6;
UE_sched_ctrl->ul_scheduled |= (1 << harq_pid);
if (UE_id == UE_list->head) {
if (UE_id == UE_info->list.head) {
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_UE0_SCHEDULED, UE_sched_ctrl->ul_scheduled);
}
......
......@@ -1129,7 +1129,15 @@ typedef struct {
uint8_t sb_size;
uint8_t nb_active_sb;
} SBMAP_CONF;
/*! \brief UE list used by eNB to order UEs/CC for scheduling*/
/*! \brief UE_list_t is a "list" of users within UE_info_t. Especially useful
* in the scheduler and for keeping "classes" of users. */
typedef struct {
int head;
int next[MAX_MOBILES_PER_ENB];
} UE_list_t;
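As an aside (not part of the patch): a minimal, self-contained sketch of how such a head/next list is built and walked. MAX_UE, ue_list_sketch_t and sketch_add() below are local stand-ins for MAX_MOBILES_PER_ENB, UE_list_t and add_ue_list(); the iteration idiom matches the for (UE_id = UE_info->list.head; ...) loops used throughout this diff.

#include <stdio.h>

#define MAX_UE 16                /* stand-in for MAX_MOBILES_PER_ENB */

typedef struct {
  int head;                      /* first UE_id in the list, -1 if empty */
  int next[MAX_UE];              /* next[i] is the UE_id following i, -1 at the tail */
} ue_list_sketch_t;

/* append UE_id at the tail, mirroring add_ue_list() from this commit */
static void sketch_add(ue_list_sketch_t *l, int UE_id) {
  if (l->head == -1) {
    l->head = UE_id;
  } else {
    int i = l->head;
    while (l->next[i] >= 0)
      i = l->next[i];
    l->next[i] = UE_id;
  }
  l->next[UE_id] = -1;
}

int main(void) {
  ue_list_sketch_t dl = { .head = -1 };
  for (int i = 0; i < MAX_UE; i++)
    dl.next[i] = -1;

  sketch_add(&dl, 3);
  sketch_add(&dl, 0);
  sketch_add(&dl, 7);

  /* the iteration pattern used by the schedulers in this diff:
   * for (UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) */
  for (int UE_id = dl.head; UE_id >= 0; UE_id = dl.next[UE_id])
    printf("schedule UE %d\n", UE_id);

  return 0;
}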
/*! \brief UE info used by eNB to order UEs/CC for scheduling*/
typedef struct {
DLSCH_PDU DLSCH_pdu[NFAPI_CC_MAX][2][MAX_MOBILES_PER_ENB];
......@@ -1151,11 +1159,7 @@ typedef struct {
eNB_UE_STATS eNB_UE_stats[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB];
/// scheduling control info
UE_sched_ctrl_t UE_sched_ctrl[MAX_MOBILES_PER_ENB];
int next[MAX_MOBILES_PER_ENB];
int head;
int next_ul[MAX_MOBILES_PER_ENB];
int head_ul;
int avail;
UE_list_t list;
int num_UEs;
boolean_t active[MAX_MOBILES_PER_ENB];
......@@ -1163,7 +1167,7 @@ typedef struct {
uint16_t sorting_criteria[MAX_NUM_SLICES][CR_NUM];
uint16_t first_rb_offset[NFAPI_CC_MAX][MAX_NUM_SLICES];
int assoc_ul_slice_idx[MAX_MOBILES_PER_ENB];
} UE_list_t;
} UE_info_t;
/*! \brief deleting control information*/
typedef struct {
......@@ -1395,7 +1399,7 @@ typedef struct eNB_MAC_INST_s {
nfapi_ue_release_request_t UE_release_req;
/// UL handle
uint32_t ul_handle;
UE_list_t UE_list;
UE_info_t UE_info;
/// slice-related configuration
slice_info_t slice_info;
......
......@@ -193,7 +193,7 @@ void add_msg3(module_id_t module_idP, int CC_id, RA_t *ra, frame_t frameP,
//main.c
void init_UE_list(UE_list_t *UE_list);
void init_UE_info(UE_info_t *UE_info);
void init_slice_info(slice_info_t *sli);
......@@ -448,7 +448,7 @@ boolean_t CCE_allocation_infeasible(int module_idP,
void set_ue_dai(sub_frame_t subframeP,
int UE_id,
uint8_t CC_id, uint8_t tdd_config, UE_list_t *UE_list);
uint8_t CC_id, uint8_t tdd_config, UE_info_t *UE_info);
uint8_t frame_subframe2_dl_harq_pid(LTE_TDD_Config_t *tdd_Config, int abs_frameP, sub_frame_t subframeP);
/** \brief First stage of PCH Scheduling. Gets a PCH SDU from RRC if available and computes the MCS required to transport it as a function of the SDU length. It assumes a length less than or equal to 64 bytes (MCS 6, 3 PRBs).
......@@ -685,10 +685,11 @@ int rrc_mac_remove_ue(module_id_t Mod_id, rnti_t rntiP);
void store_dlsch_buffer(module_id_t Mod_id, frame_t frameP, sub_frame_t subframeP);
void assign_rbs_required(module_id_t Mod_id, int CC_id, uint16_t nb_rbs_required[MAX_MOBILES_PER_ENB]);
int prev(UE_list_t *listP, int nodeP, int ul_flag);
void add_ue_list(UE_list_t *listP, int UE_id, int ul_flag);
void dump_ue_list(UE_list_t *listP, int ul_flag);
int UE_num_active_CC(UE_list_t *listP, int ue_idP);
int prev(UE_list_t *listP, int nodeP);
void add_ue_list(UE_list_t *listP, int UE_id);
int remove_ue_list(UE_list_t *listP, int UE_id);
void dump_ue_list(UE_list_t *listP);
int UE_num_active_CC(UE_info_t *listP, int ue_idP);
int UE_PCCID(module_id_t mod_idP, int ue_idP);
rnti_t UE_RNTI(module_id_t mod_idP, int ue_idP);
......
......@@ -43,23 +43,19 @@
extern RAN_CONTEXT_t RC;
void init_UE_list(UE_list_t *UE_list)
void init_UE_info(UE_info_t *UE_info)
{
int list_el;
UE_list->num_UEs = 0;
UE_list->head = -1;
UE_list->head_ul = -1;
UE_list->avail = 0;
for (list_el = 0; list_el < MAX_MOBILES_PER_ENB; list_el++) {
UE_list->next[list_el] = -1;
UE_list->next_ul[list_el] = -1;
}
memset(UE_list->DLSCH_pdu, 0, sizeof(UE_list->DLSCH_pdu));
memset(UE_list->UE_template, 0, sizeof(UE_list->UE_template));
memset(UE_list->eNB_UE_stats, 0, sizeof(UE_list->eNB_UE_stats));
memset(UE_list->UE_sched_ctrl, 0, sizeof(UE_list->UE_sched_ctrl));
memset(UE_list->active, 0, sizeof(UE_list->active));
memset(UE_list->assoc_ul_slice_idx, 0, sizeof(UE_list->assoc_ul_slice_idx));
UE_info->num_UEs = 0;
UE_info->list.head = -1;
for (list_el = 0; list_el < MAX_MOBILES_PER_ENB; list_el++)
UE_info->list.next[list_el] = -1;
memset(UE_info->DLSCH_pdu, 0, sizeof(UE_info->DLSCH_pdu));
memset(UE_info->UE_template, 0, sizeof(UE_info->UE_template));
memset(UE_info->eNB_UE_stats, 0, sizeof(UE_info->eNB_UE_stats));
memset(UE_info->UE_sched_ctrl, 0, sizeof(UE_info->UE_sched_ctrl));
memset(UE_info->active, 0, sizeof(UE_info->active));
memset(UE_info->assoc_ul_slice_idx, 0, sizeof(UE_info->assoc_ul_slice_idx));
}
void init_slice_info(slice_info_t *sli)
......@@ -134,7 +130,7 @@ void mac_top_init_eNB(void)
mac[i]->if_inst = IF_Module_init(i);
init_UE_list(&mac[i]->UE_list);
init_UE_info(&mac[i]->UE_info);
init_slice_info(&mac[i]->slice_info);
}
......@@ -160,12 +156,12 @@ void mac_init_cell_params(int Mod_idP, int CC_idP)
memset(&RC.mac[Mod_idP]->eNB_stats, 0, sizeof(eNB_STATS));
UE_template =
(UE_TEMPLATE *) & RC.mac[Mod_idP]->UE_list.UE_template[CC_idP][0];
(UE_TEMPLATE *) & RC.mac[Mod_idP]->UE_info.UE_template[CC_idP][0];
for (j = 0; j < MAX_MOBILES_PER_ENB; j++) {
UE_template[j].rnti = 0;
// initialize the eNB to UE statistics
memset(&RC.mac[Mod_idP]->UE_list.eNB_UE_stats[CC_idP][j], 0,
memset(&RC.mac[Mod_idP]->UE_info.eNB_UE_stats[CC_idP][j], 0,
sizeof(eNB_UE_STATS));
}
......@@ -260,4 +256,4 @@ void *mac_enb_task(void *arg)
} // end while
return NULL;
}
\ No newline at end of file
}
......@@ -53,7 +53,6 @@ extern RAN_CONTEXT_t RC;
#define DEBUG_eNB_SCHEDULER 1
#define DEBUG_HEADER_PARSING 1
void
sort_ue_ul(module_id_t module_idP,
int slice_idx,
......@@ -66,14 +65,14 @@ void
store_dlsch_buffer(module_id_t Mod_id,
frame_t frameP,
sub_frame_t subframeP) {
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
UE_info_t *UE_info = &RC.mac[Mod_id]->UE_info;
for (int UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; UE_id++) {
if (UE_list->active[UE_id] != TRUE)
if (UE_info->active[UE_id] != TRUE)
continue;
/* TODO why UE_template per CC? */
UE_TEMPLATE *UE_template = &UE_list->UE_template[UE_PCCID(Mod_id, UE_id)][UE_id];
UE_TEMPLATE *UE_template = &UE_info->UE_template[UE_PCCID(Mod_id, UE_id)][UE_id];
UE_template->dl_buffer_total = 0;
UE_template->dl_pdus_total = 0;
......@@ -132,17 +131,17 @@ store_dlsch_buffer(module_id_t Mod_id,
int get_rbs_required(module_id_t Mod_id,
int CC_id,
int UE_id) {
const UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
const UE_info_t *UE_info = &RC.mac[Mod_id]->UE_info;
if (UE_list->UE_template[CC_id][UE_id].dl_buffer_total == 0)
if (UE_info->UE_template[CC_id][UE_id].dl_buffer_total == 0)
return 0;
const int dlsch_mcs1 = cqi_to_mcs[UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]];
const int dlsch_mcs1 = cqi_to_mcs[UE_info->UE_sched_ctrl[UE_id].dl_cqi[CC_id]];
const int min_rb_unit = get_min_rb_unit(Mod_id, CC_id);
int nb_rbs_required = min_rb_unit;
/* calculating required number of RBs for each UE */
int TBS = get_TBS_DL(dlsch_mcs1, nb_rbs_required);
while (TBS < UE_list->UE_template[CC_id][UE_id].dl_buffer_total) {
while (TBS < UE_info->UE_template[CC_id][UE_id].dl_buffer_total) {
nb_rbs_required += min_rb_unit;
TBS = get_TBS_DL(dlsch_mcs1, nb_rbs_required);
}
......@@ -153,11 +152,11 @@ void
assign_rbs_required(module_id_t Mod_id,
int CC_id,
uint16_t nb_rbs_required[MAX_MOBILES_PER_ENB]) {
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
for (int UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
UE_info_t *UE_info = &RC.mac[Mod_id]->UE_info;
for (int UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
nb_rbs_required[UE_id] = get_rbs_required(Mod_id, CC_id, UE_id);
// TODO: the following should not be here
UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs1 = cqi_to_mcs[UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]];
UE_info->eNB_UE_stats[CC_id][UE_id].dlsch_mcs1 = cqi_to_mcs[UE_info->UE_sched_ctrl[UE_id].dl_cqi[CC_id]];
}
}
......@@ -167,7 +166,7 @@ maxround_ul(module_id_t Mod_id, uint16_t rnti, int sched_frame,
sub_frame_t sched_subframe) {
uint8_t round, round_max = 0, UE_id;
int CC_id, harq_pid;
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
UE_info_t *UE_info = &RC.mac[Mod_id]->UE_info;
COMMON_channels_t *cc;
for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; CC_id++) {
......@@ -178,7 +177,7 @@ maxround_ul(module_id_t Mod_id, uint16_t rnti, int sched_frame,
continue;
harq_pid = subframe2harqpid(cc, sched_frame, sched_subframe);
round = UE_list->UE_sched_ctrl[UE_id].round_UL[CC_id][harq_pid];
round = UE_info->UE_sched_ctrl[UE_id].round_UL[CC_id][harq_pid];
if (round > round_max) {
round_max = round;
......@@ -200,21 +199,21 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
int ue_count_newtx = 0;
int ue_count_retx = 0;
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
UE_info_t *UE_info = &RC.mac[Mod_id]->UE_info;
// Find total UE count, and account the RBs required for retransmissions
for (int UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
for (int UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
const rnti_t rnti = UE_RNTI(Mod_id, UE_id);
if (rnti == NOT_A_RNTI) continue;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
if (UE_info->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
const COMMON_channels_t *cc = &RC.mac[Mod_id]->common_channels[CC_id];
const uint8_t harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP,subframeP);
const uint8_t round = UE_list->UE_sched_ctrl[UE_id].round[CC_id][harq_pid];
const uint8_t round = UE_info->UE_sched_ctrl[UE_id].round[CC_id][harq_pid];
// retransmission
if (round != 8) {
nb_rbs_required[UE_id] = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid];
nb_rbs_required[UE_id] = UE_info->UE_template[CC_id][UE_id].nb_rb[harq_pid];
rbs_retx += nb_rbs_required[UE_id];
ue_count_retx++;
} else {
......@@ -234,14 +233,14 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
average_rbs_per_user = (uint16_t)min_rb_unit;
}
for (int UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
for (int UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
const rnti_t rnti = UE_RNTI(Mod_id, UE_id);
if (rnti == NOT_A_RNTI) continue;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
if (UE_info->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
const COMMON_channels_t *cc = &RC.mac[Mod_id]->common_channels[CC_id];
const uint8_t harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP,subframeP);
const uint8_t round = UE_list->UE_sched_ctrl[UE_id].round[CC_id][harq_pid];
const uint8_t round = UE_info->UE_sched_ctrl[UE_id].round[CC_id][harq_pid];
/* TODO the first seems unnecessary, remove it */
if (mac_eNB_get_rrc_status(Mod_id, rnti) < RRC_RECONFIGURED || round != 8)
......@@ -256,11 +255,11 @@ void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id,
uint16_t nb_rbs_required[MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_accounted[MAX_MOBILES_PER_ENB],
uint8_t rballoc_sub[N_RBG_MAX]) {
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
UE_info_t *UE_info = &RC.mac[Mod_id]->UE_info;
for (int UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
for (int UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
if (UE_RNTI(Mod_id, UE_id) == NOT_A_RNTI) continue;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
if (UE_info->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
dlsch_scheduler_pre_processor_allocate(Mod_id,
UE_id,
......@@ -316,11 +315,11 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
nb_rbs_accounted,
rballoc_sub);
const UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
const UE_info_t *UE_info = &RC.mac[Mod_id]->UE_info;
const COMMON_channels_t *cc = &RC.mac[Mod_id]->common_channels[CC_id];
const int N_RBG = to_rbg(cc->mib->message.dl_Bandwidth);
for (int UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
const UE_sched_ctrl_t *ue_sched_ctrl = &UE_list->UE_sched_ctrl[UE_id];
for (int UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
const UE_sched_ctrl_t *ue_sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
if (ue_sched_ctrl->pre_nb_available_rbs[CC_id] == 0)
continue;
......@@ -334,10 +333,10 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
frameP,
subframeP,
UE_id,
UE_list->UE_template[CC_id][UE_id].rnti,
UE_info->UE_template[CC_id][UE_id].rnti,
s,
ue_sched_ctrl->pre_nb_available_rbs[CC_id],
UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs1);
UE_info->eNB_UE_stats[CC_id][UE_id].dlsch_mcs1);
}
}
......@@ -345,16 +344,16 @@ void
dlsch_scheduler_pre_processor_reset(module_id_t module_idP,
int CC_id,
uint8_t rballoc_sub[N_RBG_MAX]) {
UE_list_t *UE_list = &RC.mac[module_idP]->UE_list;
UE_info_t *UE_info = &RC.mac[module_idP]->UE_info;
for (int UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; ++UE_id) {
UE_sched_ctrl_t *ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
UE_sched_ctrl_t *ue_sched_ctl = &UE_info->UE_sched_ctrl[UE_id];
const rnti_t rnti = UE_RNTI(module_idP, UE_id);
if (rnti == NOT_A_RNTI)
continue;
if (UE_list->active[UE_id] != TRUE)
if (UE_info->active[UE_id] != TRUE)
continue;
// initialize harq_pid and round
......@@ -389,7 +388,7 @@ dlsch_scheduler_pre_processor_allocate(module_id_t Mod_id,
uint16_t nb_rbs_required,
uint16_t *nb_rbs_remaining,
uint8_t rballoc_sub[N_RBG_MAX]) {
UE_sched_ctrl_t *ue_sched_ctl = &RC.mac[Mod_id]->UE_list.UE_sched_ctrl[UE_id];
UE_sched_ctrl_t *ue_sched_ctl = &RC.mac[Mod_id]->UE_info.UE_sched_ctrl[UE_id];
const int N_RBG = to_rbg(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth);
const int N_RB_DL = to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth);
const int min_rb_unit = get_min_rb_unit(Mod_id, CC_id);
......@@ -442,7 +441,7 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
int16_t total_remaining_rbs[NFAPI_CC_MAX];
uint16_t total_ue_count[NFAPI_CC_MAX];
eNB_MAC_INST *eNB = RC.mac[module_idP];
UE_list_t *UE_list = &eNB->UE_list;
UE_info_t *UE_info = &eNB->UE_info;
slice_info_t *sli = &eNB->slice_info;
UE_TEMPLATE *UE_template = 0;
UE_sched_ctrl_t *ue_sched_ctl;
......@@ -467,11 +466,11 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
}
// Step 1.5: Calculate total_ue_count
for (UE_id = UE_list->head_ul; UE_id >= 0; UE_id = UE_list->next_ul[UE_id]) {
for (UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
// This is not the actual CC_id in the list
for (n = 0; n < UE_list->numactiveULCCs[UE_id]; n++) {
CC_id = UE_list->ordered_ULCCids[n][UE_id];
UE_template = &UE_list->UE_template[CC_id][UE_id];
for (n = 0; n < UE_info->numactiveULCCs[UE_id]; n++) {
CC_id = UE_info->ordered_ULCCids[n][UE_id];
UE_template = &UE_info->UE_template[CC_id][UE_id];
if (UE_template->pre_allocated_nb_rb_ul[slice_idx] > 0) {
total_ue_count[CC_id]++;
......@@ -482,16 +481,16 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
// step 2: calculate the average rb per UE
LOG_D(MAC, "In ulsch_preprocessor: step2 \n");
for (UE_id = UE_list->head_ul; UE_id >= 0; UE_id = UE_list->next_ul[UE_id]) {
if (UE_list->UE_template[CC_id][UE_id].rach_resource_type > 0) continue;
for (UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
if (UE_info->UE_template[CC_id][UE_id].rach_resource_type > 0) continue;
LOG_D(MAC, "In ulsch_preprocessor: handling UE %d/%x\n",
UE_id,
rntiTable[UE_id]);
for (n = 0; n < UE_list->numactiveULCCs[UE_id]; n++) {
for (n = 0; n < UE_info->numactiveULCCs[UE_id]; n++) {
// This is the actual CC_id in the list
CC_id = UE_list->ordered_ULCCids[n][UE_id];
CC_id = UE_info->ordered_ULCCids[n][UE_id];
LOG_D(MAC, "In ulsch_preprocessor: handling UE %d/%x CCid %d\n",
UE_id,
rntiTable[UE_id],
......@@ -502,10 +501,10 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
max_num_ue_to_be_scheduled+=1;
} */
N_RB_UL = to_prb(eNB->common_channels[CC_id].ul_Bandwidth);
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
ue_sched_ctl = &UE_info->UE_sched_ctrl[UE_id];
ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx] =
nb_rbs_allowed_slice(sli->ul[slice_idx].pct, N_RB_UL);
first_rb_offset = UE_list->first_rb_offset[CC_id][slice_idx];
first_rb_offset = UE_info->first_rb_offset[CC_id][slice_idx];
available_rbs =
cmin(ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx], N_RB_UL - first_rb[CC_id] - first_rb_offset);
......@@ -539,22 +538,22 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
}
// step 3: assign RBs
for (UE_id = UE_list->head_ul; UE_id >= 0; UE_id = UE_list->next_ul[UE_id]) {
for (UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
// if (continueTable[UE_id]) continue;
for (n = 0; n < UE_list->numactiveULCCs[UE_id]; n++) {
for (n = 0; n < UE_info->numactiveULCCs[UE_id]; n++) {
// This is the actual CC_id in the list
CC_id = UE_list->ordered_ULCCids[n][UE_id];
UE_template = &UE_list->UE_template[CC_id][UE_id];
CC_id = UE_info->ordered_ULCCids[n][UE_id];
UE_template = &UE_info->UE_template[CC_id][UE_id];
harq_pid = subframe2harqpid(&RC.mac[module_idP]->common_channels[CC_id],
sched_frameP, sched_subframeP);
// mac_xface->get_ue_active_harq_pid(module_idP,CC_id,rnti,frameP,subframeP,&harq_pid,&round,openair_harq_UL);
if (UE_list->UE_sched_ctrl[UE_id].round_UL[CC_id][harq_pid] > 0) {
nb_allocated_rbs[CC_id][UE_id] = UE_list->UE_template[CC_id][UE_id].nb_rb_ul[harq_pid];
if (UE_info->UE_sched_ctrl[UE_id].round_UL[CC_id][harq_pid] > 0) {
nb_allocated_rbs[CC_id][UE_id] = UE_info->UE_template[CC_id][UE_id].nb_rb_ul[harq_pid];
} else {
nb_allocated_rbs[CC_id][UE_id] =
cmin(UE_list->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[slice_idx], average_rbs_per_user[CC_id]);
cmin(UE_info->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[slice_idx], average_rbs_per_user[CC_id]);
}
total_allocated_rbs[CC_id] += nb_allocated_rbs[CC_id][UE_id];
......@@ -568,16 +567,16 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
}
// step 4: assign the remaining RBs and set the pre_allocated rbs accordingly
for (UE_id = UE_list->head_ul; UE_id >= 0; UE_id = UE_list->next_ul[UE_id]) {
for (UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
// if (continueTable[UE_id]) continue;
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
ue_sched_ctl = &UE_info->UE_sched_ctrl[UE_id];
for (n = 0; n < UE_list->numactiveULCCs[UE_id]; n++) {
for (n = 0; n < UE_info->numactiveULCCs[UE_id]; n++) {
// This is the actual CC_id in the list
CC_id = UE_list->ordered_ULCCids[n][UE_id];
UE_template = &UE_list->UE_template[CC_id][UE_id];
CC_id = UE_info->ordered_ULCCids[n][UE_id];
UE_template = &UE_info->UE_template[CC_id][UE_id];
N_RB_UL = to_prb(eNB->common_channels[CC_id].ul_Bandwidth);
first_rb_offset = UE_list->first_rb_offset[CC_id][slice_idx];
first_rb_offset = UE_info->first_rb_offset[CC_id][slice_idx];
available_rbs = cmin(ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx], N_RB_UL - first_rb[CC_id] - first_rb_offset);
total_remaining_rbs[CC_id] = available_rbs - total_allocated_rbs[CC_id];
......@@ -620,7 +619,7 @@ assign_max_mcs_min_rb(module_id_t module_idP,
int mcs;
int rb_table_index = 0, tbs, tx_power;
eNB_MAC_INST *eNB = RC.mac[module_idP];
UE_list_t *UE_list = &eNB->UE_list;
UE_info_t *UE_info = &eNB->UE_info;
slice_info_t *sli = &eNB->slice_info;
UE_TEMPLATE *UE_template;
UE_sched_ctrl_t *ue_sched_ctl;
......@@ -628,8 +627,8 @@ assign_max_mcs_min_rb(module_id_t module_idP,
int N_RB_UL;
int first_rb_offset, available_rbs;
for (i = UE_list->head_ul; i >= 0; i = UE_list->next_ul[i]) {
if (UE_list->UE_sched_ctrl[i].phr_received == 1) {
for (i = UE_info->list.head; i >= 0; i = UE_info->list.next[i]) {
if (UE_info->UE_sched_ctrl[i].phr_received == 1) {
/* if we've received the power headroom information from the UE, we can go to
* maximum mcs */
mcs = cmin(20, sli->ul[slice_idx].maxmcs);
......@@ -640,18 +639,18 @@ assign_max_mcs_min_rb(module_id_t module_idP,
UE_id = i;
for (n = 0; n < UE_list->numactiveULCCs[UE_id]; n++) {
for (n = 0; n < UE_info->numactiveULCCs[UE_id]; n++) {
// This is the actual CC_id in the list
CC_id = UE_list->ordered_ULCCids[n][UE_id];
CC_id = UE_info->ordered_ULCCids[n][UE_id];
AssertFatal(CC_id < RC.nb_mac_CC[module_idP], "CC_id %u should be < %u, loop n=%u < numactiveULCCs[%u]=%u",
CC_id,
NFAPI_CC_MAX,
n,
UE_id,
UE_list->numactiveULCCs[UE_id]);
UE_template = &UE_list->UE_template[CC_id][UE_id];
UE_info->numactiveULCCs[UE_id]);
UE_template = &UE_info->UE_template[CC_id][UE_id];
UE_template->pre_assigned_mcs_ul = mcs;
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
ue_sched_ctl = &UE_info->UE_sched_ctrl[UE_id];
Ncp = eNB->common_channels[CC_id].Ncp;
N_RB_UL = to_prb(eNB->common_channels[CC_id].ul_Bandwidth);
ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx] = nb_rbs_allowed_slice(sli->ul[slice_idx].pct, N_RB_UL);
......@@ -675,7 +674,7 @@ assign_max_mcs_min_rb(module_id_t module_idP,
tx_power = estimate_ue_tx_power(tbs, rb_table[rb_table_index], 0, Ncp, 0); // fixme: set use_srs
}
first_rb_offset = UE_list->first_rb_offset[CC_id][slice_idx];
first_rb_offset = UE_info->first_rb_offset[CC_id][slice_idx];
available_rbs =
cmin(ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx], N_RB_UL - first_rb[CC_id] - first_rb_offset);
......@@ -735,7 +734,7 @@ struct sort_ue_ul_params {
static int ue_ul_compare(const void *_a, const void *_b, void *_params) {
struct sort_ue_ul_params *params = _params;
UE_list_t *UE_list = &RC.mac[params->module_idP]->UE_list;
UE_info_t *UE_info = &RC.mac[params->module_idP]->UE_info;
int UE_id1 = *(const int *) _a;
int UE_id2 = *(const int *) _b;
int rnti1 = UE_RNTI(params->module_idP, UE_id1);
......@@ -753,19 +752,19 @@ static int ue_ul_compare(const void *_a, const void *_b, void *_params) {
if (round1 < round2)
return 1;
if (UE_list->UE_template[pCCid1][UE_id1].ul_buffer_info[LCGID0] >
UE_list->UE_template[pCCid2][UE_id2].ul_buffer_info[LCGID0])
if (UE_info->UE_template[pCCid1][UE_id1].ul_buffer_info[LCGID0] >
UE_info->UE_template[pCCid2][UE_id2].ul_buffer_info[LCGID0])
return -1;
if (UE_list->UE_template[pCCid1][UE_id1].ul_buffer_info[LCGID0] <
UE_list->UE_template[pCCid2][UE_id2].ul_buffer_info[LCGID0])
if (UE_info->UE_template[pCCid1][UE_id1].ul_buffer_info[LCGID0] <
UE_info->UE_template[pCCid2][UE_id2].ul_buffer_info[LCGID0])
return 1;
int bytes_to_schedule1 = UE_list->UE_template[pCCid1][UE_id1].estimated_ul_buffer - UE_list->UE_template[pCCid1][UE_id1].scheduled_ul_bytes;
int bytes_to_schedule1 = UE_info->UE_template[pCCid1][UE_id1].estimated_ul_buffer - UE_info->UE_template[pCCid1][UE_id1].scheduled_ul_bytes;
if (bytes_to_schedule1 < 0) bytes_to_schedule1 = 0;
int bytes_to_schedule2 = UE_list->UE_template[pCCid2][UE_id2].estimated_ul_buffer - UE_list->UE_template[pCCid2][UE_id2].scheduled_ul_bytes;
int bytes_to_schedule2 = UE_info->UE_template[pCCid2][UE_id2].estimated_ul_buffer - UE_info->UE_template[pCCid2][UE_id2].scheduled_ul_bytes;
if (bytes_to_schedule2 < 0) bytes_to_schedule2 = 0;
......@@ -775,20 +774,20 @@ static int ue_ul_compare(const void *_a, const void *_b, void *_params) {
if (bytes_to_schedule1 < bytes_to_schedule2)
return 1;
if (UE_list->UE_template[pCCid1][UE_id1].pre_assigned_mcs_ul >
UE_list->UE_template[pCCid2][UE_id2].pre_assigned_mcs_ul)
if (UE_info->UE_template[pCCid1][UE_id1].pre_assigned_mcs_ul >
UE_info->UE_template[pCCid2][UE_id2].pre_assigned_mcs_ul)
return -1;
if (UE_list->UE_template[pCCid1][UE_id1].pre_assigned_mcs_ul <
UE_list->UE_template[pCCid2][UE_id2].pre_assigned_mcs_ul)
if (UE_info->UE_template[pCCid1][UE_id1].pre_assigned_mcs_ul <
UE_info->UE_template[pCCid2][UE_id2].pre_assigned_mcs_ul)
return 1;
if (UE_list->UE_sched_ctrl[UE_id1].cqi_req_timer >
UE_list->UE_sched_ctrl[UE_id2].cqi_req_timer)
if (UE_info->UE_sched_ctrl[UE_id1].cqi_req_timer >
UE_info->UE_sched_ctrl[UE_id2].cqi_req_timer)
return -1;
if (UE_list->UE_sched_ctrl[UE_id1].cqi_req_timer <
UE_list->UE_sched_ctrl[UE_id2].cqi_req_timer)
if (UE_info->UE_sched_ctrl[UE_id1].cqi_req_timer <
UE_info->UE_sched_ctrl[UE_id2].cqi_req_timer)
return 1;
return 0;
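The call site that consumes this comparator is not part of the hunk. Since ue_ul_compare has the three-argument GNU qsort_r shape and sort_ue_ul below fills an int list[] of UE_ids, one plausible way to apply it is sketched here; the qsort_r call and the helper name are assumptions, not taken from this diff:

/* Assumption: sorting via glibc's qsort_r; the actual call is outside this diff. */
#define _GNU_SOURCE
#include <stdlib.h>

struct sort_ue_ul_params;                 /* defined above in this file */
int ue_ul_compare(const void *_a, const void *_b, void *_params);

static void sort_ue_ul_candidates(int *list, int list_size,
                                  struct sort_ue_ul_params *params) {
  /* list[] holds UE_ids; the comparator ranks them by HARQ round, LCGID0
   * buffer, bytes to schedule, pre-assigned MCS and CQI request timer. */
  qsort_r(list, list_size, sizeof(int), ue_ul_compare, params);
}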
......@@ -808,11 +807,11 @@ void sort_ue_ul(module_id_t module_idP,
int list[MAX_MOBILES_PER_ENB];
int list_size = 0;
struct sort_ue_ul_params params = { module_idP, sched_frameP, sched_subframeP };
UE_list_t *UE_list = &RC.mac[module_idP]->UE_list;
UE_info_t *UE_info = &RC.mac[module_idP]->UE_info;
UE_sched_ctrl_t *UE_scheduling_control = NULL;
for (int i = 0; i < MAX_MOBILES_PER_ENB; i++) {
UE_scheduling_control = &(UE_list->UE_sched_ctrl[i]);
UE_scheduling_control = &(UE_info->UE_sched_ctrl[i]);
/* Check CDRX configuration and if UE is in active time for this subframe */
if (UE_scheduling_control->cdrx_configured == TRUE) {
......@@ -824,9 +823,9 @@ void sort_ue_ul(module_id_t module_idP,
rntiTable[i] = UE_RNTI(module_idP, i);
// Valid element and is not the actual CC_id in the list
if (UE_list->active[i] == TRUE &&
if (UE_info->active[i] == TRUE &&
rntiTable[i] != NOT_A_RNTI &&
UE_list->UE_sched_ctrl[i].ul_out_of_sync != 1 &&
UE_info->UE_sched_ctrl[i].ul_out_of_sync != 1 &&
ue_ul_slice_membership(module_idP, i, slice_idx)) {
list[list_size++] = i; // Add to list
}
......@@ -836,12 +835,12 @@ void sort_ue_ul(module_id_t module_idP,
if (list_size) { // At minimum one list element
for (int i = 0; i < list_size - 1; i++) {
UE_list->next_ul[list[i]] = list[i + 1];
UE_info->list.next[list[i]] = list[i + 1];
}
UE_list->next_ul[list[list_size - 1]] = -1;
UE_list->head_ul = list[0];
UE_info->list.next[list[list_size - 1]] = -1;
UE_info->list.head = list[0];
} else { // No element
UE_list->head_ul = -1;
UE_info->list.head = -1;
}
}
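For readers following the rename: the hunks above replace the dedicated head_ul/next_ul fields with a generic list member on UE_info_t. A minimal sketch of the presumed new types, inferred only from the accesses shown in this diff (the placeholder array size and any fields beyond head and next are assumptions; the real definitions live in the MAC headers):

/* Sketch only: inferred from usage in this diff, not copied from the headers. */
#define MAX_MOBILES_PER_ENB 16            /* placeholder value for this sketch */

typedef struct {
  int head;                               /* first UE_id in the list, -1 if empty */
  int next[MAX_MOBILES_PER_ENB];          /* next[UE_id] is the following UE_id, -1 terminates */
} UE_list_t;

typedef struct {
  UE_list_t list;                         /* replaces the old head_ul/next_ul pair */
  /* UE_sched_ctrl[], UE_template[][], eNB_UE_stats[][], active[], numactiveULCCs[], ... */
} UE_info_t;

/* Iteration idiom used by the new code in place of "for (UE_id = UE_list->head_ul; ...)": */
static void for_each_ul_ue(const UE_info_t *UE_info) {
  for (int UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
    /* per-UE uplink scheduling work */
  }
}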
......@@ -91,12 +91,12 @@ int dump_eNB_l2_stats(char *buffer, int length) {
number_of_cards=NB_eNB_INST;
#endif
eNB_MAC_INST *eNB;
UE_list_t *UE_list;
UE_info_t *UE_info;
for (eNB_id=0; eNB_id<number_of_cards; eNB_id++) {
/* reset the values */
eNB = RC.mac[eNB_id];
UE_list = &eNB->UE_list;
UE_info = &eNB->UE_info;
for (CC_id=0 ; CC_id < MAX_NUM_CCs; CC_id++) {
eNB->eNB_stats[CC_id].dlsch_bitrate= 0;
......@@ -141,76 +141,76 @@ int dump_eNB_l2_stats(char *buffer, int length) {
len += sprintf(&buffer[len],"\n");
for (UE_id=UE_list->head; UE_id>=0; UE_id=UE_list->next[UE_id]) {
for (i=0; i<UE_list->numactiveCCs[UE_id]; i++) {
CC_id=UE_list->ordered_CCids[i][UE_id];
UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_bitrate=((UE_list->eNB_UE_stats[CC_id][UE_id].TBS*8)/((eNB->frame + 1)*10));
UE_list->eNB_UE_stats[CC_id][UE_id].total_dlsch_bitrate= ((UE_list->eNB_UE_stats[CC_id][UE_id].total_pdu_bytes*8)/((eNB->frame + 1)*10));
UE_list->eNB_UE_stats[CC_id][UE_id].total_overhead_bytes+= UE_list->eNB_UE_stats[CC_id][UE_id].overhead_bytes;
UE_list->eNB_UE_stats[CC_id][UE_id].avg_overhead_bytes=((UE_list->eNB_UE_stats[CC_id][UE_id].total_overhead_bytes*8)/((eNB->frame + 1)*10));
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_bitrate=((UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_TBS*8)/((eNB->frame + 1)*10));
UE_list->eNB_UE_stats[CC_id][UE_id].total_ulsch_bitrate= ((UE_list->eNB_UE_stats[CC_id][UE_id].total_pdu_bytes_rx*8)/((eNB->frame + 1)*10));
for (UE_id=UE_info->list.head; UE_id>=0; UE_id=UE_info->list.next[UE_id]) {
for (i=0; i<UE_info->numactiveCCs[UE_id]; i++) {
CC_id=UE_info->ordered_CCids[i][UE_id];
UE_info->eNB_UE_stats[CC_id][UE_id].dlsch_bitrate=((UE_info->eNB_UE_stats[CC_id][UE_id].TBS*8)/((eNB->frame + 1)*10));
UE_info->eNB_UE_stats[CC_id][UE_id].total_dlsch_bitrate= ((UE_info->eNB_UE_stats[CC_id][UE_id].total_pdu_bytes*8)/((eNB->frame + 1)*10));
UE_info->eNB_UE_stats[CC_id][UE_id].total_overhead_bytes+= UE_info->eNB_UE_stats[CC_id][UE_id].overhead_bytes;
UE_info->eNB_UE_stats[CC_id][UE_id].avg_overhead_bytes=((UE_info->eNB_UE_stats[CC_id][UE_id].total_overhead_bytes*8)/((eNB->frame + 1)*10));
UE_info->eNB_UE_stats[CC_id][UE_id].ulsch_bitrate=((UE_info->eNB_UE_stats[CC_id][UE_id].ulsch_TBS*8)/((eNB->frame + 1)*10));
UE_info->eNB_UE_stats[CC_id][UE_id].total_ulsch_bitrate= ((UE_info->eNB_UE_stats[CC_id][UE_id].total_pdu_bytes_rx*8)/((eNB->frame + 1)*10));
len += sprintf(&buffer[len],"[MAC] UE %d (DLSCH),status %s, RNTI %x : CQI %d, MCS1 %d, MCS2 %d, RB (tx %d, retx %d, total %d), ncce (tx %d, retx %d) \n",
UE_id,
map_int_to_str(rrc_status_names, UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status),
UE_list->eNB_UE_stats[CC_id][UE_id].crnti,
UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id],
UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs1,
UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs2,
UE_list->eNB_UE_stats[CC_id][UE_id].rbs_used,
UE_list->eNB_UE_stats[CC_id][UE_id].rbs_used_retx,
UE_list->eNB_UE_stats[CC_id][UE_id].total_rbs_used,
UE_list->eNB_UE_stats[CC_id][UE_id].ncce_used,
UE_list->eNB_UE_stats[CC_id][UE_id].ncce_used_retx
map_int_to_str(rrc_status_names, UE_info->eNB_UE_stats[CC_id][UE_id].rrc_status),
UE_info->eNB_UE_stats[CC_id][UE_id].crnti,
UE_info->UE_sched_ctrl[UE_id].dl_cqi[CC_id],
UE_info->eNB_UE_stats[CC_id][UE_id].dlsch_mcs1,
UE_info->eNB_UE_stats[CC_id][UE_id].dlsch_mcs2,
UE_info->eNB_UE_stats[CC_id][UE_id].rbs_used,
UE_info->eNB_UE_stats[CC_id][UE_id].rbs_used_retx,
UE_info->eNB_UE_stats[CC_id][UE_id].total_rbs_used,
UE_info->eNB_UE_stats[CC_id][UE_id].ncce_used,
UE_info->eNB_UE_stats[CC_id][UE_id].ncce_used_retx
);
len += sprintf(&buffer[len],
"[MAC] DLSCH bitrate (TTI %d, avg %d), Transmitted bytes "
"(TTI %d, total %"PRIu64"), Total Transmitted PDU %d, Overhead "
"(TTI %"PRIu64", total %"PRIu64", avg %"PRIu64")\n",
UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_bitrate,
UE_list->eNB_UE_stats[CC_id][UE_id].total_dlsch_bitrate,
UE_list->eNB_UE_stats[CC_id][UE_id].TBS,
UE_list->eNB_UE_stats[CC_id][UE_id].total_pdu_bytes,
UE_list->eNB_UE_stats[CC_id][UE_id].total_num_pdus,
UE_list->eNB_UE_stats[CC_id][UE_id].overhead_bytes,
UE_list->eNB_UE_stats[CC_id][UE_id].total_overhead_bytes,
UE_list->eNB_UE_stats[CC_id][UE_id].avg_overhead_bytes
UE_info->eNB_UE_stats[CC_id][UE_id].dlsch_bitrate,
UE_info->eNB_UE_stats[CC_id][UE_id].total_dlsch_bitrate,
UE_info->eNB_UE_stats[CC_id][UE_id].TBS,
UE_info->eNB_UE_stats[CC_id][UE_id].total_pdu_bytes,
UE_info->eNB_UE_stats[CC_id][UE_id].total_num_pdus,
UE_info->eNB_UE_stats[CC_id][UE_id].overhead_bytes,
UE_info->eNB_UE_stats[CC_id][UE_id].total_overhead_bytes,
UE_info->eNB_UE_stats[CC_id][UE_id].avg_overhead_bytes
);
len += sprintf(&buffer[len],"[MAC] UE %d (ULSCH), Status %s, Failute timer %d, RNTI %x : snr (%d, target %d), MCS (pre %d, post %d), RB (rx %d, retx %d, total %d), Current TBS %d \n",
UE_id,
map_int_to_str(rrc_status_names, UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status),
UE_list->UE_sched_ctrl[UE_id].ul_failure_timer,
UE_list->eNB_UE_stats[CC_id][UE_id].crnti,
UE_list->eNB_UE_stats[CC_id][UE_id].snr,
UE_list->eNB_UE_stats[CC_id][UE_id].target_snr,
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs1,
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs2,
UE_list->eNB_UE_stats[CC_id][UE_id].rbs_used_rx,
UE_list->eNB_UE_stats[CC_id][UE_id].rbs_used_retx_rx,
UE_list->eNB_UE_stats[CC_id][UE_id].total_rbs_used_rx,
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_TBS
map_int_to_str(rrc_status_names, UE_info->eNB_UE_stats[CC_id][UE_id].rrc_status),
UE_info->UE_sched_ctrl[UE_id].ul_failure_timer,
UE_info->eNB_UE_stats[CC_id][UE_id].crnti,
UE_info->eNB_UE_stats[CC_id][UE_id].snr,
UE_info->eNB_UE_stats[CC_id][UE_id].target_snr,
UE_info->eNB_UE_stats[CC_id][UE_id].ulsch_mcs1,
UE_info->eNB_UE_stats[CC_id][UE_id].ulsch_mcs2,
UE_info->eNB_UE_stats[CC_id][UE_id].rbs_used_rx,
UE_info->eNB_UE_stats[CC_id][UE_id].rbs_used_retx_rx,
UE_info->eNB_UE_stats[CC_id][UE_id].total_rbs_used_rx,
UE_info->eNB_UE_stats[CC_id][UE_id].ulsch_TBS
);
len += sprintf(&buffer[len],
"[MAC] ULSCH bitrate (TTI %d, avg %d), received bytes (total %"PRIu64"),"
"Total received PDU %d, Total errors %d\n",
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_bitrate,
UE_list->eNB_UE_stats[CC_id][UE_id].total_ulsch_bitrate,
UE_list->eNB_UE_stats[CC_id][UE_id].total_pdu_bytes_rx,
UE_list->eNB_UE_stats[CC_id][UE_id].total_num_pdus_rx,
UE_list->eNB_UE_stats[CC_id][UE_id].num_errors_rx);
len+= sprintf(&buffer[len],"[MAC] Received PHR PH = %d (db)\n", UE_list->UE_template[CC_id][UE_id].phr_info);
UE_info->eNB_UE_stats[CC_id][UE_id].ulsch_bitrate,
UE_info->eNB_UE_stats[CC_id][UE_id].total_ulsch_bitrate,
UE_info->eNB_UE_stats[CC_id][UE_id].total_pdu_bytes_rx,
UE_info->eNB_UE_stats[CC_id][UE_id].total_num_pdus_rx,
UE_info->eNB_UE_stats[CC_id][UE_id].num_errors_rx);
len+= sprintf(&buffer[len],"[MAC] Received PHR PH = %d (db)\n", UE_info->UE_template[CC_id][UE_id].phr_info);
len+= sprintf(&buffer[len],"[MAC] Estimated size LCGID[0][1][2][3] = %u %u %u %u\n",
UE_list->UE_template[CC_id][UE_id].ul_buffer_info[LCGID0],
UE_list->UE_template[CC_id][UE_id].ul_buffer_info[LCGID1],
UE_list->UE_template[CC_id][UE_id].ul_buffer_info[LCGID2],
UE_list->UE_template[CC_id][UE_id].ul_buffer_info[LCGID3]
UE_info->UE_template[CC_id][UE_id].ul_buffer_info[LCGID0],
UE_info->UE_template[CC_id][UE_id].ul_buffer_info[LCGID1],
UE_info->UE_template[CC_id][UE_id].ul_buffer_info[LCGID2],
UE_info->UE_template[CC_id][UE_id].ul_buffer_info[LCGID3]
);
}
PROTOCOL_CTXT_SET_BY_MODULE_ID(&ctxt,
eNB_id,
ENB_FLAG_YES,
UE_list->eNB_UE_stats[0][UE_id].crnti,//UE_PCCID(eNB_id,UE_id)][UE_id].crnti,
UE_info->eNB_UE_stats[0][UE_id].crnti,//UE_PCCID(eNB_id,UE_id)][UE_id].crnti,
eNB->frame,
eNB->subframe,
eNB_id);
......@@ -509,7 +509,7 @@ int openair2_stats_read(char *buffer, char **my_buffer, off_t off, int length) {
for(i=1; i<=NB_CNX_CH; i++) {
if (CH_mac_inst[Mod_id].Dcch_lchan[i].Active==1) {
len+=sprintf(&buffer[len],"\nMR index %u: DL SINR (feedback) %d dB, CQI: %s\n\n",
i,//CH_rrc_inst[Mod_id].Info.UE_list[i].L2_id[0],
i,//CH_rrc_inst[Mod_id].Info.UE_info[i].L2_id[0],
CH_mac_inst[Mod_id].Def_meas[i].Wideband_sinr,
print_cqi(CH_mac_inst[Mod_id].Def_meas[i].cqi));
len+=sprintf(&buffer[len],
......
......@@ -656,7 +656,7 @@ rrc_eNB_get_next_transaction_identifier(
// AssertFatal(enb_mod_idP < NB_eNB_INST, "eNB index invalid (%d/%d)!", enb_mod_idP, NB_eNB_INST);
//
// for (i = 0; i < MAX_MOBILES_PER_ENB; i++) {
// if (RC.rrc[enb_mod_idP]->Info.UE_list[i] == UE_identity) {
// if (RC.rrc[enb_mod_idP]->Info.UE_info[i] == UE_identity) {
// // UE_identity already registered
// reg = TRUE;
// break;
......@@ -1476,9 +1476,9 @@ rrc_eNB_generate_RRCConnectionReestablishment(
if (UE_id != -1) {
/* Activate the reject timer; if RRCComplete is not received within 10 frames, reject the UE */
RC.mac[module_id]->UE_list.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer = 1;
RC.mac[module_id]->UE_info.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer = 1;
/* Reject UE after 10 frames, LTE_RRCConnectionReestablishmentReject is triggered */
RC.mac[module_id]->UE_list.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer_thres = 100;
RC.mac[module_id]->UE_info.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer_thres = 100;
} else {
LOG_E(RRC, PROTOCOL_RRC_CTXT_UE_FMT" Generating LTE_RRCConnectionReestablishment without UE_id(MAC) rnti %x\n",
PROTOCOL_RRC_CTXT_UE_ARGS(ctxt_pP),
......@@ -2086,8 +2086,8 @@ rrc_eNB_generate_RRCConnectionReestablishmentReject(
int UE_id = find_UE_id(ctxt_pP->module_id, ctxt_pP->rnti);
if(UE_id != -1) {
RC.mac[ctxt_pP->module_id]->UE_list.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer = 1;
RC.mac[ctxt_pP->module_id]->UE_list.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer_thres = 20;
RC.mac[ctxt_pP->module_id]->UE_info.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer = 1;
RC.mac[ctxt_pP->module_id]->UE_info.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer_thres = 20;
} else {
LOG_E(RRC,
PROTOCOL_RRC_CTXT_UE_FMT" Generating LTE_RRCConnectionReestablishmentReject without UE_id(MAC) rnti %x\n",
......@@ -6168,7 +6168,7 @@ rrc_eNB_process_RRCConnectionReconfigurationComplete(
return;
}
UE_sched_ctrl_t *UE_scheduling_control = &(RC.mac[module_id]->UE_list.UE_sched_ctrl[UE_id_mac]);
UE_sched_ctrl_t *UE_scheduling_control = &(RC.mac[module_id]->UE_info.UE_sched_ctrl[UE_id_mac]);
if (UE_scheduling_control->cdrx_waiting_ack == TRUE) {
UE_scheduling_control->cdrx_waiting_ack = FALSE;
......@@ -6831,12 +6831,12 @@ rrc_eNB_decode_ccch(
break;
}
if((RC.mac[ctxt_pP->module_id]->UE_list.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer > 0) &&
(RC.mac[ctxt_pP->module_id]->UE_list.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer_thres > 20)) {
if((RC.mac[ctxt_pP->module_id]->UE_info.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer > 0) &&
(RC.mac[ctxt_pP->module_id]->UE_info.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer_thres > 20)) {
LOG_E(RRC,
PROTOCOL_RRC_CTXT_UE_FMT" RCConnectionReestablishmentComplete(Previous) don't receive, delete the c-rnti UE\n",
PROTOCOL_RRC_CTXT_UE_ARGS(ctxt_pP));
RC.mac[ctxt_pP->module_id]->UE_list.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer = 1000;
RC.mac[ctxt_pP->module_id]->UE_info.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer = 1000;
rrc_eNB_previous_SRB2(ue_context_p);
ue_context_p->ue_context.ue_reestablishment_timer = 0;
}
......@@ -6862,12 +6862,12 @@ rrc_eNB_decode_ccch(
break;
}
if((RC.mac[ctxt_pP->module_id]->UE_list.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer > 0) &&
(RC.mac[ctxt_pP->module_id]->UE_list.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer_thres > 20)) {
if((RC.mac[ctxt_pP->module_id]->UE_info.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer > 0) &&
(RC.mac[ctxt_pP->module_id]->UE_info.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer_thres > 20)) {
LOG_E(RRC,
PROTOCOL_RRC_CTXT_UE_FMT" RCConnectionReestablishmentComplete(Previous) don't receive, delete the Previous UE\n",
PROTOCOL_RRC_CTXT_UE_ARGS(ctxt_pP));
RC.mac[ctxt_pP->module_id]->UE_list.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer = 1000;
RC.mac[ctxt_pP->module_id]->UE_info.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer = 1000;
rrc_eNB_previous_SRB2(ue_context_p);
ue_context_p->ue_context.ue_reestablishment_timer = 0;
}
......@@ -7439,12 +7439,12 @@ rrc_eNB_decode_dcch(
AssertFatal(!NODE_IS_CU(RC.rrc[ctxt_pP->module_id]->node_type),
"CU cannot decode DCCH: no access to RC.mac[]\n");
if(RC.mac[ctxt_pP->module_id]->UE_list.UE_sched_ctrl[UE_id].crnti_reconfigurationcomplete_flag == 1) {
if(RC.mac[ctxt_pP->module_id]->UE_info.UE_sched_ctrl[UE_id].crnti_reconfigurationcomplete_flag == 1) {
LOG_I(RRC,
PROTOCOL_RRC_CTXT_UE_FMT" UE State = RRC_RECONFIGURED (dedicated DRB, xid %ld) C-RNTI Complete\n",
PROTOCOL_RRC_CTXT_UE_ARGS(ctxt_pP),ul_dcch_msg->message.choice.c1.choice.rrcConnectionReconfigurationComplete.rrc_TransactionIdentifier);
dedicated_DRB = 2;
RC.mac[ctxt_pP->module_id]->UE_list.UE_sched_ctrl[UE_id].crnti_reconfigurationcomplete_flag = 0;
RC.mac[ctxt_pP->module_id]->UE_info.UE_sched_ctrl[UE_id].crnti_reconfigurationcomplete_flag = 0;
}
} else if (ue_context_p->ue_context.Status == RRC_HO_EXECUTION) {
int16_t UE_id = find_UE_id(ctxt_pP->module_id, ctxt_pP->rnti);
......@@ -7459,7 +7459,7 @@ rrc_eNB_decode_dcch(
flexran_agent_handover = 1;
RC.rrc[ctxt_pP->module_id]->Nb_ue++;
dedicated_DRB = 3;
RC.mac[ctxt_pP->module_id]->UE_list.UE_sched_ctrl[UE_id].crnti_reconfigurationcomplete_flag = 0;
RC.mac[ctxt_pP->module_id]->UE_info.UE_sched_ctrl[UE_id].crnti_reconfigurationcomplete_flag = 0;
ue_context_p->ue_context.Status = RRC_RECONFIGURED;
if(ue_context_p->ue_context.handover_info) {
......@@ -7637,7 +7637,7 @@ rrc_eNB_decode_dcch(
break;
}
RC.mac[ctxt_pP->module_id]->UE_list.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer = 0;
RC.mac[ctxt_pP->module_id]->UE_info.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer = 0;
ue_context_p->ue_context.reestablishment_xid = -1;
if (ul_dcch_msg->message.choice.c1.choice.rrcConnectionReestablishmentComplete.criticalExtensions.present ==
......
......@@ -60,11 +60,11 @@ uint8_t rrc_find_free_ue_index(uint8_t Mod_id) {
uint16_t i;
for(i=1; i<=NB_CNX_CH; i++)
if ( (CH_rrc_inst[Mod_id].Info.UE_list[i][0] == 0) &&
(CH_rrc_inst[Mod_id].Info.UE_list[i][1] == 0) &&
(CH_rrc_inst[Mod_id].Info.UE_list[i][2] == 0) &&
(CH_rrc_inst[Mod_id].Info.UE_list[i][3] == 0) &&
(CH_rrc_inst[Mod_id].Info.UE_list[i][4] == 0)) {
if ( (CH_rrc_inst[Mod_id].Info.UE_info[i][0] == 0) &&
(CH_rrc_inst[Mod_id].Info.UE_info[i][1] == 0) &&
(CH_rrc_inst[Mod_id].Info.UE_info[i][2] == 0) &&
(CH_rrc_inst[Mod_id].Info.UE_info[i][3] == 0) &&
(CH_rrc_inst[Mod_id].Info.UE_info[i][4] == 0)) {
return i;
}
......@@ -78,7 +78,7 @@ unsigned short rrc_find_ue_index(unsigned char Mod_id, L2_ID Mac_id) {
unsigned char i;
/*
for(i=0;i<=NB_CNX_CH;i++)
if( bcmp(Mac_id.L2_id,CH_rrc_inst[Mod_id].Info.UE_list[i].L2_id,sizeof(L2_ID))==0)
if( bcmp(Mac_id.L2_id,CH_rrc_inst[Mod_id].Info.UE_info[i].L2_id,sizeof(L2_ID))==0)
return i;
return i;
*/
......@@ -167,7 +167,7 @@ unsigned char rrc_is_mobile_already_associated(uint8_t Mod_id, L2_ID Mac_id) {
/*
unsigned char i;
for(i=0;i<NB_CNX_CH;i++)
if( bcmp(Mac_id.L2_id,CH_rrc_inst[Mod_id].Info.UE_list[i].L2_id,sizeof(L2_ID))==0)
if( bcmp(Mac_id.L2_id,CH_rrc_inst[Mod_id].Info.UE_info[i].L2_id,sizeof(L2_ID))==0)
return 1;
return 0;
*/
......
......@@ -235,10 +235,10 @@ static inline int rxtx(PHY_VARS_eNB *eNB,
new_dlsch_ue_select_tbl_in_use = dlsch_ue_select_tbl_in_use;
dlsch_ue_select_tbl_in_use = !dlsch_ue_select_tbl_in_use;
// The L2 emulator can work with only one eNB.
// memcpy(&pre_scd_eNB_UE_stats,&RC.mac[ru->eNB_list[0]->Mod_id]->UE_list.eNB_UE_stats, sizeof(eNB_UE_STATS)*MAX_NUM_CCs*NUMBER_OF_UE_MAX);
// memcpy(&pre_scd_activeUE, &RC.mac[ru->eNB_list[0]->Mod_id]->UE_list.active, sizeof(boolean_t)*NUMBER_OF_UE_MAX);
memcpy(&pre_scd_eNB_UE_stats,&RC.mac[0]->UE_list.eNB_UE_stats, sizeof(eNB_UE_STATS)*MAX_NUM_CCs*NUMBER_OF_UE_MAX);
memcpy(&pre_scd_activeUE, &RC.mac[0]->UE_list.active, sizeof(boolean_t)*NUMBER_OF_UE_MAX);
// memcpy(&pre_scd_eNB_UE_stats,&RC.mac[ru->eNB_list[0]->Mod_id]->UE_info.eNB_UE_stats, sizeof(eNB_UE_STATS)*MAX_NUM_CCs*NUMBER_OF_UE_MAX);
// memcpy(&pre_scd_activeUE, &RC.mac[ru->eNB_list[0]->Mod_id]->UE_info.active, sizeof(boolean_t)*NUMBER_OF_UE_MAX);
memcpy(&pre_scd_eNB_UE_stats,&RC.mac[0]->UE_info.eNB_UE_stats, sizeof(eNB_UE_STATS)*MAX_NUM_CCs*NUMBER_OF_UE_MAX);
memcpy(&pre_scd_activeUE, &RC.mac[0]->UE_info.active, sizeof(boolean_t)*NUMBER_OF_UE_MAX);
AssertFatal((ret= pthread_mutex_lock(&ru->proc.mutex_pre_scd))==0,"[eNB] error locking proc mutex for eNB pre scd, return %d\n",ret);
ru->proc.instance_pre_scd++;
......
......@@ -1866,8 +1866,8 @@ static void *ru_thread( void *param ) {
#if defined(PRE_SCD_THREAD)
new_dlsch_ue_select_tbl_in_use = dlsch_ue_select_tbl_in_use;
dlsch_ue_select_tbl_in_use = !dlsch_ue_select_tbl_in_use;
memcpy(&pre_scd_eNB_UE_stats,&RC.mac[ru->eNB_list[0]->Mod_id]->UE_list.eNB_UE_stats, sizeof(eNB_UE_STATS)*MAX_NUM_CCs*NUMBER_OF_UE_MAX);
memcpy(&pre_scd_activeUE, &RC.mac[ru->eNB_list[0]->Mod_id]->UE_list.active, sizeof(boolean_t)*NUMBER_OF_UE_MAX);
memcpy(&pre_scd_eNB_UE_stats,&RC.mac[ru->eNB_list[0]->Mod_id]->UE_info.eNB_UE_stats, sizeof(eNB_UE_STATS)*MAX_NUM_CCs*NUMBER_OF_UE_MAX);
memcpy(&pre_scd_activeUE, &RC.mac[ru->eNB_list[0]->Mod_id]->UE_info.active, sizeof(boolean_t)*NUMBER_OF_UE_MAX);
AssertFatal((ret=pthread_mutex_lock(&ru->proc.mutex_pre_scd))==0,"[eNB] error locking proc mutex for eNB pre scd\n");
ru->proc.instance_pre_scd++;
......
......@@ -435,7 +435,7 @@ int restart_L1L2(module_id_t enb_id) {
set_function_spec_param(RC.ru[enb_id]);
/* reset the list of connected UEs in the MAC, since in this process we
* lose all UEs (they have to reconnect) */
init_UE_list(&RC.mac[enb_id]->UE_list);
init_UE_info(&RC.mac[enb_id]->UE_info);
LOG_I(ENB_APP, "attempting to create ITTI tasks\n");
if (itti_create_task (TASK_RRC_ENB, rrc_enb_task, NULL) < 0) {
......
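The body of the renamed initializer is not shown here. A hedged sketch of what init_UE_info() plausibly does, consistent with the "reset the list of connected UEs" comment above and with the list layout sketched earlier on this page (the memset, the loop, and the helper name are assumptions):

/* Sketch, assuming the UE_list_t/UE_info_t layout sketched earlier in this page. */
#include <string.h>

static void init_UE_info_sketch(UE_info_t *UE_info) {
  memset(UE_info, 0, sizeof(*UE_info));   /* clear all per-UE containers */
  UE_info->list.head = -1;                /* empty list: no connected UEs */
  for (int i = 0; i < MAX_MOBILES_PER_ENB; i++)
    UE_info->list.next[i] = -1;           /* no successors until UEs attach */
}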