Commit bfc3912f authored by Raphael Defosseux

Merge remote-tracking branch 'origin/loadtester_bugfixes' into develop_integration_2019_w30

parents 7fd9e1a5 8976f988
......@@ -384,9 +384,8 @@ int dlsch_encoding_2threads(PHY_VARS_eNB *eNB,
unsigned int G;
unsigned int crc=1;
unsigned char harq_pid = dlsch->harq_ids[frame%2][subframe];
if(harq_pid >= dlsch->Mdlharq) {
LOG_E(PHY,"dlsch_encoding_2threads illegal harq_pid %d\n", harq_pid);
if((harq_pid < 0) || (harq_pid >= dlsch->Mdlharq)) {
LOG_E(PHY,"dlsch_encoding_2threads illegal harq_pid %d %s:%d\n", harq_pid, __FILE__, __LINE__);
return(-1);
}
......@@ -676,9 +675,8 @@ int dlsch_encoding(PHY_VARS_eNB *eNB,
unsigned int crc=1;
LTE_DL_FRAME_PARMS *frame_parms = &eNB->frame_parms;
unsigned char harq_pid = dlsch->harq_ids[frame%2][subframe];
if(harq_pid >= dlsch->Mdlharq) {
LOG_E(PHY,"dlsch_encoding illegal harq_pid %d\n", harq_pid);
if((harq_pid < 0) || (harq_pid >= dlsch->Mdlharq)) {
LOG_E(PHY,"dlsch_encoding illegal harq_pid %d %s:%d\n", harq_pid, __FILE__, __LINE__);
return(-1);
}
......
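The hunks above, and several that follow, replace hard assertions on harq_pid with a recoverable bounds check that logs the offending value together with __FILE__/__LINE__ and backs out of the current TTI. A minimal, self-contained sketch of that pattern is shown below; the LOG_E macro and the dlsch_t struct are simplified stand-ins, not the OAI definitions.

```c
#include <stdio.h>

/* Simplified stand-in for OAI's LOG_E(PHY, ...): component tag, then printf-style body. */
#define LOG_E(comp, fmt, ...) fprintf(stderr, "[" #comp "][E] " fmt, __VA_ARGS__)

typedef struct {
  int Mdlharq;              /* number of DL HARQ processes configured        */
  void *harq_processes[8];  /* would be LTE_DL_eNB_HARQ_t* in the real code  */
} dlsch_t;

/* Return 0 if harq_pid indexes a valid HARQ process, -1 otherwise.
 * harq_pid is taken as a signed int here so that the "< 0" branch is
 * meaningful; in the original code harq_pid is an unsigned char. */
static int check_harq_pid(const dlsch_t *dlsch, int harq_pid)
{
  if (harq_pid < 0 || harq_pid >= dlsch->Mdlharq) {
    LOG_E(PHY, "illegal harq_pid %d %s:%d\n", harq_pid, __FILE__, __LINE__);
    return -1;   /* caller skips this TTI instead of aborting the eNB */
  }
  return 0;
}

int main(void)
{
  dlsch_t dlsch = { .Mdlharq = 8 };
  printf("harq_pid 3  -> %d\n", check_harq_pid(&dlsch, 3));   /* 0  */
  printf("harq_pid 11 -> %d\n", check_harq_pid(&dlsch, 11));  /* -1 */
  return 0;
}
```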
......@@ -2257,8 +2257,8 @@ int dlsch_modulation(PHY_VARS_eNB* phy_vars_eNB,
if ((dlsch0 != NULL) && (dlsch1 != NULL)){
harq_pid = dlsch0->harq_ids[frame%2][subframe_offset];
if(harq_pid >= dlsch0->Mdlharq) {
LOG_E(PHY,"illegal harq_pid %d\n", harq_pid);
if((harq_pid < 0) || (harq_pid >= dlsch0->Mdlharq)) {
LOG_E(PHY,"illegal harq_pid %d %s:%d\n", harq_pid, __FILE__, __LINE__);
return(-1);
}
dlsch0_harq = dlsch0->harq_processes[harq_pid];
......@@ -2278,8 +2278,8 @@ int dlsch_modulation(PHY_VARS_eNB* phy_vars_eNB,
}else if ((dlsch0 != NULL) && (dlsch1 == NULL)){
harq_pid = dlsch0->harq_ids[frame%2][subframe_offset];
if(harq_pid >= dlsch0->Mdlharq) {
LOG_E(PHY,"illegal harq_pid %d\n", harq_pid);
if((harq_pid < 0) || (harq_pid >= dlsch0->Mdlharq)) {
LOG_E(PHY,"illegal harq_pid %d %s:%d\n", harq_pid, __FILE__, __LINE__);
return(-1);
}
dlsch0_harq = dlsch0->harq_processes[harq_pid];
......@@ -2299,8 +2299,8 @@ int dlsch_modulation(PHY_VARS_eNB* phy_vars_eNB,
}else if ((dlsch0 == NULL) && (dlsch1 != NULL)){
harq_pid = dlsch1->harq_ids[frame%2][subframe_offset];
if(harq_pid >= dlsch1->Mdlharq) {
LOG_E(PHY,"illegal harq_pid %d\n", harq_pid);
if((harq_pid < 0) || (harq_pid >= dlsch1->Mdlharq)) {
LOG_E(PHY,"illegal harq_pid %d %s:%d\n", harq_pid, __FILE__, __LINE__);
return(-1);
}
dlsch1_harq = dlsch1->harq_processes[harq_pid];
......
......@@ -186,21 +186,11 @@ void handle_nfapi_dlsch_pdu(PHY_VARS_eNB *eNB,int frame,int subframe,L1_rxtx_pro
#endif
harq_pid = dlsch0->harq_ids[proc->frame_tx%2][proc->subframe_tx];
AssertFatal((harq_pid>=0) && (harq_pid<8),"harq_pid %d not in 0...7 frame:%d subframe:%d subframe(TX):%d rnti:%x UE_id:%d dlsch0[harq_ids:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d]\n",
harq_pid,
frame,subframe,
proc->subframe_tx,rel8->rnti,UE_id,
dlsch0->harq_ids[proc->frame_tx%2][0],
dlsch0->harq_ids[proc->frame_tx%2][1],
dlsch0->harq_ids[proc->frame_tx%2][2],
dlsch0->harq_ids[proc->frame_tx%2][3],
dlsch0->harq_ids[proc->frame_tx%2][4],
dlsch0->harq_ids[proc->frame_tx%2][5],
dlsch0->harq_ids[proc->frame_tx%2][6],
dlsch0->harq_ids[proc->frame_tx%2][7],
dlsch0->harq_ids[proc->frame_tx%2][8],
dlsch0->harq_ids[proc->frame_tx%2][9]
);
if((harq_pid < 0) || (harq_pid >= dlsch0->Mdlharq)) {
LOG_E(PHY,"illegal harq_pid %d %s:%d\n", harq_pid, __FILE__, __LINE__);
return;
}
dlsch0_harq = dlsch0->harq_processes[harq_pid];
dlsch1_harq = dlsch1->harq_processes[harq_pid];
AssertFatal(dlsch0_harq!=NULL,"dlsch_harq is null\n");
......
......@@ -506,9 +506,9 @@ void phy_procedures_eNB_TX(PHY_VARS_eNB *eNB,
) {
// get harq_pid
harq_pid = dlsch0->harq_ids[frame%2][subframe];
AssertFatal(harq_pid>=0,"harq_pid is negative\n");
//AssertFatal(harq_pid>=0,"harq_pid is negative\n");
if (harq_pid>=8) {
if((harq_pid < 0) || (harq_pid >= dlsch0->Mdlharq)) {
#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0))
if (dlsch0->ue_type==0)
......@@ -1512,8 +1512,10 @@ static void do_release_harq(PHY_VARS_eNB *eNB,
harq_pid = dlsch0->harq_ids[frame_tx%2][subframe_tx];
AssertFatal((harq_pid >= 0) && (harq_pid < 8),"harq_pid %d not in 0...7\n", harq_pid);
if((harq_pid < 0) || (harq_pid >= dlsch0->Mdlharq)) {
LOG_E(PHY,"illegal harq_pid %d %s:%d\n", harq_pid, __FILE__, __LINE__);
return;
}
dlsch0_harq = dlsch0->harq_processes[harq_pid];
dlsch1_harq = dlsch1->harq_processes[harq_pid];
......@@ -1562,7 +1564,10 @@ static void do_release_harq(PHY_VARS_eNB *eNB,
if (((1 << m) & mask) > 0) {
harq_pid = dlsch0->harq_ids[frame_tx%2][subframe_tx];
if ((harq_pid >= 0) && (harq_pid < dlsch0->Mdlharq)) {
if((harq_pid < 0) || (harq_pid >= dlsch0->Mdlharq)) {
LOG_E(PHY,"illegal harq_pid %d %s:%d\n", harq_pid, __FILE__, __LINE__);
return;
}
dlsch0_harq = dlsch0->harq_processes[harq_pid];
dlsch1_harq = dlsch1->harq_processes[harq_pid];
......@@ -1592,7 +1597,6 @@ static void do_release_harq(PHY_VARS_eNB *eNB,
dlsch0->harq_mask &= ~(1 << harq_pid);
}
}
} // end if ((harq_pid >= 0) && (harq_pid < dlsch0->Mdlharq))
} // end if (((1 << m) & mask) > 0)
} // end for (int m=0; m < M; m++)
} // end if TDD
......@@ -1631,7 +1635,7 @@ int getM(PHY_VARS_eNB *eNB,int frame,int subframe) {
harq_pid = dlsch0->harq_ids[frame_tx%2][subframe_tx];
if (harq_pid>=0 && harq_pid<10) {
if (harq_pid>=0 && harq_pid<dlsch0->Mdlharq) {
dlsch0_harq = dlsch0->harq_processes[harq_pid];
dlsch1_harq = dlsch1->harq_processes[harq_pid];
AssertFatal(dlsch0_harq!=NULL,"dlsch0_harq is null\n");
......
......@@ -126,9 +126,11 @@ void schedule_SRS(module_id_t module_idP,
continue;
}
AssertFatal(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated != NULL,
"physicalConfigDedicated is null for UE %d\n",
UE_id);
if(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated == NULL) {
LOG_E(MAC,"physicalConfigDedicated is null for UE %d\n",UE_id);
printf("physicalConfigDedicated is null for UE %d\n",UE_id);
return;
}
/* CDRX condition on Active Time and SRS type-0 report (36.321 5.7) */
UE_scheduling_control = &(UE_list->UE_sched_ctrl[UE_id]);
......@@ -506,6 +508,13 @@ check_ul_failure(module_id_t module_idP, int CC_id, int UE_id,
UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync = 1;
}
} // ul_failure_timer>0
UE_list->UE_sched_ctrl[UE_id].uplane_inactivity_timer++;
if((U_PLANE_INACTIVITY_VALUE != 0) && (UE_list->UE_sched_ctrl[UE_id].uplane_inactivity_timer > (U_PLANE_INACTIVITY_VALUE * 10))){
LOG_D(MAC,"UE %d rnti %x: U-Plane Failure after repeated PDCCH orders: Triggering RRC \n",UE_id,rnti);
mac_eNB_rrc_uplane_failure(module_idP,CC_id,frameP,subframeP,rnti);
UE_list->UE_sched_ctrl[UE_id].uplane_inactivity_timer = 0;
}// time > 60s
}
void
......
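The check_ul_failure() addition above increments a per-UE uplane_inactivity_timer on every pass and, once it exceeds U_PLANE_INACTIVITY_VALUE * 10, reports a user-plane failure to RRC and restarts the count; later hunks reset the counter whenever downlink data is scheduled or uplink data is received for the UE. A self-contained sketch of that watchdog idea, using invented names (uplane_watchdog_t, on_uplane_activity, uplane_tick) and a small threshold instead of the OAI values, might look like this:

```c
#include <stdbool.h>
#include <stdio.h>

/* Threshold in ticks; 0 disables the check, mirroring U_PLANE_INACTIVITY_VALUE == 0.
 * Kept small here for the demo; the real value would be U_PLANE_INACTIVITY_VALUE * 10. */
#define UPLANE_INACTIVITY_THRESHOLD 5

typedef struct {
  unsigned int inactivity_ticks;   /* incremented once per periodic check */
} uplane_watchdog_t;

/* Called whenever user-plane data is scheduled or received for this UE. */
static void on_uplane_activity(uplane_watchdog_t *wd)
{
  wd->inactivity_ticks = 0;
}

/* Called periodically; returns true when a U-plane failure should be raised. */
static bool uplane_tick(uplane_watchdog_t *wd)
{
  wd->inactivity_ticks++;
  if (UPLANE_INACTIVITY_THRESHOLD != 0 &&
      wd->inactivity_ticks > UPLANE_INACTIVITY_THRESHOLD) {
    wd->inactivity_ticks = 0;          /* restart the count after reporting      */
    return true;                       /* caller would trigger the RRC failure path */
  }
  return false;
}

int main(void)
{
  uplane_watchdog_t wd = { 0 };
  for (unsigned int t = 0; t < 8; t++) {
    if (uplane_tick(&wd))
      printf("U-plane inactivity detected at tick %u\n", t);
  }
  on_uplane_activity(&wd);             /* fresh traffic resets the counter       */
  return 0;
}
```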
......@@ -957,9 +957,12 @@ generate_Msg4(module_id_t module_idP,
1, // tpc, none
getRIV(N_RB_DL, first_rb, 4), // resource_block_coding
ra->msg4_mcs, // mcs
1, // ndi
1 - UE_list->UE_template[CC_idP][UE_id].oldNDI[ra->harq_pid],
0, // rv
0); // vrb_flag
UE_list->UE_template[CC_idP][UE_id].oldNDI[ra->harq_pid] = 1 - UE_list->UE_template[CC_idP][UE_id].oldNDI[ra->harq_pid];
LOG_D(MAC,
"Frame %d, subframe %d: Msg4 DCI pdu_num %d (rnti %x,rnti_type %d,harq_pid %d, resource_block_coding (%p) %d\n",
frameP, subframeP, dl_req_body->number_pdu,
......@@ -1194,7 +1197,7 @@ check_Msg4_retransmission(module_id_t module_idP, int CC_idP,
1, // tpc, none
getRIV(N_RB_DL, first_rb, 4), // resource_block_coding
ra->msg4_mcs, // mcs
1, // ndi
UE_list->UE_template[CC_idP][UE_id].oldNDI[ra->harq_pid],
round & 3, // rv
0); // vrb_flag
......
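The generate_Msg4()/check_Msg4_retransmission() hunks above stop hard-coding the NDI bit: a new transmission toggles the stored oldNDI for the HARQ process and records the new value, while a retransmission re-sends the stored value unchanged so the UE does not treat it as new data. A minimal sketch of that bookkeeping, with a hypothetical harq_ndi_state_t in place of OAI's UE_TEMPLATE, could be:

```c
#include <stdint.h>
#include <stdio.h>

#define NUM_HARQ_PROCESSES 8

typedef struct {
  uint8_t oldNDI[NUM_HARQ_PROCESSES];  /* last NDI value sent on each DL HARQ process */
} harq_ndi_state_t;

/* New transport block: toggle NDI relative to the previous transmission and store it. */
static uint8_t ndi_for_new_tx(harq_ndi_state_t *st, int harq_pid)
{
  st->oldNDI[harq_pid] = 1 - st->oldNDI[harq_pid];
  return st->oldNDI[harq_pid];
}

/* Retransmission of the same transport block: keep the NDI unchanged. */
static uint8_t ndi_for_retx(const harq_ndi_state_t *st, int harq_pid)
{
  return st->oldNDI[harq_pid];
}

int main(void)
{
  harq_ndi_state_t st = { {0} };
  printf("new tx  NDI = %u\n", ndi_for_new_tx(&st, 0));  /* 1: toggled       */
  printf("retx    NDI = %u\n", ndi_for_retx(&st, 0));    /* 1: unchanged     */
  printf("next tx NDI = %u\n", ndi_for_new_tx(&st, 0));  /* 0: toggled again */
  return 0;
}
```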
......@@ -717,9 +717,12 @@ schedule_ue_spec(module_id_t module_idP,
eNB_UE_stats->harq_pid = harq_pid;
eNB_UE_stats->harq_round = round_DL;
if (eNB_UE_stats->rrc_status < RRC_RECONFIGURED) {
ue_sched_ctrl->uplane_inactivity_timer = 0;
}
if (eNB_UE_stats->rrc_status < RRC_CONNECTED) {
LOG_D(MAC, "UE %d is not in RRC_CONNECTED\n",
UE_id);
LOG_D(MAC, "UE %d is not in RRC_CONNECTED\n", UE_id);
continue;
}
......@@ -1006,9 +1009,11 @@ schedule_ue_spec(module_id_t module_idP,
, 0
#endif
);
pthread_mutex_lock(&rrc_release_freelist);
if((rrc_release_info.num_UEs > 0) && (rlc_am_mui.rrc_mui_num > 0)) {
while(pthread_mutex_trylock(&rrc_release_freelist)){
/* spin... */
}
uint16_t release_total = 0;
for (release_num = 0, release_ctrl = &rrc_release_info.RRC_release_ctrl[0];
......@@ -1055,9 +1060,9 @@ schedule_ue_spec(module_id_t module_idP,
if(release_total >= rrc_release_info.num_UEs)
break;
}
pthread_mutex_unlock(&rrc_release_freelist);
}
pthread_mutex_unlock(&rrc_release_freelist);
for (ra_ii = 0, ra = &eNB->common_channels[CC_id].ra[0]; ra_ii < NB_RA_PROC_MAX; ra_ii++, ra++) {
if ((ra->rnti == rnti) && (ra->state == MSGCRNTI)) {
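The scheduler hunks above (and the matching fairRR and rrc_mac_remove_ue hunks further down) also change how the RRC release list is protected: the fast path first checks whether any release work is pending and only then busy-waits on pthread_mutex_trylock, with the unlock moved inside the same conditional. A compilable sketch of that trylock-spin shape, with a placeholder process_release_list() standing in for the loop over RRC_release_ctrl[], is below; whether spinning beats a plain blocking lock depends on how short the critical section really is.

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t release_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending_releases = 1;      /* stand-in for rrc_release_info.num_UEs */

/* Placeholder for the loop that walks RRC_release_ctrl[] and builds the release PDUs. */
static void process_release_list(void)
{
  printf("processing %d pending RRC release(s)\n", pending_releases);
  pending_releases = 0;
}

static void scheduler_release_step(void)
{
  if (pending_releases > 0) {          /* cheap unsynchronized pre-check, as in the hunk */
    while (pthread_mutex_trylock(&release_lock)) {
      /* spin: the critical section on the other side is expected to be short */
    }
    process_release_list();            /* critical section */
    pthread_mutex_unlock(&release_lock);
  }
}

int main(void)
{
  scheduler_release_step();            /* processes the pending release        */
  scheduler_release_step();            /* nothing to do, lock is never touched */
  return 0;
}
```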
......@@ -3084,11 +3089,9 @@ schedule_PCH(module_id_t module_idP,
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format = NFAPI_DL_DCI_FORMAT_1A;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.harq_process = 0;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tpc = 1; // no TPC
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = 1;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = 1;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.resource_block_coding = getRIV(n_rb_dl,
first_rb,
4);
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = 0;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = 0;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.resource_block_coding = getRIV(n_rb_dl, first_rb, 4);
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.virtual_resource_block_assignment_flag = 0;
#endif
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level = 4;
......
......@@ -585,6 +585,7 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id,
// uint16_t r1=0;
uint8_t CC_id;
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
int N_RB_DL;
UE_sched_ctrl_t *ue_sched_ctl;
// int rrc_status = RRC_IDLE;
COMMON_channels_t *cc;
......@@ -640,7 +641,18 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id,
average_rbs_per_user[CC_id] = 0;
cc = &RC.mac[Mod_id]->common_channels[CC_id];
// Get total available RBS count and total UE count
temp_total_rbs_count = RC.mac[Mod_id]->eNB_stats[CC_id].available_prbs;
N_RB_DL = to_prb(cc->mib->message.dl_Bandwidth);
temp_total_rbs_count = 0;
for(uint8_t rbg_i = 0;rbg_i < N_RBG[CC_id];rbg_i++ ){
if(rballoc_sub[CC_id][rbg_i] == 0){
if((rbg_i == N_RBG[CC_id] -1) &&
((N_RB_DL == 25) || (N_RB_DL == 50))){
temp_total_rbs_count += (min_rb_unit[CC_id] -1);
}else{
temp_total_rbs_count += min_rb_unit[CC_id];
}
}
}
temp_total_ue_count = dlsch_ue_select[CC_id].ue_num;
for (i = 0; i < dlsch_ue_select[CC_id].ue_num; i++) {
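The fairRR pre-processor hunk above recomputes the number of free downlink RBs by walking the per-RBG allocation map instead of trusting the cached available_prbs statistic, taking into account that for 25 and 50 PRB bandwidths the last RBG is one RB short. A standalone sketch of that computation (the 0 = free encoding and the short-last-RBG rule are taken from the hunk; everything else is simplified) is:

```c
#include <stdint.h>
#include <stdio.h>

/* Count free PRBs from an RBG allocation map where 0 means "RBG not yet allocated".
 * For N_RB_DL of 25 or 50 the last RBG holds one PRB less than the nominal RBG size. */
static int count_free_prbs(const uint8_t *rballoc, int n_rbg, int rbg_size, int n_rb_dl)
{
  int total = 0;
  for (int rbg = 0; rbg < n_rbg; rbg++) {
    if (rballoc[rbg] != 0)
      continue;                              /* RBG already allocated */
    if (rbg == n_rbg - 1 && (n_rb_dl == 25 || n_rb_dl == 50))
      total += rbg_size - 1;                 /* short last RBG        */
    else
      total += rbg_size;
  }
  return total;
}

int main(void)
{
  /* 10 MHz example: 50 PRBs, 17 RBGs of size 3 (the last RBG has only 2 PRBs). */
  uint8_t rballoc[17] = {0};
  rballoc[0] = 1;                            /* pretend the first RBG is taken */
  printf("free PRBs = %d\n", count_free_prbs(rballoc, 17, 3, 50));  /* 47 */
  return 0;
}
```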
......@@ -1001,6 +1013,10 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
UE_list->eNB_UE_stats[CC_id][UE_id].harq_pid = harq_pid;
UE_list->eNB_UE_stats[CC_id][UE_id].harq_round = round;
if (UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status < RRC_RECONFIGURED) {
UE_list->UE_sched_ctrl[UE_id].uplane_inactivity_timer = 0;
}
if (UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status <
RRC_CONNECTED)
continue;
......@@ -1287,9 +1303,11 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
,0, 0
#endif
);
pthread_mutex_lock(&rrc_release_freelist);
if((rrc_release_info.num_UEs > 0) && (rlc_am_mui.rrc_mui_num > 0)) {
if((rrc_release_info.num_UEs > 0) && (rlc_am_mui.rrc_mui_num > 0)){
while(pthread_mutex_trylock(&rrc_release_freelist)) {
/* spin... */
}
uint16_t release_total = 0;
for(uint16_t release_num = 0; release_num < NUMBER_OF_UE_MAX; release_num++) {
......@@ -1326,9 +1344,9 @@ schedule_ue_spec_fairRR(module_id_t module_idP,
if(release_total >= rrc_release_info.num_UEs)
break;
}
pthread_mutex_unlock(&rrc_release_freelist);
}
pthread_mutex_unlock(&rrc_release_freelist);
RA_t *ra = &eNB->common_channels[CC_id].ra[0];
for (uint8_t ra_ii = 0; ra_ii < NB_RA_PROC_MAX; ra_ii++) {
......@@ -1972,7 +1990,7 @@ void ulsch_scheduler_pre_ue_select_fairRR(
uint8_t cc_id_flag[MAX_NUM_CCs];
uint8_t harq_pid = 0,round = 0;
UE_list_t *UE_list= &eNB->UE_list;
uint8_t aggregation = 2;
uint8_t aggregation;
int format_flag;
nfapi_hi_dci0_request_body_t *HI_DCI0_req;
nfapi_hi_dci0_request_pdu_t *hi_dci0_pdu;
......@@ -2031,7 +2049,7 @@ void ulsch_scheduler_pre_ue_select_fairRR(
if ( round > 0 ) {
hi_dci0_pdu = &HI_DCI0_req->hi_dci0_pdu_list[HI_DCI0_req->number_of_dci+HI_DCI0_req->number_of_hi];
format_flag = 2;
aggregation=get_aggregation(get_bw_index(module_idP,CC_id),UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id],format0);
if (CCE_allocation_infeasible(module_idP,CC_id,format_flag,subframeP,aggregation,rnti) == 1) {
cc_id_flag[CC_id] = 1;
continue;
......@@ -2110,7 +2128,7 @@ void ulsch_scheduler_pre_ue_select_fairRR(
hi_dci0_pdu = &HI_DCI0_req->hi_dci0_pdu_list[HI_DCI0_req->number_of_dci+HI_DCI0_req->number_of_hi];
format_flag = 2;
rnti = UE_RNTI(module_idP,first_ue_id[CC_id][temp]);
aggregation=get_aggregation(get_bw_index(module_idP,CC_id),UE_list->UE_sched_ctrl[first_ue_id[CC_id][temp]].dl_cqi[CC_id],format0);
if (CCE_allocation_infeasible(module_idP,CC_id,format_flag,subframeP,aggregation,rnti) == 1) {
cc_id_flag[CC_id] = 1;
break;
......@@ -2182,7 +2200,7 @@ void ulsch_scheduler_pre_ue_select_fairRR(
((UE_sched_ctl->ul_inactivity_timer>10)&&(UE_sched_ctl->ul_scheduled==0)&&(mac_eNB_get_rrc_status(module_idP,UE_RNTI(module_idP,UE_id)) < RRC_CONNECTED)) ) {
hi_dci0_pdu = &HI_DCI0_req->hi_dci0_pdu_list[HI_DCI0_req->number_of_dci+HI_DCI0_req->number_of_hi];
format_flag = 2;
aggregation=get_aggregation(get_bw_index(module_idP,CC_id),UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id],format0);
if (CCE_allocation_infeasible(module_idP,CC_id,format_flag,subframeP,aggregation,rnti) == 1) {
cc_id_flag[CC_id] = 1;
continue;
......@@ -2235,7 +2253,7 @@ void ulsch_scheduler_pre_ue_select_fairRR(
hi_dci0_pdu = &HI_DCI0_req->hi_dci0_pdu_list[HI_DCI0_req->number_of_dci+HI_DCI0_req->number_of_hi];
format_flag = 2;
rnti = UE_RNTI(module_idP,ul_inactivity_id[CC_id][temp]);
aggregation=get_aggregation(get_bw_index(module_idP,CC_id),UE_list->UE_sched_ctrl[ul_inactivity_id[CC_id][temp]].dl_cqi[CC_id],format0);
if (CCE_allocation_infeasible(module_idP,CC_id,format_flag,subframeP,aggregation,rnti) == 1) {
cc_id_flag[CC_id] = 1;
continue;
......@@ -2434,12 +2452,20 @@ void ulsch_scheduler_pre_processor_fairRR(module_id_t module_idP,
UE_list->UE_template[CC_id][UE_id].pre_assigned_mcs_ul = mcs;
}
} else {
if (mac_eNB_get_rrc_status(module_idP,UE_RNTI(module_idP, UE_id)) < RRC_CONNECTED){
// assign RBs (6 RBs)
first_rb[CC_id] = first_rb[CC_id] + 6;
UE_list->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[0] = 6;
UE_list->UE_template[CC_id][UE_id].pre_allocated_rb_table_index_ul = 5;
UE_list->UE_template[CC_id][UE_id].pre_assigned_mcs_ul = 10;
}else{
// assign RBs (3 RBs)
first_rb[CC_id] = first_rb[CC_id] + 3;
UE_list->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[0] = 3;
UE_list->UE_template[CC_id][UE_id].pre_allocated_rb_table_index_ul = 2;
UE_list->UE_template[CC_id][UE_id].pre_assigned_mcs_ul = 10;
}
}
} else if ( ulsch_ue_select[CC_id].list[ulsch_ue_num].ue_priority == SCH_UL_INACTIVE ) {
// assign RBs (3 RBs)
first_rb[CC_id] = first_rb[CC_id] + 3;
......@@ -2609,7 +2635,7 @@ void schedule_ulsch_rnti_fairRR(module_id_t module_idP,
unsigned char sched_subframeP,
ULSCH_UE_SELECT ulsch_ue_select[MAX_NUM_CCs]) {
int16_t UE_id;
uint8_t aggregation = 2;
uint8_t aggregation;
uint16_t first_rb[MAX_NUM_CCs];
uint8_t ULSCH_first_end;
rnti_t rnti = -1;
......@@ -2738,6 +2764,7 @@ void schedule_ulsch_rnti_fairRR(module_id_t module_idP,
UE_sched_ctrl = &UE_list->UE_sched_ctrl[UE_id];
harq_pid = subframe2harqpid(cc,sched_frame,sched_subframeP);
rnti = UE_RNTI(CC_id,UE_id);
aggregation=get_aggregation(get_bw_index(module_idP,CC_id),UE_sched_ctrl[UE_id].dl_cqi[CC_id],format0);
LOG_D(MAC,"[eNB %d] frame %d subframe %d,Checking PUSCH %d for UE %d/%x CC %d : aggregation level %d, N_RB_UL %d\n",
module_idP,frameP,subframeP,harq_pid,UE_id,rnti,CC_id, aggregation,N_RB_UL);
int bytes_to_schedule = UE_template->estimated_ul_buffer - UE_template->scheduled_ul_bytes;
......
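The fairRR UL scheduler hunks above drop the fixed aggregation = 2 and instead derive the PDCCH aggregation level per UE from its reported wideband CQI via get_aggregation() just before the CCE_allocation_infeasible() check. The mapping below is purely illustrative; the real get_aggregation() also takes the bandwidth index and DCI format into account. It only shows the idea that a poorer CQI should buy a larger aggregation level:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical CQI -> aggregation-level mapping: low CQI (poor channel) gets more
 * CCEs for the DCI, high CQI gets fewer. Not the OAI lookup table. */
static uint8_t aggregation_from_cqi(uint8_t wideband_cqi)
{
  if (wideband_cqi <= 3)  return 8;
  if (wideband_cqi <= 6)  return 4;
  if (wideband_cqi <= 10) return 2;
  return 1;
}

int main(void)
{
  for (uint8_t cqi = 1; cqi <= 15; cqi++)
    printf("CQI %2u -> aggregation level %u\n", cqi, aggregation_from_cqi(cqi));
  return 0;
}
```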
......@@ -2193,8 +2193,8 @@ add_new_ue(module_id_t mod_idP,
UE_list->UE_sched_ctrl[UE_id].ta_update = 31;
for (j = 0; j < 8; j++) {
UE_list->UE_template[cc_idP][UE_id].oldNDI[j] = (j == 0) ? 1 : 0; // 1 because first transmission is with format1A (Msg4) for harq_pid 0
UE_list->UE_template[cc_idP][UE_id].oldNDI_UL[j] = (j == harq_pidP) ? 0 : 1; // 1st transmission is with Msg3;
UE_list->UE_template[cc_idP][UE_id].oldNDI[j] = 0;
UE_list->UE_template[cc_idP][UE_id].oldNDI_UL[j] = 0;
UE_list->UE_sched_ctrl[UE_id].round[cc_idP][j] = 8;
UE_list->UE_sched_ctrl[UE_id].round_UL[cc_idP][j] = 0;
}
......@@ -2319,9 +2319,10 @@ rrc_mac_remove_ue(module_id_t mod_idP,
rntiP);
}
pthread_mutex_lock(&rrc_release_freelist);
if (rrc_release_info.num_UEs > 0) {
if(rrc_release_info.num_UEs > 0){
while(pthread_mutex_trylock(&rrc_release_freelist)) {
/* spin... */
}
uint16_t release_total = 0;
for (uint16_t release_num = 0; release_num < NUMBER_OF_UE_MAX; release_num++) {
......@@ -2341,6 +2342,7 @@ rrc_mac_remove_ue(module_id_t mod_idP,
break;
}
}
pthread_mutex_unlock(&rrc_release_freelist);
}
pthread_mutex_unlock(&rrc_release_freelist);
......
......@@ -460,7 +460,7 @@ rx_sdu(const module_id_t enb_mod_idP,
if (RA_id != -1) {
RA_t *ra = &(mac->common_channels[CC_idP].ra[RA_id]);
mac_rrc_data_ind(enb_mod_idP,
int8_t ret = mac_rrc_data_ind(enb_mod_idP,
CC_idP,
frameP, subframeP,
UE_id,
......@@ -473,6 +473,7 @@ rx_sdu(const module_id_t enb_mod_idP,
,ra->rach_resource_type > 0
#endif
);
if (ret == 0) {
/* Received a new rnti */
ra->state = MSGCRNTI;
LOG_I(MAC, "[eNB %d] Frame %d, Subframe %d CC_id %d : (rnti %x UE_id %d) Received rnti(Msg4)\n",
......@@ -502,6 +503,9 @@ rx_sdu(const module_id_t enb_mod_idP,
}
UE_template_ptr->ul_SR = 1;
UE_scheduling_control->crnti_reconfigurationcomplete_flag = 1;
} else {
cancel_ra_proc(enb_mod_idP, CC_idP, frameP,current_rnti);
}
// break;
}
}
......@@ -797,6 +801,10 @@ rx_sdu(const module_id_t enb_mod_idP,
mac_rlc_data_ind(enb_mod_idP, current_rnti, enb_mod_idP, frameP, ENB_FLAG_YES, MBMS_FLAG_NO, rx_lcids[i], (char *) payload_ptr, rx_lengths[i], 1, NULL); //(unsigned int*)crc_status);
UE_list->eNB_UE_stats[CC_idP][UE_id].num_pdu_rx[rx_lcids[i]] += 1;
UE_list->eNB_UE_stats[CC_idP][UE_id].num_bytes_rx[rx_lcids[i]] += rx_lengths[i];
if (mac_eNB_get_rrc_status(enb_mod_idP, current_rnti) < RRC_RECONFIGURED) {
UE_list->UE_sched_ctrl[UE_id].uplane_inactivity_timer = 0;
}
}
break;
......
......@@ -169,7 +169,7 @@
#define MAX_NUM_SLICES 10
#define U_PLANE_INACTIVITY_VALUE 6000
#define U_PLANE_INACTIVITY_VALUE 0 /* in units of 10 ms (zero disables the U-plane inactivity check) */
/*
* eNB part
......
......@@ -358,8 +358,6 @@ void rlc_am_segment_10 (
PROTOCOL_RLC_AM_CTXT_ARGS(ctxt_pP,rlc_pP),
pdu_remaining_size);
//msg ("[FRAME %05d][%s][RLC_AM][MOD %u/%u][RB %u][SEGMENT] pdu_mem_p %p pdu_p %p pdu_p->data %p data %p data_sdu_p %p pdu_remaining_size %d\n", rlc_pP->module_id, rlc_pP->rb_id, ctxt_pP->frame, pdu_mem_p, pdu_p, pdu_p->data, data, data_sdu_p,pdu_remaining_size);
rlc_am_mui.rrc_mui[rlc_am_mui.rrc_mui_num] = sdu_mngt_p->mui;
rlc_am_mui.rrc_mui_num++;
memcpy(data, data_sdu_p, pdu_remaining_size);
pdu_mngt_p->payload_size += pdu_remaining_size;
......@@ -397,8 +395,6 @@ void rlc_am_segment_10 (
continue_fill_pdu_with_sdu = 0;
pdu_remaining_size = 0;
} else if ((sdu_mngt_p->sdu_remaining_size + (li_length_in_bytes ^ 3)) < pdu_remaining_size ) {
rlc_am_mui.rrc_mui[rlc_am_mui.rrc_mui_num] = sdu_mngt_p->mui;
rlc_am_mui.rrc_mui_num++;
if (fill_num_li == (RLC_AM_MAX_SDU_IN_PDU - 1)) {
LOG_T(RLC, PROTOCOL_RLC_AM_CTXT_FMT"[SEGMENT] [SIZE %d] REACHING RLC_AM_MAX_SDU_IN_PDU LIs -> STOP SEGMENTATION FOR THIS PDU SDU\n",
PROTOCOL_RLC_AM_CTXT_ARGS(ctxt_pP,rlc_pP),
......@@ -416,6 +412,8 @@ void rlc_am_segment_10 (
// reduce the size of the PDU
continue_fill_pdu_with_sdu = 0;
fi_last_byte_pdu_is_last_byte_sdu = 1;
rlc_am_mui.rrc_mui[rlc_am_mui.rrc_mui_num] = sdu_mngt_p->mui;
rlc_am_mui.rrc_mui_num++;
} else {
LOG_T(RLC, PROTOCOL_RLC_AM_CTXT_FMT"[SEGMENT] Filling PDU with %d all remaining bytes of SDU\n",
PROTOCOL_RLC_AM_CTXT_ARGS(ctxt_pP,rlc_pP),
......
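The rlc_am_segment_10() hunks above appear to defer recording the SDU's MUI (the identifier RRC later matches against delivery confirmations) until the last byte of the SDU has actually been placed into a PDU, instead of recording it when segmentation of the SDU merely starts. A toy sketch of that record-on-completion idea, with invented names, is:

```c
#include <stdio.h>

#define MAX_MUI 16

static unsigned int rrc_mui_list[MAX_MUI];
static int rrc_mui_num = 0;

/* Segment one SDU of sdu_size bytes into PDUs of at most pdu_size bytes.
 * The MUI is recorded only when the SDU's last byte has been placed in a PDU. */
static void segment_sdu(unsigned int mui, int sdu_size, int pdu_size)
{
  int remaining = sdu_size;
  while (remaining > 0) {
    int chunk = (remaining < pdu_size) ? remaining : pdu_size;
    remaining -= chunk;
    if (remaining == 0 && rrc_mui_num < MAX_MUI)
      rrc_mui_list[rrc_mui_num++] = mui;   /* SDU fully segmented: record once */
  }
}

int main(void)
{
  segment_sdu(7, 1000, 300);               /* spans 4 PDUs, recorded once */
  printf("recorded %d MUI(s), last = %u\n", rrc_mui_num, rrc_mui_list[rrc_mui_num - 1]);
  return 0;
}
```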
......@@ -309,12 +309,15 @@ mac_rrc_data_ind(
ue_context_p = rrc_eNB_get_ue_context(RC.rrc[ctxt.module_id],rntiP);
if(ue_context_p) {
rrc_eNB_generate_defaultRRCConnectionReconfiguration(&ctxt,
ue_context_p,
0);
if (ue_context_p->ue_context.Status != RRC_RECONFIGURED) {
LOG_E(RRC,"[eNB %d] Received C-RNTI ,but UE %x status(%d) not RRC_RECONFIGURED\n",module_idP,rntiP,ue_context_p->ue_context.Status);
return (-1);
} else {
rrc_eNB_generate_defaultRRCConnectionReconfiguration(&ctxt,ue_context_p,0);
ue_context_p->ue_context.Status = RRC_RECONFIGURED;
}
}
}
return(0);
}
......@@ -362,8 +365,7 @@ void mac_eNB_rrc_ul_failure(const module_id_t Mod_instP,
flexran_agent_get_rrc_xface(Mod_instP)->flexran_agent_notify_ue_state_change(Mod_instP,
rntiP, PROTOCOL__FLEX_UE_STATE_CHANGE_TYPE__FLUESC_DEACTIVATED);
}
rrc_mac_remove_ue(Mod_instP,rntiP);
//rrc_mac_remove_ue(Mod_instP,rntiP);
}
void mac_eNB_rrc_uplane_failure(const module_id_t Mod_instP,
......
......@@ -4072,6 +4072,7 @@ do_RRCConnectionReestablishment(
LTE_DL_CCCH_Message_t dl_ccch_msg;
LTE_RRCConnectionReestablishment_t *rrcConnectionReestablishment = NULL;
int i = 0;
ue_context_pP->ue_context.reestablishment_xid = Transaction_id;
LTE_SRB_ToAddModList_t **SRB_configList2 = NULL;
SRB_configList2 = &ue_context_pP->ue_context.SRB_configList2[Transaction_id];
......
......@@ -1241,6 +1241,34 @@ void release_UE_in_freeList(module_id_t mod_id) {
}
}
int rrc_eNB_previous_SRB2(rrc_eNB_ue_context_t* ue_context_pP)
{
struct LTE_SRB_ToAddMod *SRB2_config = NULL;
uint8_t i;
LTE_SRB_ToAddModList_t* SRB_configList = ue_context_pP->ue_context.SRB_configList;
LTE_SRB_ToAddModList_t** SRB_configList2 = &ue_context_pP->ue_context.SRB_configList2[ue_context_pP->ue_context.reestablishment_xid];
if (*SRB_configList2 != NULL) {
if((*SRB_configList2)->list.count!=0){
LOG_D(RRC, "rrc_eNB_previous_SRB2 SRB_configList2(%p) count is %d\n SRB_configList2->list.array[0] addr is %p",
SRB_configList2, (*SRB_configList2)->list.count, (*SRB_configList2)->list.array[0]);
}
for (i = 0; (i < (*SRB_configList2)->list.count) && (i < 3); i++) {
if ((*SRB_configList2)->list.array[i]->srb_Identity == 2 ){
SRB2_config = (*SRB_configList2)->list.array[i];
break;
}
}
}else{
LOG_E(RRC, "rrc_eNB_previous_SRB2 SRB_configList2 NULL\n");
}
if (SRB2_config != NULL) {
ASN_SEQUENCE_ADD(&SRB_configList->list, SRB2_config);
}else{
LOG_E(RRC, "rrc_eNB_previous_SRB2 SRB2_config NULL\n");
}
return 0;
}
//-----------------------------------------------------------------------------
/*
* Process the rrc connection setup complete message from UE (SRB1 Active)
......@@ -1556,10 +1584,6 @@ rrc_eNB_generate_RRCConnectionReestablishment(
rnti);
}
/* Activate release timer, if RRCComplete not received after 100 frames, remove UE */
ue_context_pP->ue_context.ue_reestablishment_timer = 1;
/* Remove UE after 100 frames after LTE_RRCConnectionReestablishmentReject is triggered */
ue_context_pP->ue_context.ue_reestablishment_timer_thres = 1000;
}
//-----------------------------------------------------------------------------
......@@ -1606,6 +1630,8 @@ rrc_eNB_process_RRCConnectionReestablishmentComplete(
LTE_C_RNTI_t *cba_RNTI = NULL;
int measurements_enabled;
uint8_t next_xid = rrc_eNB_get_next_transaction_identifier(ctxt_pP->module_id);
int ret = 0;
ue_context_pP->ue_context.Status = RRC_CONNECTED;
ue_context_pP->ue_context.ue_rrc_inactivity_timer = 1; // set rrc inactivity when UE goes into RRC_CONNECTED
ue_context_pP->ue_context.reestablishment_xid = next_xid;
......@@ -1711,10 +1737,36 @@ rrc_eNB_process_RRCConnectionReestablishmentComplete(
create_tunnel_req.rnti = ctxt_pP->rnti; // warning put zero above
create_tunnel_req.num_tunnels = j;
gtpv1u_update_s1u_tunnel(
ret = gtpv1u_update_s1u_tunnel(
ctxt_pP->instance,
&create_tunnel_req,
reestablish_rnti);
if ( ret != 0 ) {
LOG_E(RRC,"gtpv1u_update_s1u_tunnel failed,start to release UE %x\n",reestablish_rnti);
// update s1u tunnel failed,reset rnti?
if (eNB_ue_s1ap_id > 0) {
h_rc = hashtable_get(rrc_instance_p->s1ap_id2_s1ap_ids, (hash_key_t)eNB_ue_s1ap_id, (void**)&rrc_ue_s1ap_ids_p);
if (h_rc == HASH_TABLE_OK ) {
rrc_ue_s1ap_ids_p->ue_rnti = reestablish_rnti;
}
}
if (ue_initial_id != 0) {
h_rc = hashtable_get(rrc_instance_p->initial_id2_s1ap_ids, (hash_key_t)ue_initial_id, (void**)&rrc_ue_s1ap_ids_p);
if (h_rc == HASH_TABLE_OK ) {
rrc_ue_s1ap_ids_p->ue_rnti = reestablish_rnti;
}
}
ue_context_pP->ue_context.ue_release_timer_s1 = 1;
ue_context_pP->ue_context.ue_release_timer_thres_s1 = 100;
ue_context_pP->ue_context.ue_release_timer = 0;
ue_context_pP->ue_context.ue_reestablishment_timer = 0;
ue_context_pP->ue_context.ul_failure_timer = 20000; // set ul_failure to 20000 for triggering rrc_eNB_send_S1AP_UE_CONTEXT_RELEASE_REQ
rrc_eNB_free_UE(ctxt_pP->module_id,ue_context_pP);
ue_context_pP->ue_context.ul_failure_timer = 0;
put_UE_in_freelist(ctxt_pP->module_id, ctxt_pP->rnti, 0);
return;
}
} /* EPC_MODE_ENABLED */
/* Update RNTI in ue_context */
......@@ -2236,8 +2288,9 @@ rrc_eNB_generate_RRCConnectionRelease(
ue_context_pP->ue_context.rnti,
rrc_eNB_mui,
size);
pthread_mutex_lock(&rrc_release_freelist);
while (pthread_mutex_trylock(&rrc_release_freelist)) {
/* spin... */
}
for (uint16_t release_num = 0; release_num < NUMBER_OF_UE_MAX; release_num++) {
if (rrc_release_info.RRC_release_ctrl[release_num].flag == 0) {
if (ue_context_pP->ue_context.ue_release_timer_s1 > 0) {
......@@ -7194,14 +7247,50 @@ rrc_eNB_decode_ccch(
rrc_eNB_generate_RRCConnectionReestablishmentReject(ctxt_pP, ue_context_p, CC_id);
break;
}
if((RC.mac[ctxt_pP->module_id]->UE_list.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer > 0) &&
(RC.mac[ctxt_pP->module_id]->UE_list.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer_thres > 20)){
LOG_E(RRC,
PROTOCOL_RRC_CTXT_UE_FMT" RCConnectionReestablishmentComplete(Previous) don't receive, delete the c-rnti UE\n",
PROTOCOL_RRC_CTXT_UE_ARGS(ctxt_pP));
RC.mac[ctxt_pP->module_id]->UE_list.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer = 1000;
rrc_eNB_previous_SRB2(ue_context_p);
ue_context_p->ue_context.ue_reestablishment_timer = 0;
}
//previous rnti
rnti_t previous_rnti = 0;
for (i = 0; i < MAX_MOBILES_PER_ENB; i++) {
if (reestablish_rnti_map[i][1] == c_rnti) {
previous_rnti = reestablish_rnti_map[i][0];
break;
}
}
if(previous_rnti != 0){
UE_id = find_UE_id(ctxt_pP->module_id, previous_rnti);
if(UE_id == -1){
LOG_E(RRC,
PROTOCOL_RRC_CTXT_UE_FMT" RRCConnectionReestablishmentRequest without UE_id(MAC) previous rnti %x, let's reject the UE\n",
PROTOCOL_RRC_CTXT_UE_ARGS(ctxt_pP),previous_rnti);
rrc_eNB_generate_RRCConnectionReestablishmentReject(ctxt_pP, ue_context_p, CC_id);
break;
}
if((RC.mac[ctxt_pP->module_id]->UE_list.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer > 0) &&
(RC.mac[ctxt_pP->module_id]->UE_list.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer_thres > 20)) {
LOG_E(RRC,
PROTOCOL_RRC_CTXT_UE_FMT" RCConnectionReestablishmentComplete(Previous) don't receive, delete the Previous UE\n",
PROTOCOL_RRC_CTXT_UE_ARGS(ctxt_pP));
RC.mac[ctxt_pP->module_id]->UE_list.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer = 1000;
rrc_eNB_previous_SRB2(ue_context_p);
ue_context_p->ue_context.ue_reestablishment_timer = 0;
}
}
//c-plane not end
if((ue_context_p->ue_context.Status != RRC_RECONFIGURED) && (ue_context_p->ue_context.reestablishment_cause == LTE_ReestablishmentCause_spare1)) {
LOG_E(RRC,
PROTOCOL_RRC_CTXT_UE_FMT" LTE_RRCConnectionReestablishmentRequest (UE %x c-plane is not end), let's reject the UE\n",
PROTOCOL_RRC_CTXT_UE_ARGS(ctxt_pP),c_rnti);
rrc_eNB_generate_RRCConnectionReestablishmentReject(ctxt_pP, ue_context_p, CC_id);
break;
}
if(ue_context_p->ue_context.ue_reestablishment_timer > 0) {
......@@ -7921,7 +8010,7 @@ rrc_eNB_decode_dcch(
}
RC.mac[ctxt_pP->module_id]->UE_list.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer = 0;
ue_context_p->ue_context.ue_reestablishment_timer = 0;
ue_context_p->ue_context.reestablishment_xid = -1;
if (ul_dcch_msg->message.choice.c1.choice.rrcConnectionReestablishmentComplete.criticalExtensions.present ==
LTE_RRCConnectionReestablishmentComplete__criticalExtensions_PR_rrcConnectionReestablishmentComplete_r8) {
......
......@@ -935,6 +935,8 @@ int rrc_eNB_process_S1AP_INITIAL_CONTEXT_SETUP_REQ(MessageDef *msg_p, const char
gtpv1u_enb_create_tunnel_req_t create_tunnel_req;
gtpv1u_enb_create_tunnel_resp_t create_tunnel_resp;
uint8_t inde_list[NB_RB_MAX - 3]= {0};
int ret;
struct rrc_eNB_ue_context_s *ue_context_p = NULL;
protocol_ctxt_t ctxt;
ue_initial_id = S1AP_INITIAL_CONTEXT_SETUP_REQ (msg_p).ue_initial_id;
......@@ -976,10 +978,23 @@ int rrc_eNB_process_S1AP_INITIAL_CONTEXT_SETUP_REQ(MessageDef *msg_p, const char
create_tunnel_req.rnti = ue_context_p->ue_context.rnti; // warning put zero above
// create_tunnel_req.num_tunnels = i;
gtpv1u_create_s1u_tunnel(
ret = gtpv1u_create_s1u_tunnel(
instance,
&create_tunnel_req,
&create_tunnel_resp);
if ( ret != 0 ) {
LOG_E(RRC,"rrc_eNB_process_S1AP_INITIAL_CONTEXT_SETUP_REQ : gtpv1u_create_s1u_tunnel failed,start to release UE %x\n",ue_context_p->ue_context.rnti);
ue_context_p->ue_context.ue_release_timer_s1 = 1;
ue_context_p->ue_context.ue_release_timer_thres_s1 = 100;
ue_context_p->ue_context.ue_release_timer = 0;
ue_context_p->ue_context.ue_reestablishment_timer = 0;
ue_context_p->ue_context.ul_failure_timer = 20000; // set ul_failure to 20000 for triggering rrc_eNB_send_S1AP_UE_CONTEXT_RELEASE_REQ
rrc_eNB_free_UE(ctxt.module_id,ue_context_p);
ue_context_p->ue_context.ul_failure_timer = 0;
return (0);
}
rrc_eNB_process_GTPV1U_CREATE_TUNNEL_RESP(
&ctxt,
&create_tunnel_resp,
......@@ -1252,6 +1267,8 @@ int rrc_eNB_process_S1AP_E_RAB_SETUP_REQ(MessageDef *msg_p, const char *msg_name
struct rrc_eNB_ue_context_s *ue_context_p = NULL;
protocol_ctxt_t ctxt;
uint8_t e_rab_done;
int ret = 0;
ue_initial_id = S1AP_E_RAB_SETUP_REQ (msg_p).ue_initial_id;
eNB_ue_s1ap_id = S1AP_E_RAB_SETUP_REQ (msg_p).eNB_ue_s1ap_id;
ue_context_p = rrc_eNB_get_ue_context_from_s1ap_ids(instance, ue_initial_id, eNB_ue_s1ap_id);
......@@ -1314,10 +1331,22 @@ int rrc_eNB_process_S1AP_E_RAB_SETUP_REQ(MessageDef *msg_p, const char *msg_name
create_tunnel_req.rnti = ue_context_p->ue_context.rnti; // warning put zero above
create_tunnel_req.num_tunnels = e_rab_done;
// NN: not sure if we should create a new tunnel: need to check teid, etc.
gtpv1u_create_s1u_tunnel(
ret = gtpv1u_create_s1u_tunnel(
instance,
&create_tunnel_req,
&create_tunnel_resp);
if ( ret != 0 ) {
LOG_E(RRC,"rrc_eNB_process_S1AP_E_RAB_SETUP_REQ : gtpv1u_create_s1u_tunnel failed,start to release UE %x\n",ue_context_p->ue_context.rnti);
ue_context_p->ue_context.ue_release_timer_s1 = 1;
ue_context_p->ue_context.ue_release_timer_thres_s1 = 100;
ue_context_p->ue_context.ue_release_timer = 0;
ue_context_p->ue_context.ue_reestablishment_timer = 0;
ue_context_p->ue_context.ul_failure_timer = 20000; // set ul_failure to 20000 for triggering rrc_eNB_send_S1AP_UE_CONTEXT_RELEASE_REQ
rrc_eNB_free_UE(ctxt.module_id,ue_context_p);
ue_context_p->ue_context.ul_failure_timer = 0;
return (0);
}
rrc_eNB_process_GTPV1U_CREATE_TUNNEL_RESP(
&ctxt,
&create_tunnel_resp,
......
......@@ -905,6 +905,10 @@ gtpv1u_create_s1u_tunnel(
int ip_offset = 0;
in_addr_t in_addr;
int addrs_length_in_bytes= 0;
int loop_counter = 0;
int ret = 0;
MSC_LOG_RX_MESSAGE(
MSC_GTPU_ENB,
MSC_RRC_ENB,
......@@ -919,6 +923,7 @@ gtpv1u_create_s1u_tunnel(
for (i = 0; i < create_tunnel_req_pP->num_tunnels; i++) {
ip_offset = 0;
loop_counter = 0;
eps_bearer_id = create_tunnel_req_pP->eps_bearer_id[i];
LOG_D(GTPU, "Rx GTPV1U_ENB_CREATE_TUNNEL_REQ ue rnti %x eps bearer id %u\n",
create_tunnel_req_pP->rnti, eps_bearer_id);
......@@ -933,7 +938,13 @@ gtpv1u_create_s1u_tunnel(
stack_req.apiInfo.createTunnelEndPointInfo.hStackSession = 0;
rc = nwGtpv1uProcessUlpReq(RC.gtpv1u_data_g->gtpv1u_stack, &stack_req);
LOG_D(GTPU, ".\n");
} while (rc != NW_GTPV1U_OK);
loop_counter++;
} while (rc != NW_GTPV1U_OK && loop_counter < 10);
if ( rc != NW_GTPV1U_OK && loop_counter == 10 ) {
LOG_E(GTPU,"NwGtpv1uCreateTunnelEndPoint failed 10 times,start next loop\n");
ret = -1;
continue;
}
//-----------------------
// PDCP->GTPV1U mapping
......@@ -1015,7 +1026,8 @@ gtpv1u_create_s1u_tunnel(
LOG_D(GTPU, "Tx GTPV1U_ENB_CREATE_TUNNEL_RESP ue rnti %x status %d\n",
create_tunnel_req_pP->rnti,
create_tunnel_resp_pP->status);
return 0;
//return 0;
return ret;
}
int gtpv1u_update_s1u_tunnel(
......@@ -1046,9 +1058,15 @@ int gtpv1u_update_s1u_tunnel(
memcpy(gtpv1u_ue_data_new_p,gtpv1u_ue_data_p,sizeof(gtpv1u_ue_data_t));
gtpv1u_ue_data_new_p->ue_id = create_tunnel_req_pP->rnti;
hash_rc = hashtable_insert(RC.gtpv1u_data_g->ue_mapping, create_tunnel_req_pP->rnti, gtpv1u_ue_data_new_p);
AssertFatal(hash_rc == HASH_TABLE_OK, "Error inserting ue_mapping in GTPV1U hashtable");
//AssertFatal(hash_rc == HASH_TABLE_OK, "Error inserting ue_mapping in GTPV1U hashtable");
if ( hash_rc != HASH_TABLE_OK ) {
LOG_E(GTPU,"Failed to insert ue_mapping(rnti=%x) in GTPV1U hashtable\n",create_tunnel_req_pP->rnti);
return -1;
} else {
LOG_I(GTPU, "inserting ue_mapping(rnti=%x) in GTPV1U hashtable\n",
create_tunnel_req_pP->rnti);
}
hash_rc = hashtable_remove(RC.gtpv1u_data_g->ue_mapping, prior_rnti);
LOG_I(GTPU, "hashtable_remove ue_mapping(rnti=%x) in GTPV1U hashtable\n",
prior_rnti);
......
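The GTP-U hunks above bound the previously endless do { ... } while (rc != NW_GTPV1U_OK) retry loop: after ten failed attempts the bearer is skipped, the failure is remembered in ret, and the RRC callers now inspect the return value of gtpv1u_create_s1u_tunnel()/gtpv1u_update_s1u_tunnel() instead of assuming success. A reduced sketch of that bounded-retry shape, with a stubbed try_create_endpoint() in place of nwGtpv1uProcessUlpReq(), is:

```c
#include <stdio.h>

#define MAX_ATTEMPTS 10

/* Stub for the stack call; fails for bearer 1 to exercise the error path. */
static int try_create_endpoint(int bearer)
{
  return (bearer == 1) ? -1 : 0;   /* 0 == success, mirrors NW_GTPV1U_OK */
}

/* Returns 0 if every bearer got a tunnel endpoint, -1 if at least one failed. */
static int create_tunnels(int num_bearers)
{
  int ret = 0;
  for (int bearer = 0; bearer < num_bearers; bearer++) {
    int rc, attempts = 0;
    do {
      rc = try_create_endpoint(bearer);
      attempts++;
    } while (rc != 0 && attempts < MAX_ATTEMPTS);

    if (rc != 0) {
      fprintf(stderr, "endpoint creation failed %d times for bearer %d, skipping it\n",
              attempts, bearer);
      ret = -1;                    /* remember the failure, keep going with the rest */
      continue;
    }
    printf("bearer %d: endpoint created\n", bearer);
  }
  return ret;                      /* caller decides whether to release the UE */
}

int main(void)
{
  printf("create_tunnels -> %d\n", create_tunnels(3));
  return 0;
}
```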
......@@ -378,10 +378,15 @@ NwGtpv1uCreateTunnelEndPoint( NW_IN NwGtpv1uStackT *thiz,
*phStackSession = (NwGtpv1uStackSessionHandleT) pTunnelEndPoint;
pTunnelEndPoint = RB_FIND(NwGtpv1uTunnelEndPointIdentifierMap,
&(thiz->teidMap), pTunnelEndPoint);
NW_ASSERT(pTunnelEndPoint);
//NW_ASSERT(pTunnelEndPoint);
if (!pTunnelEndPoint) {
GTPU_ERROR("Tunnel end-point cannot be NULL");
rc = NW_GTPV1U_FAILURE;
} else {
GTPU_DEBUG("Tunnel end-point 0x%p creation successful for teid 0x%x %u(dec)",
pTunnelEndPoint, (unsigned int)teid, (unsigned int)teid);
}
}
} else {
*phStackSession = (NwGtpv1uStackSessionHandleT) NULL;
......
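The last hunk turns NW_ASSERT(pTunnelEndPoint) into an explicit NULL check that logs the problem and returns NW_GTPV1U_FAILURE instead of aborting the eNB process. A generic sketch of that defensive style, with invented lookup_endpoint()/status_t names, is:

```c
#include <stdio.h>

typedef enum { STATUS_OK = 0, STATUS_FAILURE = 1 } status_t;

typedef struct { unsigned int teid; } tunnel_endpoint_t;

/* Stub lookup: returns NULL for an unknown TEID instead of asserting. */
static tunnel_endpoint_t *lookup_endpoint(unsigned int teid)
{
  static tunnel_endpoint_t known = { 0x1234 };
  return (teid == known.teid) ? &known : NULL;
}

/* Instead of NW_ASSERT(p), which would abort the process, log the error
 * and return a failure code the caller can translate into a UE release. */
static status_t bind_session(unsigned int teid)
{
  tunnel_endpoint_t *ep = lookup_endpoint(teid);
  if (ep == NULL) {
    fprintf(stderr, "tunnel end-point for teid 0x%x not found\n", teid);
    return STATUS_FAILURE;
  }
  printf("tunnel end-point %p bound for teid 0x%x\n", (void *)ep, ep->teid);
  return STATUS_OK;
}

int main(void)
{
  bind_session(0x1234);   /* OK      */
  bind_session(0xdead);   /* failure */
  return 0;
}
```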