Commit 1d5202dd authored by Vijay Chadachan

Fixed the PDCP unbound issue when the UE is released

- Modified the PDCP COUNT response to fill a dummy count for one SRB when the UE is no longer active
- Changed the very frequent LOG_A/LOG_I calls to LOG_D
parent e38e62b1
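
The core of the fix is the fallback path in pdcp_run(): when find_UE_id() no longer knows the RNTI, the handler now reports a single SRB with zeroed counts instead of leaving the PDCP COUNT response unfilled. A minimal, self-contained sketch of that fallback is below; the struct layouts are stand-ins inferred from the "+" lines of the pdcp_run() hunk, not the real SS headers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the SS structures: field names are read off the "+" lines of
 * the pdcp_run() hunk below; the real definitions live in the SS headers. */
typedef enum { E_PdcpCount_Srb, E_PdcpCount_Drb } pdcp_count_format_t;

typedef struct {
  uint8_t rb_id;
  bool is_srb;
  pdcp_count_format_t ul_format;
  pdcp_count_format_t dl_format;
  uint32_t ul_count;
  uint32_t dl_count;
} rb_info_t;

typedef struct {
  int size;
  rb_info_t rb_info[32];
} ss_get_pdcp_cnt_t;

/* Report one zeroed SRB so the SYS task can still send a well-formed
 * PDCP COUNT response after the UE context is gone. */
static void fill_dummy_srb_count(ss_get_pdcp_cnt_t *pc)
{
  pc->size += 1;
  pc->rb_info[0].rb_id = 0;
  pc->rb_info[0].is_srb = true;
  pc->rb_info[0].ul_format = E_PdcpCount_Srb;
  pc->rb_info[0].dl_format = E_PdcpCount_Srb;
  pc->rb_info[0].ul_count = 0;
  pc->rb_info[0].dl_count = 0;
}

int main(void)
{
  ss_get_pdcp_cnt_t pc = {0};
  fill_dummy_srb_count(&pc);
  printf("size=%d is_srb=%d ul=%u dl=%u\n", pc.size,
         (int)pc.rb_info[0].is_srb, pc.rb_info[0].ul_count, pc.rb_info[0].dl_count);
  return 0;
}
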
@@ -1918,7 +1918,7 @@ int oai_nfapi_dl_config_req(nfapi_dl_config_request_t *dl_config_req) {
   /* Below if condition checking if dl_config_req is for MIB (sf == 0) or SIB1 ((sfn % 2 == 0) && (sf == 5)) */
   dl_config_req->header.message_id = NFAPI_DL_CONFIG_REQUEST;
-  LOG_I(NFAPI_VNF, "MultiCell: fxn:%s phy_id:%d sfn:%d sf:%d \n", __FUNCTION__, dl_config_req->header.phy_id, sfn, sf);
+  LOG_D(NFAPI_VNF, "MultiCell: fxn:%s phy_id:%d sfn:%d sf:%d \n", __FUNCTION__, dl_config_req->header.phy_id, sfn, sf);
   if (dl_config_req->dl_config_request_body.number_pdu > 0)
   {
     for (int i = 0; i < dl_config_req->dl_config_request_body.number_pdu; i++)
@@ -1994,7 +1994,7 @@ int oai_nfapi_tx_req(nfapi_tx_request_t *tx_req)
   tx_req->header.message_id = NFAPI_TX_REQUEST;
   //LOG_D(PHY, "[VNF] %s() TX_REQ sfn_sf:%d number_of_pdus:%d\n", __FUNCTION__, NFAPI_SFNSF2DEC(tx_req->sfn_sf), tx_req->tx_request_body.number_of_pdus);
-  LOG_I(NFAPI_VNF, "MultiCell: fxn:%s phy_id:%d sfn:%d sf:%d \n", __FUNCTION__, tx_req->header.phy_id, sfn, sf);
+  LOG_D(NFAPI_VNF, "MultiCell: fxn:%s phy_id:%d sfn:%d sf:%d \n", __FUNCTION__, tx_req->header.phy_id, sfn, sf);
   retval = nfapi_vnf_p7_tx_req(p7_config, tx_req);
   if (retval!=0) {
     LOG_E(PHY, "%s() Problem sending tx_req for phyId:%d :%d\n", __FUNCTION__, tx_req->header.phy_id ,retval);
...
@@ -790,7 +790,7 @@ void schedule_response(Sched_Rsp_t *Sched_INFO, void *arg) {
   eNB->pdcch_vars[subframe&1].num_dci = 0;
   eNB->phich_vars[subframe&1].num_hi = 0;
   eNB->mpdcch_vars[subframe&1].num_dci = 0;
-  LOG_A(PHY,"NFAPI: Sched_INFO:SFN/SF:%04d%d CC_id:%d DL_req:SFN/SF:%04d%d:dl_pdu:%d tx_req:SFN/SF:%04d%d:pdus:%d\n",
+  LOG_D(PHY,"NFAPI: Sched_INFO:SFN/SF:%04d%d CC_id:%d DL_req:SFN/SF:%04d%d:dl_pdu:%d tx_req:SFN/SF:%04d%d:pdus:%d\n",
         frame,subframe,CC_id,
         NFAPI_SFNSF2SFN(DL_req->sfn_sf),NFAPI_SFNSF2SF(DL_req->sfn_sf),number_dl_pdu,
         NFAPI_SFNSF2SFN(TX_req->sfn_sf),NFAPI_SFNSF2SF(TX_req->sfn_sf),TX_req->tx_request_body.number_of_pdus
...
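
For reference, the NFAPI_SFNSF2SFN()/NFAPI_SFNSF2SF() macros in this log unpack nFAPI's 16-bit sfn_sf field. A small sketch, assuming the usual open-nFAPI packing (SFN in the upper 12 bits, subframe in the lower 4):

#include <assert.h>
#include <stdint.h>

/* Assumed packing: sfn_sf = (sfn << 4) | sf, with SFN 0..1023 in the upper
 * bits and subframe 0..9 in the lower 4 bits. */
#define SFNSF_PACK(sfn, sf) ((uint16_t)(((sfn) << 4) | ((sf) & 0xF)))
#define SFNSF2SFN(sfnsf)    ((sfnsf) >> 4)
#define SFNSF2SF(sfnsf)     ((sfnsf) & 0xF)

int main(void)
{
  uint16_t sfn_sf = SFNSF_PACK(123, 7);
  assert(SFNSF2SFN(sfn_sf) == 123);
  assert(SFNSF2SF(sfn_sf) == 7);
  return 0;
}
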
@@ -1454,13 +1454,23 @@ pdcp_run (
       SS_GET_PDCP_CNT(message_p).size = 0;
       ue_rnti = SS_REQ_PDCP_CNT(msg_p).rnti;
-      uint8_t rb_idx = 0;
-      if (SS_REQ_PDCP_CNT(msg_p).rb_id == 0xFF)
+      int UE_id = find_UE_id(ctxt_pP->module_id, ue_rnti);
+      /*
+       * The PDCP_GET_CNT request can also arrive while the UE is being
+       * released. If the UE is still active, fill in the RB details; if it
+       * is not, report a single SRB with a dummy count of 0, since TTCN
+       * expects a well-formed PDCP_GET response in either case.
+       */
+      if (UE_id != -1)
       {
-        LOG_D(PDCP, "PDCP Received request PDCP COUNT for all RB's\n");
-        for (int i = 0; i < MAX_RBS; i++)
+        uint8_t rb_idx = 0;
+        if (SS_REQ_PDCP_CNT(msg_p).rb_id == 0xFF)
         {
+          LOG_D(PDCP, "PDCP Received request PDCP COUNT for all RB's\n");
+          for (int i = 0; i < MAX_RBS; i++)
+          {
             if (i < 3)
             {
               rbid_ = i;
@@ -1472,53 +1482,69 @@ pdcp_run (
               key = PDCP_COLL_KEY_VALUE(ctxt_pP->module_id, ue_rnti, ctxt_pP->enb_flag, rbid_, 0);
             }
-            h_rc = hashtable_get(pdcp_coll_p, key, (void **) &pdcp_p);
+            h_rc = hashtable_get(pdcp_coll_p, key, (void **)&pdcp_p);
             if (h_rc == HASH_TABLE_OK)
             {
               /** Fill response */
-              LOG_D (PDCP, "Found entry on hastable for rbid_ : %d ctxt module_id %d rnti %d enb_flag %d\n",
+              LOG_D(PDCP, "Found entry on hastable for rbid_ : %d ctxt module_id %d rnti %d enb_flag %d\n",
                     rbid_, ctxt_pP->module_id, ue_rnti, ctxt_pP->enb_flag);
               pdcp_fill_ss_pdcp_cnt(pdcp_p, rb_idx, &(SS_GET_PDCP_CNT(message_p)));
               /** Increase the array index for next RB IDX */
-              rb_idx ++;
+              rb_idx++;
             }
             else
             {
-              LOG_D (PDCP, "No entry on hastable for rbid_: %d ctxt module_id %d rnti %d enb_flag %d\n",
+              LOG_E(PDCP, "No entry on hastable for rbid_: %d ctxt module_id %d rnti %d enb_flag %d\n",
                     rbid_, ctxt_pP->module_id, ue_rnti, ctxt_pP->enb_flag);
             }
           }
         }
         else
         {
           rb_idx = SS_REQ_PDCP_CNT(msg_p).rb_id;
           LOG_A(PDCP, "PDCP Received request PDCP COUNT for Single RB:%d\n",
                 SS_REQ_PDCP_CNT(message_p).rb_id);
           if (rb_idx < 3)
           {
             rbid_ = rb_idx;
             key = PDCP_COLL_KEY_VALUE(ctxt_pP->module_id, ue_rnti, ctxt_pP->enb_flag, rbid_, 1);
           }
           else
           {
             rbid_ = rb_idx - 3;
             key = PDCP_COLL_KEY_VALUE(ctxt_pP->module_id, ue_rnti, ctxt_pP->enb_flag, rbid_, 0);
           }
-          h_rc = hashtable_get(pdcp_coll_p, key, (void **) &pdcp_p);
+          h_rc = hashtable_get(pdcp_coll_p, key, (void **)&pdcp_p);
           if (h_rc == HASH_TABLE_OK)
           {
             if (SS_REQ_PDCP_CNT(message_p).rb_id <= MAX_RBS)
             {
               /** For single RB always updating at the 0th index only */
               pdcp_fill_ss_pdcp_cnt(pdcp_p, 0, &(SS_GET_PDCP_CNT(message_p)));
             }
           }
           else
           {
-            LOG_D (PDCP, "No entry for single RB on hastable for rbid_: %d\n", rbid_);
+            LOG_E(PDCP, "No entry for single RB on hastable for rbid_: %d\n", rbid_);
           }
         }
+      }
+      else
+      {
+        // Fill a dummy PDCP_CNT response, since the UE is no longer active
+        ss_get_pdcp_cnt_t *pc = &(SS_GET_PDCP_CNT(message_p));
+        pc->size += 1;
+        pc->rb_info[0].rb_id = 0;
+        pc->rb_info[0].is_srb = true;
+        pc->rb_info[0].ul_format = E_PdcpCount_Srb;
+        pc->rb_info[0].dl_format = E_PdcpCount_Srb;
+        pc->rb_info[0].ul_count = 0;
+        pc->rb_info[0].dl_count = 0;
+        LOG_D(PDCP, "SRB %d DL Count (dec): %d UL Count (dec): %d\n",
+              pc->rb_info[0].rb_id,
+              pc->rb_info[0].dl_count,
+              pc->rb_info[0].ul_count);
+      }
       itti_send_msg_to_task (TASK_SYS, ctxt_pP->module_id, message_p);
...
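
Both the all-RBs loop and the single-RB branch above build the hashtable key with the same convention: ids 0-2 address SRBs (srb flag 1), anything higher addresses a DRB re-based by 3 (srb flag 0). A standalone illustration of that mapping, with a hypothetical mock_key() standing in for PDCP_COLL_KEY_VALUE (the real macro also folds in module_id, rnti and enb_flag):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for PDCP_COLL_KEY_VALUE; only the rbid and the SRB
 * flag are modelled here. */
static uint64_t mock_key(uint8_t rbid, int is_srb)
{
  return ((uint64_t)is_srb << 8) | rbid;
}

/* The rb_id-to-key mapping used by both branches of the hunk above. */
static uint64_t rb_id_to_key(uint8_t rb_id)
{
  if (rb_id < 3)
    return mock_key(rb_id, 1);   /* SRBs keep their id, srb flag set */
  return mock_key(rb_id - 3, 0); /* DRBs are re-based to 0, srb flag clear */
}

int main(void)
{
  for (uint8_t id = 0; id < 6; id++)
    printf("rb_id %u -> key 0x%llx\n", id, (unsigned long long)rb_id_to_key(id));
  return 0;
}
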
@@ -204,7 +204,6 @@ static int sys_send_udp_msg(
   if (message_p)
   {
-    LOG_A(ENB_SS, "Sending UDP_DATA_REQ length %u offset %u buffer %d %d %d to address peerIpAddr:%s and peer port:peerPort\n", buffer_len, buffer_offset, buffer[0], buffer[1], buffer[2], peerIpAddr, peerPort);
     udp_data_req_p = &message_p->ittiMsg.udp_data_req;
     udp_data_req_p->peer_address = peerIpAddr;
     udp_data_req_p->peer_port = peerPort;
...
@@ -167,7 +167,7 @@ void *ss_eNB_vt_timer_process_itti_msg(void *notUsed)
   ss_set_timinfo_t tinfo;
   tinfo.sf = SS_UPD_TIM_INFO(received_msg).sf;
   tinfo.sfn = SS_UPD_TIM_INFO(received_msg).sfn;
-  LOG_A(ENB_APP, "[VT_TIMER] received_UPD_TIM_INFO SFN: %d SF: %d\n", tinfo.sfn, tinfo.sf);
+  LOG_D(ENB_APP, "[VT_TIMER] received_UPD_TIM_INFO SFN: %d SF: %d\n", tinfo.sfn, tinfo.sf);
   ss_vt_timer_check(tinfo);
 }
...
@@ -289,7 +289,7 @@ static inline int rxtx(PHY_VARS_eNB *eNB,
       printf("Error in itti_send_msg_to_task");
       // LOG_E( PHY, "[SS] Error in L1_Thread itti_send_msg_to_task"); /** TODO: Need separate logging for SS */
     }
-    LOG_A(PHY, "[SS] SS_UPD_TIM_INFO from L1_Thread to VTP task itti_send_msg_to_task sfn %d sf %d",
+    LOG_D(PHY, "[SS] SS_UPD_TIM_INFO from L1_Thread to VTP task itti_send_msg_to_task sfn %d sf %d",
           eNB->UL_INFO.subframe, eNB->UL_INFO.frame); /** TODO: Need separate logging for SS */
   }
   MessageDef *message_p_vt_timer = itti_alloc_new_message(TASK_ENB_APP, 0, SS_UPD_TIM_INFO);
@@ -303,7 +303,7 @@ static inline int rxtx(PHY_VARS_eNB *eNB,
       printf("Error in itti_send_msg_to_task");
      // LOG_E( PHY, "[SS] Error in L1_Thread itti_send_msg_to_task"); /** TODO: Need separate logging for SS */
     }
-    LOG_A(PHY, "[SS] SS_UPD_TIM_INFO from L1_Thread to TASK_VT_TIMER task itti_send_msg_to_task sfn %d sf %d",
+    LOG_D(PHY, "[SS] SS_UPD_TIM_INFO from L1_Thread to TASK_VT_TIMER task itti_send_msg_to_task sfn %d sf %d",
          eNB->UL_INFO.subframe, eNB->UL_INFO.frame); /** TODO: Need separate logging for SS */
   }
 }
...
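
The vt_timer and rxtx hunks above touch the same timing path: rxtx() in the L1 thread publishes an SS_UPD_TIM_INFO message (current SFN/SF) to the VTP and VT_TIMER tasks, and ss_eNB_vt_timer_process_itti_msg() unpacks it into an ss_set_timinfo_t before calling ss_vt_timer_check(). A compact stand-alone mirror of the consumer side, with the generated ITTI message accessors replaced by a plain struct (field names follow the vt_timer hunk; everything else is stubbed):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the generated ITTI message and the timer-info struct. */
typedef struct { uint16_t sfn; uint8_t sf; } ss_upd_tim_info_t;
typedef struct { uint16_t sfn; uint8_t sf; } ss_set_timinfo_t;

static void ss_vt_timer_check(ss_set_timinfo_t tinfo)
{
  printf("[VT_TIMER] check at SFN %u SF %u\n", tinfo.sfn, tinfo.sf);
}

/* Mirrors the unpack step in ss_eNB_vt_timer_process_itti_msg(). */
static void on_upd_tim_info(const ss_upd_tim_info_t *msg)
{
  ss_set_timinfo_t tinfo = { .sfn = msg->sfn, .sf = msg->sf };
  ss_vt_timer_check(tinfo);
}

int main(void)
{
  ss_upd_tim_info_t msg = { .sfn = 512, .sf = 3 };
  on_upd_tim_info(&msg);
  return 0;
}
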