Commit ef223d8f authored by Vijay C's avatar Vijay C

Merge branch 'PDCP_Unbound_fix' into '3GPP_TTCN_System_Simulator'

Fixed the PDCP unbound issue when UE is released

See merge request firecell/rdsubscription/sequansrd!153
parents 99f1eee1 1d5202dd
......@@ -1918,7 +1918,7 @@ int oai_nfapi_dl_config_req(nfapi_dl_config_request_t *dl_config_req) {
/* Below if condition checking if dl_config_req is for MIB (sf == 0) or SIB1 ((sfn % 2 == 0) && (sf == 5)) */
dl_config_req->header.message_id = NFAPI_DL_CONFIG_REQUEST;
LOG_I(NFAPI_VNF, "MultiCell: fxn:%s phy_id:%d sfn:%d sf:%d \n", __FUNCTION__, dl_config_req->header.phy_id, sfn, sf);
LOG_D(NFAPI_VNF, "MultiCell: fxn:%s phy_id:%d sfn:%d sf:%d \n", __FUNCTION__, dl_config_req->header.phy_id, sfn, sf);
if (dl_config_req->dl_config_request_body.number_pdu > 0)
{
for (int i = 0; i < dl_config_req->dl_config_request_body.number_pdu; i++)
......@@ -1994,7 +1994,7 @@ int oai_nfapi_tx_req(nfapi_tx_request_t *tx_req)
tx_req->header.message_id = NFAPI_TX_REQUEST;
//LOG_D(PHY, "[VNF] %s() TX_REQ sfn_sf:%d number_of_pdus:%d\n", __FUNCTION__, NFAPI_SFNSF2DEC(tx_req->sfn_sf), tx_req->tx_request_body.number_of_pdus);
LOG_I(NFAPI_VNF, "MultiCell: fxn:%s phy_id:%d sfn:%d sf:%d \n", __FUNCTION__, tx_req->header.phy_id, sfn, sf);
LOG_D(NFAPI_VNF, "MultiCell: fxn:%s phy_id:%d sfn:%d sf:%d \n", __FUNCTION__, tx_req->header.phy_id, sfn, sf);
retval = nfapi_vnf_p7_tx_req(p7_config, tx_req);
if (retval!=0) {
LOG_E(PHY, "%s() Problem sending tx_req for phyId:%d :%d\n", __FUNCTION__, tx_req->header.phy_id ,retval);
......
......@@ -790,7 +790,7 @@ void schedule_response(Sched_Rsp_t *Sched_INFO, void *arg) {
eNB->pdcch_vars[subframe&1].num_dci = 0;
eNB->phich_vars[subframe&1].num_hi = 0;
eNB->mpdcch_vars[subframe&1].num_dci = 0;
LOG_A(PHY,"NFAPI: Sched_INFO:SFN/SF:%04d%d CC_id:%d DL_req:SFN/SF:%04d%d:dl_pdu:%d tx_req:SFN/SF:%04d%d:pdus:%d\n",
LOG_D(PHY,"NFAPI: Sched_INFO:SFN/SF:%04d%d CC_id:%d DL_req:SFN/SF:%04d%d:dl_pdu:%d tx_req:SFN/SF:%04d%d:pdus:%d\n",
frame,subframe,CC_id,
NFAPI_SFNSF2SFN(DL_req->sfn_sf),NFAPI_SFNSF2SF(DL_req->sfn_sf),number_dl_pdu,
NFAPI_SFNSF2SFN(TX_req->sfn_sf),NFAPI_SFNSF2SF(TX_req->sfn_sf),TX_req->tx_request_body.number_of_pdus
......
......@@ -1560,13 +1560,23 @@ pdcp_run (
SS_GET_PDCP_CNT(message_p).size = 0;
ue_rnti = SS_REQ_PDCP_CNT(msg_p).rnti;
uint8_t rb_idx = 0;
if (SS_REQ_PDCP_CNT(msg_p).rb_id == 0xFF)
int UE_id = find_UE_id(ctxt_pP->module_id,ue_rnti);
/*
 * A PDCP_GET_CNT request can arrive while the UE is being released.
 * Check whether the UE is still active: if active, fill in the RB details;
 * if not active, populate a single SRB entry with zeroed counts as a dummy
 * response, since TTCN expects a well-formed PDCP_GET response either way.
 */
if (UE_id != -1)
{
LOG_D(PDCP, "PDCP Received request PDCP COUNT for all RB's\n");
for (int i = 0; i < MAX_RBS; i++)
uint8_t rb_idx = 0;
if (SS_REQ_PDCP_CNT(msg_p).rb_id == 0xFF)
{
LOG_D(PDCP, "PDCP Received request PDCP COUNT for all RB's\n");
for (int i = 0; i < MAX_RBS; i++)
{
if (i < 3)
{
rbid_ = i;
......@@ -1578,53 +1588,69 @@ pdcp_run (
key = PDCP_COLL_KEY_VALUE(ctxt_pP->module_id, ue_rnti, ctxt_pP->enb_flag, rbid_, 0);
}
h_rc = hashtable_get(pdcp_coll_p, key, (void **) &pdcp_p);
h_rc = hashtable_get(pdcp_coll_p, key, (void **)&pdcp_p);
if (h_rc == HASH_TABLE_OK)
{
/** Fill response */
LOG_D (PDCP, "Found entry on hastable for rbid_ : %d ctxt module_id %d rnti %d enb_flag %d\n",
rbid_, ctxt_pP->module_id, ue_rnti, ctxt_pP->enb_flag);
LOG_D(PDCP, "Found entry on hastable for rbid_ : %d ctxt module_id %d rnti %d enb_flag %d\n",
rbid_, ctxt_pP->module_id, ue_rnti, ctxt_pP->enb_flag);
pdcp_fill_ss_pdcp_cnt(pdcp_p, rb_idx, &(SS_GET_PDCP_CNT(message_p)));
/** Increase the array index for next RB IDX */
rb_idx ++;
rb_idx++;
}
else
{
LOG_D (PDCP, "No entry on hastable for rbid_: %d ctxt module_id %d rnti %d enb_flag %d\n",
rbid_, ctxt_pP->module_id, ue_rnti, ctxt_pP->enb_flag);
LOG_E(PDCP, "No entry on hastable for rbid_: %d ctxt module_id %d rnti %d enb_flag %d\n",
rbid_, ctxt_pP->module_id, ue_rnti, ctxt_pP->enb_flag);
}
}
}
}
else
{
rb_idx = SS_REQ_PDCP_CNT(msg_p).rb_id;
LOG_A(PDCP, "PDCP Received request PDCP COUNT for Single RB:%d\n",
SS_REQ_PDCP_CNT(message_p).rb_id);
if (rb_idx < 3)
else
{
rb_idx = SS_REQ_PDCP_CNT(msg_p).rb_id;
LOG_A(PDCP, "PDCP Received request PDCP COUNT for Single RB:%d\n",
SS_REQ_PDCP_CNT(message_p).rb_id);
if (rb_idx < 3)
{
rbid_ = rb_idx;
key = PDCP_COLL_KEY_VALUE(ctxt_pP->module_id, ue_rnti, ctxt_pP->enb_flag, rbid_, 1);
}
else
{
}
else
{
rbid_ = rb_idx - 3;
key = PDCP_COLL_KEY_VALUE(ctxt_pP->module_id, ue_rnti, ctxt_pP->enb_flag, rbid_, 0);
}
}
h_rc = hashtable_get(pdcp_coll_p, key, (void **) &pdcp_p);
if (h_rc == HASH_TABLE_OK)
{
h_rc = hashtable_get(pdcp_coll_p, key, (void **)&pdcp_p);
if (h_rc == HASH_TABLE_OK)
{
if (SS_REQ_PDCP_CNT(message_p).rb_id <= MAX_RBS)
{
/** For single RB always updating at the 0th index only */
pdcp_fill_ss_pdcp_cnt(pdcp_p, 0, &(SS_GET_PDCP_CNT(message_p)));
}
}
else
{
LOG_E(PDCP, "No entry for single RB on hastable for rbid_: %d\n", rbid_);
}
}
else
{
LOG_D (PDCP, "No entry for single RB on hastable for rbid_: %d\n", rbid_);
}
}
else
{
//Filling the dummy PDCP_CNT response as UE is not ACTIVE
ss_get_pdcp_cnt_t *pc = &(SS_GET_PDCP_CNT(message_p));
pc->size += 1;
pc->rb_info[0].rb_id = 0;
pc->rb_info[0].is_srb = true;
pc->rb_info[0].ul_format = E_PdcpCount_Srb;
pc->rb_info[0].dl_format = E_PdcpCount_Srb;
pc->rb_info[0].ul_count = 0;
pc->rb_info[0].dl_count = 0;
LOG_D(PDCP, "SRB %d DL Count (dec): %d UL Count (dec): %d\n", pc->rb_info[0].rb_id,
pc->rb_info[0].dl_count,
pc->rb_info[0].ul_count);
}
itti_send_msg_to_task (TASK_SYS, ctxt_pP->module_id, message_p);
......
......@@ -204,7 +204,6 @@ static int sys_send_udp_msg(
if (message_p)
{
LOG_A(ENB_SS, "Sending UDP_DATA_REQ length %u offset %u buffer %d %d %d to address peerIpAddr:%s and peer port:peerPort\n", buffer_len, buffer_offset, buffer[0], buffer[1], buffer[2], peerIpAddr, peerPort);
udp_data_req_p = &message_p->ittiMsg.udp_data_req;
udp_data_req_p->peer_address = peerIpAddr;
udp_data_req_p->peer_port = peerPort;
......
......@@ -167,7 +167,7 @@ void *ss_eNB_vt_timer_process_itti_msg(void *notUsed)
ss_set_timinfo_t tinfo;
tinfo.sf = SS_UPD_TIM_INFO(received_msg).sf;
tinfo.sfn = SS_UPD_TIM_INFO(received_msg).sfn;
LOG_A(ENB_APP, "[VT_TIMER] received_UPD_TIM_INFO SFN: %d SF: %d\n", tinfo.sfn, tinfo.sf);
LOG_D(ENB_APP, "[VT_TIMER] received_UPD_TIM_INFO SFN: %d SF: %d\n", tinfo.sfn, tinfo.sf);
ss_vt_timer_check(tinfo);
}
......
......@@ -289,7 +289,7 @@ static inline int rxtx(PHY_VARS_eNB *eNB,
printf("Error in itti_send_msg_to_task");
// LOG_E( PHY, "[SS] Error in L1_Thread itti_send_msg_to_task"); /** TODO: Need separate logging for SS */
}
LOG_A(PHY, "[SS] SS_UPD_TIM_INFO from L1_Thread to VTP task itti_send_msg_to_task sfn %d sf %d",
LOG_D(PHY, "[SS] SS_UPD_TIM_INFO from L1_Thread to VTP task itti_send_msg_to_task sfn %d sf %d",
eNB->UL_INFO.subframe, eNB->UL_INFO.frame); /** TODO: Need separate logging for SS */
}
MessageDef *message_p_vt_timer = itti_alloc_new_message(TASK_ENB_APP, 0, SS_UPD_TIM_INFO);
......@@ -303,7 +303,7 @@ static inline int rxtx(PHY_VARS_eNB *eNB,
printf("Error in itti_send_msg_to_task");
// LOG_E( PHY, "[SS] Error in L1_Thread itti_send_msg_to_task"); /** TODO: Need separate logging for SS */
}
LOG_A(PHY, "[SS] SS_UPD_TIM_INFO from L1_Thread to TASK_VT_TIMER task itti_send_msg_to_task sfn %d sf %d",
LOG_D(PHY, "[SS] SS_UPD_TIM_INFO from L1_Thread to TASK_VT_TIMER task itti_send_msg_to_task sfn %d sf %d",
eNB->UL_INFO.subframe, eNB->UL_INFO.frame); /** TODO: Need separate logging for SS */
}
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment