Commit 85440e2d authored by Robert Schmidt

Merge remote-tracking branch 'origin/gtpv1u-data-no-itti' into integration_2024_w50 (!3158)

GTP: use direct call instead of ITTI

Using ITTI calls for the user plane implies (1) a memory allocation, (2)
mutex locking, and (3) queueing a message for every user-plane packet,
which is heavy. Use a direct API call instead to reduce overhead.
parents 61849adf 27f1c0db
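
For context, the pattern this merge removes allocated an ITTI message per packet, copied the payload behind a reserved GTP-U header, and queued the message to the GTP task; the replacement is a single call into the GTP-U layer. A condensed before/after sketch, using only identifiers that appear in the diff below (`payload`, `len`, `ue_id`, and `bearer_id` stand in for the caller's local variables):

```c
/* Before (removed): per-packet allocation, copy, and queueing to the GTP task. */
MessageDef *m = itti_alloc_new_message_sized(TASK_PDCP_ENB, 0, GTPV1U_TUNNEL_DATA_REQ,
                                             sizeof(gtpv1u_tunnel_data_req_t) + len + GTPU_HEADER_OVERHEAD_MAX);
gtpv1u_tunnel_data_req_t *req = &GTPV1U_TUNNEL_DATA_REQ(m);
req->buffer = (uint8_t *)(req + 1);
memcpy(req->buffer + GTPU_HEADER_OVERHEAD_MAX, payload, len); /* copy of the whole payload */
req->length = len;
req->offset = GTPU_HEADER_OVERHEAD_MAX;
req->ue_id = ue_id;
req->bearer_id = bearer_id;
itti_send_msg_to_task(TASK_GTPV1_U, INSTANCE_DEFAULT, m);     /* queued, handled later by gtpv1uTask */

/* After: hand the caller's buffer to GTP-U directly on the calling thread. */
gtpv1uSendDirect(INSTANCE_DEFAULT, ue_id, bearer_id, payload, len, false, false);
```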
@@ -1474,6 +1474,7 @@ add_library(L2_UE
   ${MAC_SRC_UE}
 )
 target_link_libraries(L2_UE PRIVATE asn1_nr_rrc_hdrs asn1_lte_rrc_hdrs)
+target_link_libraries(L2_UE PRIVATE GTPV1U)
 add_library(L2_UE_LTE_NR
   ${L2_RRC_SRC_UE}
...
@@ -378,15 +378,15 @@ You might also want to consult TS 38.401 regarding the message exchange.
 ### General
 In the DU in UL, RLC checks in `deliver_sdu()` if we are operating in split
-mode, and either (direct) calls `pdcp_data_ind` (DRB) or (f1ap) sends an
-`GTPV1U_TUNNEL_DATA_REQ` ITTI message to the GTP task.
+mode, and either (direct) calls `pdcp_data_ind` (DRB) or (f1ap) sends a GTP
+message through the GTP API.
 In the CU in UL, assuming the tunnel is in place, GTP decapsulates the packet
 and calls the callback `cu_f1u_data_req()`, which calls `pdcp_data_ind()` in CU.
 In the CU in DL, the PDCP function `deliver_pdu_drb_gnb()` either (direct) calls
-into the RLC via `enqueue_rlc_data_req()`, or (f1ap) sends a
-`GTPV1U_TUNNEL_DATA_REQ` ITTI message to the GTP task.
+into the RLC via `enqueue_rlc_data_req()`, or (f1ap) sends a GTP message
+through the GTP API.
 In the DU in DL, assuming the GTP-U tunnel exists, GTP decapsulates the packet
 and calls the reception call back `du_rlc_data_req()`, which calls
...
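
A condensed sketch of the DU uplink dispatch this documentation describes, after the change (the branch structure and `gtpv1uSendDirect()` arguments follow the RLC hunk further down; the `split_mode` flag and the abridged `pdcp_data_ind()` argument list are placeholders):

```c
/* Inside RLC's deliver_sdu() on the DU, simplified: */
if (!split_mode) {
  /* integrated gNB: hand the SDU straight to PDCP */
  pdcp_data_ind(/* ctxt, srb_flag, rb_id, size, buf, ... */);
} else {
  /* F1 split: hand the SDU to GTP-U for transport to the CU */
  extern instance_t DUuniqInstance;
  gtpv1uSendDirect(DUuniqInstance, ue->ue_id, rb_id, (uint8_t *)buf, size, false, false);
}
```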
@@ -338,7 +338,7 @@ On the Tx side (downlink in gNB), the entry functions `nr_pdcp_data_req_drb()` a
 ## PDCP Rx flow
-At the Rx side, `pdcp_data_ind()` serves as the entry point for receiving data from RLC. Within `pdcp_data_ind()`, the PDCP manager mutex protects access to the PDU receiving function of PDCP (`recv_pdu()` callback corresponding to `nr_pdcp_entity_recv_pdu()` for DRBs). Following this, the `deliver_sdu_drb()` function dispatches the received data to the GTP thread via an ITTI message (`GTPV1U_TUNNEL_DATA_REQ`).
+At the Rx side, `pdcp_data_ind()` serves as the entry point for receiving data from RLC. Within `pdcp_data_ind()`, the PDCP manager mutex protects access to the PDU receiving function of PDCP (`recv_pdu()` callback corresponding to `nr_pdcp_entity_recv_pdu()` for DRBs). Following this, the `deliver_sdu_drb()` function dispatches the received data to the SDAP sublayer.
 ## PDCP security
...
@@ -20,7 +20,6 @@
  */
-MESSAGE_DEF(GTPV1U_TUNNEL_DATA_REQ, MESSAGE_PRIORITY_MED, gtpv1u_tunnel_data_req_t, Gtpv1uTunnelDataReq)
 MESSAGE_DEF(GTPV1U_ENB_DATA_FORWARDING_REQ, MESSAGE_PRIORITY_MED, gtpv1u_enb_data_forwarding_req_t,Gtpv1uDataForwardingReq)
 MESSAGE_DEF(GTPV1U_ENB_DATA_FORWARDING_IND, MESSAGE_PRIORITY_MED, gtpv1u_enb_data_forwarding_ind_t,Gtpv1uDataForwardingInd)
 MESSAGE_DEF(GTPV1U_ENB_END_MARKER_REQ, MESSAGE_PRIORITY_MED, gtpv1u_enb_end_marker_req_t, Gtpv1uEndMarkerReq)
...
@@ -30,7 +30,6 @@
 #define NR_GTPV1U_MAX_BEARERS_PER_UE max_val_NR_DRB_Identity
 #define GTPV1U_ENB_TUNNEL_DATA_IND(mSGpTR)     (mSGpTR)->ittiMsg.Gtpv1uTunnelDataInd
-#define GTPV1U_TUNNEL_DATA_REQ(mSGpTR)         (mSGpTR)->ittiMsg.Gtpv1uTunnelDataReq
 #define GTPV1U_ENB_DATA_FORWARDING_REQ(mSGpTR) (mSGpTR)->ittiMsg.Gtpv1uDataForwardingReq
 #define GTPV1U_ENB_DATA_FORWARDING_IND(mSGpTR) (mSGpTR)->ittiMsg.Gtpv1uDataForwardingInd
 #define GTPV1U_ENB_END_MARKER_REQ(mSGpTR)      (mSGpTR)->ittiMsg.Gtpv1uEndMarkerReq
@@ -107,15 +106,6 @@ typedef struct gtpv1u_enb_delete_tunnel_resp_s {
   teid_t enb_S1u_teid; ///< local S1U Tunnel Endpoint Identifier to be deleted
 } gtpv1u_enb_delete_tunnel_resp_t;
-typedef struct gtpv1u_tunnel_data_req_s {
-  uint8_t *buffer;
-  uint32_t length;
-  uint32_t offset; ///< start of message offset in buffer
-  ue_id_t ue_id;
-  rb_id_t bearer_id;
-} gtpv1u_tunnel_data_req_t;
 typedef struct gtpv1u_enb_data_forwarding_req_s {
   uint8_t *buffer;
   uint32_t length;
...
@@ -597,7 +597,6 @@ bool pdcp_data_ind(const protocol_ctxt_t *const ctxt_pP,
   uint8_t rb_offset= (srb_flagP == 0) ? DTCH -1 :0;
   uint16_t pdcp_uid=0;
-  MessageDef *message_p = NULL;
   uint32_t rx_hfn_for_count;
   int pdcp_sn_for_count;
   int security_ok;
@@ -999,19 +998,11 @@ bool pdcp_data_ind(const protocol_ctxt_t *const ctxt_pP,
   if (LINK_ENB_PDCP_TO_GTPV1U) {
     if ((true == ctxt_pP->enb_flag) && (false == srb_flagP)) {
-      LOG_D(PDCP, "Sending packet to GTP, Calling GTPV1U_TUNNEL_DATA_REQ ue %lx rab %ld len %u\n", ctxt_pP->rntiMaybeUEid, rb_id + 4, sdu_buffer_sizeP - payload_offset);
-      message_p = itti_alloc_new_message_sized(TASK_PDCP_ENB, 0, GTPV1U_TUNNEL_DATA_REQ,
-                                               sizeof(gtpv1u_tunnel_data_req_t) +
-                                               sdu_buffer_sizeP - payload_offset + GTPU_HEADER_OVERHEAD_MAX );
-      AssertFatal(message_p != NULL, "OUT OF MEMORY");
-      gtpv1u_tunnel_data_req_t *req=&GTPV1U_TUNNEL_DATA_REQ(message_p);
-      req->buffer = (uint8_t*)(req+1);
-      memcpy(req->buffer + GTPU_HEADER_OVERHEAD_MAX, sdu_buffer_pP + payload_offset, sdu_buffer_sizeP - payload_offset);
-      req->length = sdu_buffer_sizeP - payload_offset;
-      req->offset = GTPU_HEADER_OVERHEAD_MAX;
-      req->ue_id = ctxt_pP->rntiMaybeUEid;
-      req->bearer_id = rb_id + 4;
-      itti_send_msg_to_task(TASK_GTPV1_U, INSTANCE_DEFAULT, message_p);
+      ue_id_t ue_id = ctxt_pP->rntiMaybeUEid;
+      uint8_t *gtp_buf = sdu_buffer_pP + payload_offset;
+      size_t gtp_len = sdu_buffer_sizeP - payload_offset;
+      LOG_D(PDCP, "Sending packet to GTP ue %lx rab %ld len %ld\n", ue_id, rb_id + 4, gtp_len);
+      gtpv1uSendDirect(INSTANCE_DEFAULT, ue_id, rb_id + 4, gtp_buf, gtp_len, false, false);
       packet_forwarded = true;
     }
   } else {
...
@@ -718,23 +718,9 @@ static void deliver_pdu_drb_gnb(void *deliver_pdu_data, ue_id_t ue_id, int rb_id
   protocol_ctxt_t ctxt = { .enb_flag = 1, .rntiMaybeUEid = ue_data.secondary_ue };
   if (NODE_IS_CU(node_type)) {
-    MessageDef *message_p = itti_alloc_new_message_sized(TASK_PDCP_ENB, 0,
-                                                         GTPV1U_TUNNEL_DATA_REQ,
-                                                         sizeof(gtpv1u_tunnel_data_req_t)
-                                                         + size
-                                                         + GTPU_HEADER_OVERHEAD_MAX);
-    AssertFatal(message_p != NULL, "OUT OF MEMORY");
-    gtpv1u_tunnel_data_req_t *req=&GTPV1U_TUNNEL_DATA_REQ(message_p);
-    uint8_t *gtpu_buffer_p = (uint8_t*)(req+1);
-    memcpy(gtpu_buffer_p + GTPU_HEADER_OVERHEAD_MAX, buf, size);
-    req->buffer = gtpu_buffer_p;
-    req->length = size;
-    req->offset = GTPU_HEADER_OVERHEAD_MAX;
-    req->ue_id = ue_id; // use CU UE ID as GTP will use that to look up TEID
-    req->bearer_id = rb_id;
     LOG_D(PDCP, "%s() (drb %d) sending message to gtp size %d\n", __func__, rb_id, size);
     extern instance_t CUuniqInstance;
-    itti_send_msg_to_task(TASK_GTPV1_U, CUuniqInstance, message_p);
+    gtpv1uSendDirect(CUuniqInstance, ue_id, rb_id, (uint8_t *)buf, size, false, false);
   } else {
     uint8_t *memblock = malloc16(size);
     memcpy(memblock, buf, size);
...
@@ -40,6 +40,7 @@
 #include "openair2/F1AP/f1ap_du_rrc_message_transfer.h"
 #include "openair2/F1AP/f1ap_ids.h"
+#include "openair3/ocp-gtpu/gtp_itf.h"
 extern RAN_CONTEXT_t RC;
@@ -525,18 +526,9 @@ rb_found:
     itti_send_msg_to_task(TASK_DU_F1, ENB_MODULE_ID_TO_INSTANCE(0 /*ctxt_pP->module_id*/), msg);
     return;
   } else {
-    MessageDef *msg = itti_alloc_new_message_sized(TASK_RLC_ENB, 0, GTPV1U_TUNNEL_DATA_REQ,
-                                                   sizeof(gtpv1u_tunnel_data_req_t) + size);
-    gtpv1u_tunnel_data_req_t *req=&GTPV1U_TUNNEL_DATA_REQ(msg);
-    req->buffer=(uint8_t*)(req+1);
-    memcpy(req->buffer,buf,size);
-    req->length=size;
-    req->offset = 0;
-    req->ue_id = ue->ue_id;
-    req->bearer_id=rb_id;
     LOG_D(RLC, "Received uplink user-plane traffic at RLC-DU to be sent to the CU, size %d \n", size);
     extern instance_t DUuniqInstance;
-    itti_send_msg_to_task(TASK_GTPV1_U, DUuniqInstance, msg);
+    gtpv1uSendDirect(DUuniqInstance, ue->ue_id, rb_id, (uint8_t*) buf, size, false, false);
     return;
   }
 }
...
@@ -240,24 +240,14 @@ static void nr_sdap_rx_entity(nr_sdap_entity_t *entity,
       }
     }
+    uint8_t *gtp_buf = (uint8_t *)(buf + offset);
+    size_t gtp_len = size - offset;
     // Pushing SDAP SDU to GTP-U Layer
-    MessageDef *message_p = itti_alloc_new_message_sized(TASK_PDCP_ENB,
-                                                         0,
-                                                         GTPV1U_TUNNEL_DATA_REQ,
-                                                         sizeof(gtpv1u_tunnel_data_req_t)
-                                                         + size + GTPU_HEADER_OVERHEAD_MAX - offset);
-    AssertFatal(message_p != NULL, "OUT OF MEMORY");
-    gtpv1u_tunnel_data_req_t *req = &GTPV1U_TUNNEL_DATA_REQ(message_p);
-    uint8_t *gtpu_buffer_p = (uint8_t *) (req + 1);
-    memcpy(gtpu_buffer_p + GTPU_HEADER_OVERHEAD_MAX, buf + offset, size - offset);
-    req->buffer = gtpu_buffer_p;
-    req->length = size - offset;
-    req->offset = GTPU_HEADER_OVERHEAD_MAX;
-    req->ue_id = ue_id;
-    req->bearer_id = pdusession_id;
-    LOG_D(SDAP, "%s() sending message to gtp size %d\n", __func__, size-offset);
+    LOG_D(SDAP, "sending message to gtp size %ld\n", gtp_len);
     // very very dirty hack gloabl var N3GTPUInst
-    itti_send_msg_to_task(TASK_GTPV1_U, *N3GTPUInst, message_p);
+    instance_t inst = *N3GTPUInst;
+    gtpv1uSendDirect(inst, ue_id, pdusession_id, gtp_buf, gtp_len, false, false);
   } else { //nrUE
     /*
      * TS 37.324 5.2 Data transfer
...
@@ -193,7 +193,7 @@ instance_t legacyInstanceMapping=0;
   auto ptrUe=insT->ue2te_mapping.find(Ue);                              \
                                                                         \
   if ( ptrUe==insT->ue2te_mapping.end() ) {                             \
-    LOG_E(GTPU, "[%ld] gtpv1uSend failed: while getting ue id %ld in hashtable ue_mapping\n", instance, ue_id); \
+    LOG_E(GTPU, "[%ld] %s failed: while getting ue id %ld in hashtable ue_mapping\n", instance, __func__, ue_id); \
     pthread_mutex_unlock(&globGtp.gtp_lock);                            \
     return;                                                             \
   }
@@ -202,7 +202,7 @@ instance_t legacyInstanceMapping=0;
   auto ptrUe=insT->ue2te_mapping.find(Ue);                              \
                                                                         \
   if ( ptrUe==insT->ue2te_mapping.end() ) {                             \
-    LOG_E(GTPU, "[%ld] gtpv1uSend failed: while getting ue id %ld in hashtable ue_mapping\n", instance, ue_id); \
+    LOG_E(GTPU, "[%ld] %s failed: while getting ue id %ld in hashtable ue_mapping\n", instance, __func__, ue_id); \
     pthread_mutex_unlock(&globGtp.gtp_lock);                            \
     return GTPNOK;                                                      \
   }
@@ -286,38 +286,48 @@ static int gtpv1uCreateAndSendMsg(int h,
   return !GTPNOK;
 }
-static void gtpv1uSend(instance_t instance, gtpv1u_tunnel_data_req_t *req, bool seqNumFlag, bool npduNumFlag) {
-  uint8_t *buffer=req->buffer+req->offset;
-  size_t length=req->length;
-  ue_id_t ue_id=req->ue_id;
-  int bearer_id=req->bearer_id;
+void gtpv1uSendDirect(instance_t instance,
+                      ue_id_t ue_id,
+                      int bearer_id,
+                      uint8_t *buf,
+                      size_t len,
+                      bool seqNumFlag,
+                      bool npduNumFlag)
+{
   pthread_mutex_lock(&globGtp.gtp_lock);
   getInstRetVoid(compatInst(instance));
   getUeRetVoid(inst, ue_id);
-  auto ptr2=ptrUe->second.bearers.find(bearer_id);
+  auto ptr2 = ptrUe->second.bearers.find(bearer_id);
-  if ( ptr2 == ptrUe->second.bearers.end() ) {
-    LOG_E(GTPU,"[%ld] GTP-U instance: sending a packet to a non existant UE:RAB: %lx/%x\n", instance, ue_id, bearer_id);
+  if (ptr2 == ptrUe->second.bearers.end()) {
+    LOG_E(GTPU, "[%ld] GTP-U instance: sending a packet to a non existant UE:RAB: %lx/%x\n", instance, ue_id, bearer_id);
     pthread_mutex_unlock(&globGtp.gtp_lock);
     return;
   }
-  LOG_D(GTPU,"[%ld] sending a packet to UE:RAB:teid %lx/%x/%x, len %lu, oldseq %d, oldnum %d\n",
-        instance, ue_id, bearer_id,ptr2->second.teid_outgoing,length, ptr2->second.seqNum,ptr2->second.npduNum );
+  LOG_D(GTPU,
+        "[%ld] sending a packet to UE:RAB:teid %lx/%x/%x, len %lu, oldseq %d, oldnum %d\n",
+        instance,
+        ue_id,
+        bearer_id,
+        ptr2->second.teid_outgoing,
+        len,
+        ptr2->second.seqNum,
+        ptr2->second.npduNum);
-  if(seqNumFlag)
+  if (seqNumFlag)
     ptr2->second.seqNum++;
-  if(npduNumFlag)
+  if (npduNumFlag)
     ptr2->second.npduNum++;
   // copy to release the mutex
-  gtpv1u_bearer_t tmp=ptr2->second;
+  gtpv1u_bearer_t tmp = ptr2->second;
   pthread_mutex_unlock(&globGtp.gtp_lock);
   if (tmp.outgoing_qfi != -1) {
-    Gtpv1uExtHeaderT ext = { 0 };
+    Gtpv1uExtHeaderT ext = {0};
     ext.ExtHeaderLen = 1; // in quad bytes EXT_HDR_LNTH_OCTET_UNITS
     ext.pdusession_cntr.spare = 0;
     ext.pdusession_cntr.PDU_type = UL_PDU_SESSION_INFORMATION;
@@ -331,8 +341,8 @@ static void gtpv1uSend(instance_t instance, gtpv1u_tunnel_data_req_t *req, bool
                            tmp.outgoing_port,
                            GTP_GPDU,
                            tmp.teid_outgoing,
-                           buffer,
-                           length,
+                           buf,
+                           len,
                            seqNumFlag,
                            npduNumFlag,
                            tmp.seqNum,
@@ -341,8 +351,20 @@ static void gtpv1uSend(instance_t instance, gtpv1u_tunnel_data_req_t *req, bool
                            (uint8_t *)&ext,
                            sizeof(ext));
   } else {
-    gtpv1uCreateAndSendMsg(
-        compatInst(instance), tmp.outgoing_ip_addr, tmp.outgoing_port, GTP_GPDU, tmp.teid_outgoing, buffer, length, seqNumFlag, npduNumFlag, tmp.seqNum, tmp.npduNum, NO_MORE_EXT_HDRS, NULL, 0);
+    gtpv1uCreateAndSendMsg(compatInst(instance),
+                           tmp.outgoing_ip_addr,
+                           tmp.outgoing_port,
+                           GTP_GPDU,
+                           tmp.teid_outgoing,
+                           buf,
+                           len,
+                           seqNumFlag,
+                           npduNumFlag,
+                           tmp.seqNum,
+                           tmp.npduNum,
+                           NO_MORE_EXT_HDRS,
+                           NULL,
+                           0);
   }
 }
@@ -403,9 +425,10 @@ static void gtpv1uSendDlDeliveryStatus(instance_t instance, gtpv1u_DU_buffer_rep
       compatInst(instance), tmp.outgoing_ip_addr, tmp.outgoing_port, GTP_GPDU, tmp.teid_outgoing, NULL, 0, false, false, 0, 0, NR_RAN_CONTAINER, extensionHeader->buffer, extensionHeader->length);
 }
-static void gtpv1uEndTunnel(instance_t instance, gtpv1u_tunnel_data_req_t *req) {
-  ue_id_t ue_id=req->ue_id;
-  int bearer_id=req->bearer_id;
+static void gtpv1uEndTunnel(instance_t instance, gtpv1u_enb_end_marker_req_t *req)
+{
+  ue_id_t ue_id=req->rnti;
+  int bearer_id=req->rab_id;
   pthread_mutex_lock(&globGtp.gtp_lock);
   getInstRetVoid(compatInst(instance));
   getUeRetVoid(inst, ue_id);
@@ -1287,11 +1310,6 @@ void *gtpv1uTask(void *args) {
       switch (msgType) {
         // DATA TO BE SENT TO UDP
-        case GTPV1U_TUNNEL_DATA_REQ: {
-          gtpv1uSend(compatInst(myInstance), &GTPV1U_TUNNEL_DATA_REQ(message_p), false, false);
-        }
-        break;
         case GTPV1U_DU_BUFFER_REPORT_REQ:{
           gtpv1uSendDlDeliveryStatus(compatInst(myInstance), &GTPV1U_DU_BUFFER_REPORT_REQ(message_p));
         }
@@ -1305,8 +1323,8 @@ void *gtpv1uTask(void *args) {
           break;
         case GTPV1U_ENB_END_MARKER_REQ:
-          gtpv1uEndTunnel(compatInst(myInstance), &GTPV1U_TUNNEL_DATA_REQ(message_p));
-          itti_free(TASK_GTPV1_U, GTPV1U_TUNNEL_DATA_REQ(message_p).buffer);
+          gtpv1uEndTunnel(compatInst(myInstance), &GTPV1U_ENB_END_MARKER_REQ(message_p));
+          itti_free(TASK_GTPV1_U, GTPV1U_ENB_END_MARKER_REQ(message_p).buffer);
           break;
         case GTPV1U_ENB_DATA_FORWARDING_REQ:
...
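
Note how `gtpv1uSendDirect()` keeps its critical section short: the bearer is looked up and its counters updated under `globGtp.gtp_lock`, the state is snapshotted into a local `gtpv1u_bearer_t tmp` ("copy to release the mutex"), and the lock is dropped before the actual send. A minimal self-contained illustration of that pattern, with hypothetical names standing in for the real bearer table:

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical bearer record guarded by one global lock, mirroring the
 * snapshot-then-unlock pattern in gtpv1uSendDirect(). */
typedef struct {
  int seqNum;
  uint32_t teid_outgoing;
} bearer_t;

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bearer_t bearer = {0, 0x1234};

static void send_direct(const uint8_t *buf, size_t len, int seqNumFlag)
{
  pthread_mutex_lock(&lock);
  if (seqNumFlag)
    bearer.seqNum++;       /* mutate shared state under the lock */
  bearer_t tmp = bearer;   /* copy to release the mutex */
  pthread_mutex_unlock(&lock);
  /* slow I/O (a print standing in for the UDP send) runs unlocked */
  printf("send teid %x seq %d len %zu first byte %u\n",
         (unsigned)tmp.teid_outgoing, tmp.seqNum, len, len ? buf[0] : 0);
}

int main(void)
{
  uint8_t pdu[] = {0xde, 0xad};
  send_direct(pdu, sizeof(pdu), 1);
  return 0;
}
```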
@@ -103,6 +103,9 @@ extern "C" {
   int newGtpuDeleteOneTunnel(instance_t instance, ue_id_t ue_id, int rb_id);
   int newGtpuDeleteAllTunnels(instance_t instance, ue_id_t ue_id);
   int newGtpuDeleteTunnels(instance_t instance, ue_id_t ue_id, int nbTunnels, pdusessionid_t *pdusession_id);
+  void gtpv1uSendDirect(instance_t instance, ue_id_t ue_id, int bearer_id, uint8_t *buf, size_t len, bool seqNumFlag, bool npduNumFlag);
   instance_t gtpv1Init(openAddr_t context);
   void *gtpv1uTask(void *args);
...
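
With the prototype exported here, any layer holding a GTP-U instance handle can send a PDU without going through ITTI. A minimal usage sketch (the wrapper name is illustrative; the real callers in this merge pass `INSTANCE_DEFAULT`, `CUuniqInstance`, `DUuniqInstance`, or `*N3GTPUInst` as the instance):

```c
#include "openair3/ocp-gtpu/gtp_itf.h"

/* Illustrative wrapper: forward one user-plane PDU on an established tunnel.
 * seqNumFlag/npduNumFlag enable the optional GTP-U sequence-number and
 * N-PDU-number fields; every call site in this merge passes false for both. */
static void forward_pdu(instance_t gtp_inst, ue_id_t ue_id, int bearer_id,
                        uint8_t *pdu, size_t pdu_len)
{
  gtpv1uSendDirect(gtp_inst, ue_id, bearer_id, pdu, pdu_len, false, false);
}
```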