Commit 719752ca authored by Robert Schmidt

Merge remote-tracking branch 'origin/int-f1-ue-ctxt-setup' into integration_2023_w18b

......@@ -425,28 +425,26 @@ pipeline {
}
}
}
//avra is offline, re-enable once it is available
//stage ("T1-Offload-Test") {
// when { expression {do5Gtest} }
// steps {
// script {
// triggerSlaveJob ('RAN-T1-Offload-Test', 'T1-Offload-Test')
// }
// }
// post {
// always {
// script {
// finalizeSlaveJob('RAN-T1-Offload-Test')
// }
// }
// failure {
// script {
// currentBuild.result = 'FAILURE'
// }
// }
// }
//}
stage ("Interop-F1") {
when { expression {do5Gtest} }
steps {
script {
triggerSlaveJob ('RAN-Interop-F1', 'Interop-F1')
}
}
post {
always {
script {
finalizeSlaveJob('RAN-Interop-F1')
}
}
failure {
script {
currentBuild.result = 'FAILURE'
}
}
}
}
}
}
stage ("DockerHub-Push") {
......
<!--
Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The OpenAirInterface Software Alliance licenses this file to You under
the OAI Public License, Version 1.1 (the "License"); you may not use this file
except in compliance with the License.
You may obtain a copy of the License at
http://www.openairinterface.org/?page_id=698
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information about the OpenAirInterface (OAI) Software Alliance:
contact@openairinterface.org
-->
<testCaseList>
<htmlTabRef>rfsim-5gnr-f1-accelleran</htmlTabRef>
<htmlTabName>F1 DU Interop Accelleran CU</htmlTabName>
<htmlTabIcon>wrench</htmlTabIcon>
<TestCaseRequestedList>
111111
000020
000021
020021
100021
100022
</TestCaseRequestedList>
<TestCaseExclusionList></TestCaseExclusionList>
<testCase id="111111">
<class>Pull_Local_Registry</class>
<desc>Pull Images from Local Registry</desc>
<test_svr_id>0</test_svr_id>
<images_to_pull>oai-gnb oai-nr-ue</images_to_pull>
</testCase>
<testCase id="000020">
<class>DeployGenObject</class>
<desc>Deploy OAI-DU</desc>
<yaml_path>yaml_files/5g_rfsimulator_accelleran</yaml_path>
<services>oai-du</services>
<nb_healthy>1</nb_healthy>
</testCase>
<testCase id="000021">
<class>DeployGenObject</class>
<desc>Deploy nrUE</desc>
<yaml_path>yaml_files/5g_rfsimulator_accelleran</yaml_path>
<services>oai-nr-ue</services>
<nb_healthy>2</nb_healthy>
</testCase>
<testCase id="020021">
<class>PingFromContainer</class>
<desc>Test Connectivity (ping)</desc>
<container_name>rfsim5g-oai-nr-ue</container_name>
<options>-I oaitun_ue1 -c 20 12.1.1.1</options>
<loss_threshold>0</loss_threshold>
</testCase>
<testCase id="100022">
<class>UndeployGenObject</class>
<desc>Undeploy all OAI 5G stack</desc>
<yaml_path>yaml_files/5g_rfsimulator_accelleran</yaml_path>
</testCase>
</testCaseList>
<!--
Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The OpenAirInterface Software Alliance licenses this file to You under
the OAI Public License, Version 1.1 (the "License"); you may not use this file
except in compliance with the License.
You may obtain a copy of the License at
http://www.openairinterface.org/?page_id=698
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information about the OpenAirInterface (OAI) Software Alliance:
contact@openairinterface.org
-->
<testCaseList>
<htmlTabRef>rfsim-5gnr-down-f1-accelleran</htmlTabRef>
<htmlTabName>CleanUp</htmlTabName>
<htmlTabIcon>trash</htmlTabIcon>
<TestCaseRequestedList>
100022
222222
</TestCaseRequestedList>
<TestCaseExclusionList></TestCaseExclusionList>
<testCase id="100022">
<class>UndeployGenObject</class>
<desc>Undeploy all OAI 5G stack</desc>
<yaml_path>yaml_files/5g_rfsimulator_accelleran</yaml_path>
</testCase>
<testCase id="222222">
<class>Clean_Test_Server_Images</class>
<desc>Clean Test Images on Test Server</desc>
<test_svr_id>0</test_svr_id>
</testCase>
</testCaseList>
version: '3.8'
services:
oai-du:
image: oaisoftwarealliance/oai-gnb:develop
privileged: true
container_name: rfsim5g-oai-du
network_mode: "host"
environment:
RFSIMULATOR: server
USE_VOLUMED_CONF: 'yes'
USE_ADDITIONAL_OPTIONS: --sa --rfsim --MACRLCs.[0].local_n_address 172.21.16.109 --MACRLCs.[0].remote_n_address 172.21.6.22 --log_config.global_log_options level,nocolor,time
volumes:
- ../../conf_files/gnb-du.band78.106prb.rfsim.conf:/opt/oai-gnb/etc/mounted.conf
healthcheck:
test: /bin/bash -c "pgrep nr-softmodem"
interval: 10s
timeout: 5s
retries: 5
oai-nr-ue:
image: oaisoftwarealliance/oai-nr-ue:develop
privileged: true
container_name: rfsim5g-oai-nr-ue
network_mode: "host"
environment:
RFSIMULATOR: 127.0.0.1
FULL_IMSI: '208990100001140'
FULL_KEY: 'fec86ba6eb707ed08905757b1bb44b8f'
OPC: 'C42449363BBAD02B66D16BC975D77CC1'
DNN: oai
NSSAI_SST: 1
USE_ADDITIONAL_OPTIONS: --sa --rfsim -r 106 --numerology 1 -C 3619200000 --log_config.global_log_options level,nocolor,time
depends_on:
- oai-du
healthcheck:
test: /bin/bash -c "pgrep nr-uesoftmodem"
interval: 10s
timeout: 5s
retries: 5
......@@ -11,6 +11,7 @@
</td>
</tr>
</table>
# 1. Introduction
We use the OpenAirInterface source code and its regular deployment scheme.
......@@ -208,3 +209,46 @@ DU rlc north output goes to du_pdcp_data_ind() that push a outgoing packet reque
CU PDCP south output calls cu_rlc_data_ind(), which does the same symmetric processing.
# High-level F1-C code structure
The F1 interface is used internally between CU (mostly RRC) and DU (mostly MAC)
to exchange information. In DL, the CU sends messages as defined by the
callbacks in `mac_rrc_dl.h`, whose implementation is defined in files
`mac_rrc_dl_direct.c` (monolithic) and `mac_rrc_dl_f1ap.c` (for F1AP). In the
monolithic case, the RRC calls directly into the message handler on the DU side
(`mac_rrc_dl_handler.c`). In the F1 case, an ITTI message is sent to the CU
task, which sends an ASN.1-encoded F1AP message. The DU task on the DU side
decodes the message and then calls the corresponding handler in `mac_rrc_dl_handler.c`.
Thus, the message flow is the same in both the F1 and monolithic cases; the
difference is that F1AP encodes the messages using ASN.1 and sends them over a socket.
In UL, the callbacks defined in `mac_rrc_ul.h` are implemented by
`mac_rrc_ul_direct.c` (monolithic) and `mac_rrc_ul_f1ap.c` (F1). In the direct
case, an ITTI message is directly sent to the RRC task (hence, there is no
dedicated handler). In F1, the DU task receives the ITTI message, encodes it using
ASN.1, and sends it over a network socket. The CU task decodes it and sends the
same ITTI message to the RRC task, as is done directly in the monolithic case.
```
+-------------+
| |
| CU/RRC |
| |
+-------------+
| ^
Callback def: mac_rrc_dl.h | | No handler needed:
F1 impl: mac_rrc_dl_f1ap.c | | RRC has ITTI
Monolithic: mac_rrc_dl_direct.c | |
| |
DL | | UL
| |
| | Callback def: mac_rrc_ul.h
Message handler: | | F1 impl: mac_rrc_ul_f1ap.c
mac_rrc_dl_handler.c | | Monolithic: mac_rrc_ul_direct.c
v |
+-------------+
| |
| DU/MAC |
| |
+-------------+
```
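The dispatch this diagram describes amounts to a table of callbacks with two interchangeable implementations behind it. Below is a minimal, self-contained C sketch of that pattern; the types and names are simplified stand-ins for illustration (the real declarations live in `mac_rrc_dl.h`, `mac_rrc_dl_direct.c`, `mac_rrc_dl_f1ap.c` and `mac_rrc_dl_handler.c`), and the F1 branch only marks where the ASN.1 encoding and socket transfer would happen.
```
#include <stdio.h>

/* stand-in for the real f1ap_ue_context_setup_t */
typedef struct {
  int gNB_CU_ue_id;
} f1ap_ue_context_setup_t;

/* DU-side message handler (in OAI: mac_rrc_dl_handler.c) */
static void ue_context_setup_request(const f1ap_ue_context_setup_t *req)
{
  printf("DU: handle UE CONTEXT SETUP for CU UE id %d\n", req->gNB_CU_ue_id);
}

/* callback table, conceptually what mac_rrc_dl.h defines */
typedef struct {
  void (*ue_context_setup_request)(const f1ap_ue_context_setup_t *req);
} nr_mac_rrc_dl_if_t;

/* monolithic implementation: RRC calls straight into the DU handler */
static void ue_context_setup_request_direct(const f1ap_ue_context_setup_t *req)
{
  ue_context_setup_request(req);
}

/* F1 implementation: in OAI this goes through the CU task, which ASN.1-encodes
 * the F1AP message and sends it over the F1-C socket; the DU task decodes it
 * and then calls the same handler */
static void ue_context_setup_request_f1ap(const f1ap_ue_context_setup_t *req)
{
  printf("CU: encode F1AP UE CONTEXT SETUP REQUEST, send over F1-C\n");
  ue_context_setup_request(req); /* what the DU does after decoding */
}

int main(void)
{
  nr_mac_rrc_dl_if_t monolithic = { ue_context_setup_request_direct };
  nr_mac_rrc_dl_if_t f1_split = { ue_context_setup_request_f1ap };
  f1ap_ue_context_setup_t req = { .gNB_CU_ue_id = 1 };
  monolithic.ue_context_setup_request(&req); /* same message flow ... */
  f1_split.ue_context_setup_request(&req);   /* ... different transport */
  return 0;
}
```
Either way the DU-side handler sees the same call, which is what keeps the monolithic and F1 message flows identical.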
......@@ -82,6 +82,9 @@ Webhook
- [RAN-gNB-N300-Timing-Phytest-LDPC](https://jenkins-oai.eurecom.fr/view/RAN/job/RAN-gNB-N300-Timing-Phytest-LDPC/)
- caracal + N310
- pure performance test through phy-test scheduler, see command line for more details
- [RAN-Interop-F1](https://jenkins-oai.eurecom.fr/job/RAN-Interop-F1/)
- ofqot (DU, 1x UE)
- F1 interoperability testing: sets up the connection to the Accelleran CU, then performs UE connection and a connectivity test
- [RAN-L2-Sim-Test-5G](https://jenkins-oai.eurecom.fr/job/RAN-L2-Sim-Test-4G/)
- obelix (eNB, 1x UE, OAI EPC)
- L2simulator: skips physical layer and uses proxy between eNB and UE
......
......@@ -1010,7 +1010,10 @@ int main(int argc, char **argv)
UE_info->UE_sched_ctrl.harq_processes[harq_pid].ndi = !(trial&1);
UE_info->UE_sched_ctrl.harq_processes[harq_pid].round = round;
// nr_schedule_ue_spec() requires the mutex to be locked
NR_SCHED_LOCK(&gNB_mac->sched_lock);
nr_schedule_ue_spec(0, frame, slot, &Sched_INFO->DL_req, &Sched_INFO->TX_req);
NR_SCHED_UNLOCK(&gNB_mac->sched_lock);
Sched_INFO->module_id = 0;
Sched_INFO->CC_id = 0;
Sched_INFO->frame = frame;
......
......@@ -35,6 +35,7 @@
#include "f1ap_decoder.h"
#include "f1ap_itti_messaging.h"
#include "f1ap_du_ue_context_management.h"
#include "openair2/LAYER2/NR_MAC_gNB/mac_rrc_dl_handler.h"
#include "rrc_extern.h"
#include "openair2/RRC/NR/rrc_gNB_UE_context.h"
......@@ -65,13 +66,13 @@ bool lteDURecvCb(protocol_ctxt_t *ctxt_pP,
int DU_handle_UE_CONTEXT_SETUP_REQUEST(instance_t instance,
uint32_t assoc_id,
uint32_t stream,
F1AP_F1AP_PDU_t *pdu) {
MessageDef *msg_p; // message to RRC
F1AP_F1AP_PDU_t *pdu)
{
F1AP_UEContextSetupRequest_t *container;
int i;
DevAssert(pdu);
msg_p = itti_alloc_new_message(TASK_DU_F1, 0, F1AP_UE_CONTEXT_SETUP_REQ);
f1ap_ue_context_setup_t *f1ap_ue_context_setup_req = &F1AP_UE_CONTEXT_SETUP_REQ(msg_p);
f1ap_ue_context_setup_t ue_context_setup = {0};
f1ap_ue_context_setup_t *f1ap_ue_context_setup_req = &ue_context_setup;
container = &pdu->choice.initiatingMessage->value.choice.UEContextSetupRequest;
/* GNB_CU_UE_F1AP_ID */
F1AP_UEContextSetupRequestIEs_t *ieCU;
......@@ -224,8 +225,7 @@ int DU_handle_UE_CONTEXT_SETUP_REQUEST(instance_t instance,
LOG_W(F1AP, "can't find RRCContainer in UEContextSetupRequestIEs by id %ld \n", F1AP_ProtocolIE_ID_id_RRCContainer);
}
itti_send_msg_to_task(TASK_RRC_GNB, instance, msg_p);
ue_context_setup_request(f1ap_ue_context_setup_req);
return 0;
}
......
......@@ -99,73 +99,52 @@ static void process_rlcBearerConfig(struct NR_CellGroupConfig__rlc_BearerToAddMo
static void process_drx_Config(NR_UE_sched_ctrl_t *sched_ctrl, NR_SetupRelease_DRX_Config_t *drx_Config)
{
if (!drx_Config) return;
AssertFatal(drx_Config->present != NR_SetupRelease_DRX_Config_PR_NOTHING, "Cannot have NR_SetupRelease_DRX_Config_PR_NOTHING\n");
if (drx_Config->present == NR_SetupRelease_DRX_Config_PR_setup) {
LOG_I(NR_MAC,"Adding DRX config\n");
}
else {
LOG_I(NR_MAC,"Removing DRX config\n");
}
AssertFatal(false, "%s() not implemented\n", __func__);
AssertFatal(drx_Config->present != NR_SetupRelease_DRX_Config_PR_NOTHING, "Cannot have NR_SetupRelease_DRX_Config_PR_NOTHING\n");
}
static void process_schedulingRequestConfig(NR_UE_sched_ctrl_t *sched_ctrl, NR_SchedulingRequestConfig_t *schedulingRequestConfig)
{
if (!schedulingRequestConfig) return;
LOG_I(NR_MAC,"Adding SchedulingRequestconfig\n");
AssertFatal(false, "%s() not implemented\n", __func__);
}
static void process_bsrConfig(NR_UE_sched_ctrl_t *sched_ctrl, NR_BSR_Config_t *bsr_Config)
{
if (!bsr_Config) return;
LOG_I(NR_MAC,"Adding BSR config\n");
AssertFatal(false, "%s() not implemented\n", __func__);
}
static void process_tag_Config(NR_UE_sched_ctrl_t *sched_ctrl, NR_TAG_Config_t *tag_Config)
{
if (!tag_Config) return;
LOG_I(NR_MAC,"Adding TAG config\n");
AssertFatal(false, "%s() not implemented\n", __func__);
}
static void process_phr_Config(NR_UE_sched_ctrl_t *sched_ctrl, NR_SetupRelease_PHR_Config_t *phr_Config)
{
if (!phr_Config) return;
AssertFatal(phr_Config->present != NR_SetupRelease_PHR_Config_PR_NOTHING, "Cannot have NR_SetupRelease_PHR_Config_PR_NOTHING\n");
if (phr_Config->present == NR_SetupRelease_PHR_Config_PR_setup) {
LOG_I(NR_MAC,"Adding PHR config\n");
}
else {
LOG_I(NR_MAC,"Removing PHR config\n");
}
AssertFatal(false, "%s() not implemented\n", __func__);
}
void process_CellGroup(NR_CellGroupConfig_t *CellGroup, NR_UE_sched_ctrl_t *sched_ctrl)
{
/* we assume that this function is mutex-protected from outside */
NR_SCHED_ENSURE_LOCKED(&RC.nrmac[0]->sched_lock);
AssertFatal(CellGroup, "CellGroup is null\n");
NR_MAC_CellGroupConfig_t *mac_CellGroupConfig = CellGroup->mac_CellGroupConfig;
if (mac_CellGroupConfig) {
process_drx_Config(sched_ctrl,mac_CellGroupConfig->drx_Config);
process_schedulingRequestConfig(sched_ctrl,mac_CellGroupConfig->schedulingRequestConfig);
process_bsrConfig(sched_ctrl,mac_CellGroupConfig->bsr_Config);
process_tag_Config(sched_ctrl,mac_CellGroupConfig->tag_Config);
process_phr_Config(sched_ctrl,mac_CellGroupConfig->phr_Config);
}
else {
// apply defaults
//process_drx_Config(sched_ctrl,mac_CellGroupConfig->drx_Config);
//process_schedulingRequestConfig(sched_ctrl,mac_CellGroupConfig->schedulingRequestConfig);
//process_bsrConfig(sched_ctrl,mac_CellGroupConfig->bsr_Config);
//process_tag_Config(sched_ctrl,mac_CellGroupConfig->tag_Config);
//process_phr_Config(sched_ctrl,mac_CellGroupConfig->phr_Config);
}
process_rlcBearerConfig(CellGroup->rlc_BearerToAddModList,CellGroup->rlc_BearerToReleaseList,sched_ctrl);
}
void config_common(gNB_MAC_INST *nrmac, int pdsch_AntennaPorts, int pusch_AntennaPorts, NR_ServingCellConfigCommon_t *scc) {
static void config_common(gNB_MAC_INST *nrmac, int pdsch_AntennaPorts, int pusch_AntennaPorts, NR_ServingCellConfigCommon_t *scc)
{
nfapi_nr_config_request_scf_t *cfg = &nrmac->config[0];
nrmac->common_channels[0].ServingCellConfigCommon = scc;
......@@ -460,19 +439,23 @@ void config_common(gNB_MAC_INST *nrmac, int pdsch_AntennaPorts, int pusch_Antenn
}
}
int nr_mac_enable_ue_rrc_processing_timer(module_id_t Mod_idP, rnti_t rnti, NR_SubcarrierSpacing_t subcarrierSpacing, uint32_t rrc_reconfiguration_delay) {
int nr_mac_enable_ue_rrc_processing_timer(module_id_t Mod_idP, rnti_t rnti, NR_SubcarrierSpacing_t subcarrierSpacing, uint32_t rrc_reconfiguration_delay)
{
if (rrc_reconfiguration_delay == 0) {
return -1;
}
NR_UE_info_t *UE_info = find_nr_UE(&RC.nrmac[Mod_idP]->UE_info,rnti);
gNB_MAC_INST *nrmac = RC.nrmac[Mod_idP];
NR_SCHED_LOCK(&nrmac->sched_lock);
NR_UE_info_t *UE_info = find_nr_UE(&nrmac->UE_info,rnti);
if (!UE_info) {
LOG_W(NR_MAC, "Could not find UE for RNTI 0x%04x\n", rnti);
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return -1;
}
NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl;
const uint16_t sl_ahead = RC.nrmac[Mod_idP]->if_inst->sl_ahead;
const uint16_t sl_ahead = nrmac->if_inst->sl_ahead;
sched_ctrl->rrc_processing_timer = (rrc_reconfiguration_delay<<subcarrierSpacing) + sl_ahead;
LOG_I(NR_MAC, "Activating RRC processing timer for UE %04x with %d ms\n", UE_info->rnti, rrc_reconfiguration_delay);
......@@ -480,8 +463,9 @@ int nr_mac_enable_ue_rrc_processing_timer(module_id_t Mod_idP, rnti_t rnti, NR_S
// processing timer. To prevent this, set a variable as if we would have just
// sent it. This way, another TA command will for sure be sent in some
// frames, after RRC processing timer.
sched_ctrl->ta_frame = (RC.nrmac[Mod_idP]->frame - 1 + 1024) % 1024;
sched_ctrl->ta_frame = (nrmac->frame - 1 + 1024) % 1024;
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return 0;
}
......@@ -494,6 +478,7 @@ void nr_mac_config_scc(gNB_MAC_INST *nrmac,
{
DevAssert(nrmac != NULL);
AssertFatal(nrmac->common_channels[0].ServingCellConfigCommon == NULL, "logic error: multiple configurations of SCC\n");
NR_SCHED_LOCK(&nrmac->sched_lock);
DevAssert(scc != NULL);
AssertFatal(scc->ssb_PositionsInBurst->present > 0 && scc->ssb_PositionsInBurst->present < 4,
......@@ -573,26 +558,31 @@ void nr_mac_config_scc(gNB_MAC_INST *nrmac,
ra->preambles.preamble_list[i] = i;
}
}
NR_SCHED_UNLOCK(&nrmac->sched_lock);
}
void nr_mac_config_mib(gNB_MAC_INST *nrmac, NR_BCCH_BCH_Message_t *mib)
{
DevAssert(nrmac != NULL);
DevAssert(mib != NULL);
NR_SCHED_LOCK(&nrmac->sched_lock);
NR_COMMON_channels_t *cc = &nrmac->common_channels[0];
AssertFatal(cc->mib == NULL, "logic bug: updated MIB multiple times\n");
cc->mib = mib;
NR_SCHED_UNLOCK(&nrmac->sched_lock);
}
void nr_mac_config_sib1(gNB_MAC_INST *nrmac, NR_BCCH_DL_SCH_Message_t *sib1)
{
DevAssert(nrmac != NULL);
DevAssert(sib1 != NULL);
NR_SCHED_LOCK(&nrmac->sched_lock);
NR_COMMON_channels_t *cc = &nrmac->common_channels[0];
AssertFatal(cc->sib1 == NULL, "logic bug: updated SIB1 multiple times\n");
cc->sib1 = sib1;
NR_SCHED_UNLOCK(&nrmac->sched_lock);
}
bool nr_mac_add_test_ue(gNB_MAC_INST *nrmac, uint32_t rnti, NR_CellGroupConfig_t *CellGroup)
......@@ -600,16 +590,17 @@ bool nr_mac_add_test_ue(gNB_MAC_INST *nrmac, uint32_t rnti, NR_CellGroupConfig_t
DevAssert(nrmac != NULL);
DevAssert(CellGroup != NULL);
DevAssert(get_softmodem_params()->phy_test);
NR_SCHED_LOCK(&nrmac->sched_lock);
NR_UE_info_t* UE = add_new_nr_ue(nrmac, rnti, CellGroup);
if (UE) {
LOG_I(NR_MAC,"Force-added new UE %x with initial CellGroup\n", rnti);
process_CellGroup(CellGroup,&UE->UE_sched_ctrl);
} else {
LOG_E(NR_MAC,"Error adding UE %04x\n", rnti);
return false;
}
process_CellGroup(CellGroup,&UE->UE_sched_ctrl);
return true;
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return UE != NULL;
}
bool nr_mac_prepare_ra_nsa_ue(gNB_MAC_INST *nrmac, uint32_t rnti, NR_CellGroupConfig_t *CellGroup)
......@@ -617,6 +608,7 @@ bool nr_mac_prepare_ra_nsa_ue(gNB_MAC_INST *nrmac, uint32_t rnti, NR_CellGroupCo
DevAssert(nrmac != NULL);
DevAssert(CellGroup != NULL);
DevAssert(!get_softmodem_params()->phy_test);
NR_SCHED_LOCK(&nrmac->sched_lock);
// NSA case: need to pre-configure CFRA
const int CC_id = 0;
......@@ -628,6 +620,7 @@ bool nr_mac_prepare_ra_nsa_ue(gNB_MAC_INST *nrmac, uint32_t rnti, NR_CellGroupCo
}
if (ra_index == NR_NB_RA_PROC_MAX) {
LOG_E(NR_MAC, "RA processes are not available for CFRA RNTI %04x\n", rnti);
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return false;
}
NR_RA_t *ra = &cc->ra[ra_index];
......@@ -656,12 +649,16 @@ bool nr_mac_prepare_ra_nsa_ue(gNB_MAC_INST *nrmac, uint32_t rnti, NR_CellGroupCo
}
}
LOG_I(NR_MAC,"Added new RA process for UE RNTI %04x with initial CellGroup\n", rnti);
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return true;
}
bool nr_mac_update_cellgroup(gNB_MAC_INST *nrmac, uint32_t rnti, NR_CellGroupConfig_t *CellGroup)
{
DevAssert(nrmac != NULL);
/* we assume that this function is mutex-protected from outside */
NR_SCHED_ENSURE_LOCKED(&nrmac->sched_lock);
DevAssert(CellGroup != NULL);
NR_UE_info_t *UE = find_nr_UE(&nrmac->UE_info, rnti);
......@@ -677,58 +674,7 @@ bool nr_mac_update_cellgroup(gNB_MAC_INST *nrmac, uint32_t rnti, NR_CellGroupCon
exit(1);
}
nr_mac_update_RA(nrmac, rnti, CellGroup);
process_CellGroup(CellGroup, &UE->UE_sched_ctrl);
return true;
}
bool nr_mac_update_RA(gNB_MAC_INST *nrmac, uint32_t rnti, NR_CellGroupConfig_t *CellGroup)
{
// Checking for free RA process
NR_COMMON_channels_t *cc = &nrmac->common_channels[0];
uint8_t ra_index = 0;
for (; ra_index < NR_NB_RA_PROC_MAX; ra_index++) {
if ((cc->ra[ra_index].state == RA_IDLE) && (!cc->ra[ra_index].cfra))
break;
}
if (ra_index == NR_NB_RA_PROC_MAX) {
LOG_E(NR_MAC, "%s() %s:%d RA processes are not available for CFRA RNTI :%x\n", __FUNCTION__, __FILE__, __LINE__, rnti);
return -1;
}
NR_RA_t *ra = &cc->ra[ra_index];
if (CellGroup->spCellConfig && CellGroup->spCellConfig->reconfigurationWithSync
&& CellGroup->spCellConfig->reconfigurationWithSync->rach_ConfigDedicated != NULL) {
if (CellGroup->spCellConfig->reconfigurationWithSync->rach_ConfigDedicated->choice.uplink->cfra != NULL) {
ra->cfra = true;
ra->rnti = rnti;
ra->CellGroup = CellGroup;
struct NR_CFRA *cfra = CellGroup->spCellConfig->reconfigurationWithSync->rach_ConfigDedicated->choice.uplink->cfra;
uint8_t num_preamble = cfra->resources.choice.ssb->ssb_ResourceList.list.count;
ra->preambles.num_preambles = num_preamble;
ra->preambles.preamble_list = (uint8_t *)malloc(num_preamble * sizeof(uint8_t));
for (int i = 0; i < cc->num_active_ssb; i++) {
for (int j = 0; j < num_preamble; j++) {
if (cc->ssb_index[i] == cfra->resources.choice.ssb->ssb_ResourceList.list.array[j]->ssb) {
// One dedicated preamble for each beam
ra->preambles.preamble_list[i] = cfra->resources.choice.ssb->ssb_ResourceList.list.array[j]->ra_PreambleIndex;
break;
}
}
}
}
} else {
ra->cfra = false;
ra->rnti = 0;
if (ra->preambles.preamble_list == NULL) {
ra->preambles.num_preambles = MAX_NUM_NR_PRACH_PREAMBLES;
ra->preambles.preamble_list = (uint8_t *)malloc(MAX_NUM_NR_PRACH_PREAMBLES * sizeof(uint8_t));
for (int i = 0; i < MAX_NUM_NR_PRACH_PREAMBLES; i++)
ra->preambles.preamble_list[i] = i;
}
}
LOG_I(NR_MAC, "Added new %s process for UE RNTI %04x with initial CellGroup\n", ra->cfra ? "CFRA" : "CBRA", rnti);
return true;
}
......@@ -63,6 +63,7 @@ void clear_nr_nfapi_information(gNB_MAC_INST *gNB,
nfapi_nr_tx_data_request_t *TX_req,
nfapi_nr_ul_dci_request_t *UL_dci_req)
{
/* called below and in simulators, so we assume a lock but don't require it */
NR_ServingCellConfigCommon_t *scc = gNB->common_channels->ServingCellConfigCommon;
const int num_slots = nr_slots_per_frame[*scc->ssbSubcarrierSpacing];
......@@ -149,6 +150,8 @@ void gNB_dlsch_ulsch_scheduler(module_id_t module_idP, frame_t frame, sub_frame_
NR_COMMON_channels_t *cc = gNB->common_channels;
NR_ServingCellConfigCommon_t *scc = cc->ServingCellConfigCommon;
NR_SCHED_LOCK(&gNB->sched_lock);
if (slot==0 && (*scc->downlinkConfigCommon->frequencyInfoDL->frequencyBandList.list.array[0]>=257)) {
//FR2
const NR_TDD_UL_DL_Pattern_t *tdd = &scc->tdd_UL_DL_ConfigurationCommon->pattern1;
......@@ -254,6 +257,6 @@ void gNB_dlsch_ulsch_scheduler(module_id_t module_idP, frame_t frame, sub_frame_
copy_ul_tti_req(&sched_info->UL_tti_req, &gNB->UL_tti_req_ahead[0][current_index]);
stop_meas(&gNB->eNB_scheduler);
NR_SCHED_UNLOCK(&gNB->sched_lock);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_DLSCH_ULSCH_SCHEDULER,VCD_FUNCTION_OUT);
}
......@@ -49,17 +49,32 @@ extern RAN_CONTEXT_t RC;
extern const uint8_t nr_slots_per_frame[5];
extern uint16_t sl_ahead;
// forward declaration of functions used in this file
static void fill_msg3_pusch_pdu(nfapi_nr_pusch_pdu_t *pusch_pdu,
NR_ServingCellConfigCommon_t *scc,
int round,
int startSymbolAndLength,
rnti_t rnti,
int scs,
int bwp_size,
int bwp_start,
int mappingtype,
int fh,
int msg3_first_rb,
int msg3_nb_rb);
static void nr_fill_rar(uint8_t Mod_idP, NR_RA_t *ra, uint8_t *dlsch_buffer, nfapi_nr_pusch_pdu_t *pusch_pdu);
static const uint8_t DELTA[4] = {2, 3, 4, 6};
static const float ssb_per_rach_occasion[8] = {0.125, 0.25, 0.5, 1, 2, 4, 8};
int16_t ssb_index_from_prach(module_id_t module_idP,
frame_t frameP,
sub_frame_t slotP,
uint16_t preamble_index,
uint8_t freq_index,
uint8_t symbol) {
static int16_t ssb_index_from_prach(module_id_t module_idP,
frame_t frameP,
sub_frame_t slotP,
uint16_t preamble_index,
uint8_t freq_index,
uint8_t symbol)
{
gNB_MAC_INST *gNB = RC.nrmac[module_idP];
NR_COMMON_channels_t *cc = &gNB->common_channels[0];
NR_ServingCellConfigCommon_t *scc = cc->ServingCellConfigCommon;
......@@ -139,10 +154,12 @@ int16_t ssb_index_from_prach(module_id_t module_idP,
return index;
}
//Compute Total active SSBs and RO available
void find_SSB_and_RO_available(gNB_MAC_INST *nrmac)
{
/* already mutex protected through nr_mac_config_scc() */
NR_SCHED_ENSURE_LOCKED(&nrmac->sched_lock);
NR_COMMON_channels_t *cc = &nrmac->common_channels[0];
NR_ServingCellConfigCommon_t *scc = cc->ServingCellConfigCommon;
nfapi_nr_config_request_scf_t *cfg = &nrmac->config[0];
......@@ -242,6 +259,9 @@ void find_SSB_and_RO_available(gNB_MAC_INST *nrmac)
void schedule_nr_prach(module_id_t module_idP, frame_t frameP, sub_frame_t slotP)
{
gNB_MAC_INST *gNB = RC.nrmac[module_idP];
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
NR_SCHED_ENSURE_LOCKED(&gNB->sched_lock);
NR_COMMON_channels_t *cc = gNB->common_channels;
NR_ServingCellConfigCommon_t *scc = cc->ServingCellConfigCommon;
int mu;
......@@ -399,15 +419,20 @@ void schedule_nr_prach(module_id_t module_idP, frame_t frameP, sub_frame_t slotP
}
}
void nr_schedule_msg2(uint16_t rach_frame, uint16_t rach_slot,
uint16_t *msg2_frame, uint16_t *msg2_slot,
int mu, NR_ServingCellConfigCommon_t *scc,
frame_type_t frame_type,
uint16_t monitoring_slot_period,
uint16_t monitoring_offset,uint8_t beam_index,
uint8_t num_active_ssb,
int16_t *tdd_beam_association, int sl_ahead){
static void nr_schedule_msg2(uint16_t rach_frame,
uint16_t rach_slot,
uint16_t *msg2_frame,
uint16_t *msg2_slot,
int mu,
NR_ServingCellConfigCommon_t *scc,
frame_type_t frame_type,
uint16_t monitoring_slot_period,
uint16_t monitoring_offset,
uint8_t beam_index,
uint8_t num_active_ssb,
int16_t *tdd_beam_association,
int sl_ahead)
{
// preferentially we schedule the msg2 in the mixed slot or in the last dl slot
// if they are allowed by search space configuration
uint8_t response_window = scc->uplinkConfigCommon->initialUplinkBWP->rach_ConfigCommon->choice.setup->rach_ConfigGeneric.ra_ResponseWindow;
......@@ -516,12 +541,13 @@ void nr_initiate_ra_proc(module_id_t module_idP,
uint16_t preamble_index,
uint8_t freq_index,
uint8_t symbol,
int16_t timing_offset){
int16_t timing_offset)
{
gNB_MAC_INST *nr_mac = RC.nrmac[module_idP];
NR_SCHED_LOCK(&nr_mac->sched_lock);
uint8_t ul_carrier_id = 0; // 0 for NUL 1 for SUL
uint16_t msg2_frame, msg2_slot,monitoring_slot_period,monitoring_offset;
gNB_MAC_INST *nr_mac = RC.nrmac[module_idP];
NR_COMMON_channels_t *cc = &nr_mac->common_channels[CC_id];
NR_ServingCellConfigCommon_t *scc = cc->ServingCellConfigCommon;
frame_type_t frame_type = cc->frame_type;
......@@ -655,57 +681,22 @@ void nr_initiate_ra_proc(module_id_t module_idP,
cc->ssb_index[beam_index],
i);
NR_SCHED_UNLOCK(&nr_mac->sched_lock);
return;
}
NR_SCHED_UNLOCK(&nr_mac->sched_lock);
LOG_E(NR_MAC, "[gNB %d][RAPROC] FAILURE: CC_id %d Frame %d initiating RA procedure for preamble index %d\n", module_idP, CC_id, frameP, preamble_index);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_INITIATE_RA_PROC, 0);
}
void nr_schedule_RA(module_id_t module_idP,
frame_t frameP,
sub_frame_t slotP,
nfapi_nr_ul_dci_request_t *ul_dci_req,
nfapi_nr_dl_tti_request_t *DL_req,
nfapi_nr_tx_data_request_t *TX_req)
{
gNB_MAC_INST *mac = RC.nrmac[module_idP];
start_meas(&mac->schedule_ra);
for (int CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
NR_COMMON_channels_t *cc = &mac->common_channels[CC_id];
for (int i = 0; i < NR_NB_RA_PROC_MAX; i++) {
NR_RA_t *ra = &cc->ra[i];
LOG_D(NR_MAC, "RA[state:%d]\n", ra->state);
switch (ra->state) {
case Msg2:
nr_generate_Msg2(module_idP, CC_id, frameP, slotP, ra, DL_req, TX_req);
break;
case Msg3_retransmission:
nr_generate_Msg3_retransmission(module_idP, CC_id, frameP, slotP, ra, ul_dci_req);
break;
case Msg3_dcch_dtch:
nr_generate_Msg3_dcch_dtch_response(module_idP, CC_id, frameP, slotP, ra, DL_req, TX_req);
case Msg4:
nr_generate_Msg4(module_idP, CC_id, frameP, slotP, ra, DL_req, TX_req);
break;
case WAIT_Msg4_ACK:
nr_check_Msg4_Ack(module_idP, CC_id, frameP, slotP, ra);
break;
default:
break;
}
}
}
stop_meas(&mac->schedule_ra);
}
void nr_generate_Msg3_retransmission(module_id_t module_idP,
int CC_id,
frame_t frame,
sub_frame_t slot,
NR_RA_t *ra,
nfapi_nr_ul_dci_request_t *ul_dci_req)
static void nr_generate_Msg3_retransmission(module_id_t module_idP,
int CC_id,
frame_t frame,
sub_frame_t slot,
NR_RA_t *ra,
nfapi_nr_ul_dci_request_t *ul_dci_req)
{
gNB_MAC_INST *nr_mac = RC.nrmac[module_idP];
NR_COMMON_channels_t *cc = &nr_mac->common_channels[CC_id];
......@@ -877,15 +868,14 @@ void nr_generate_Msg3_retransmission(module_id_t module_idP,
}
}
void nr_get_Msg3alloc(module_id_t module_id,
int CC_id,
NR_ServingCellConfigCommon_t *scc,
sub_frame_t current_slot,
frame_t current_frame,
NR_RA_t *ra,
int16_t *tdd_beam_association)
static void nr_get_Msg3alloc(module_id_t module_id,
int CC_id,
NR_ServingCellConfigCommon_t *scc,
sub_frame_t current_slot,
frame_t current_frame,
NR_RA_t *ra,
int16_t *tdd_beam_association)
{
// msg3 is scheduled in mixed slot in the following TDD period
uint16_t msg3_nb_rb = 8; // sdu has 6 or 8 bytes
......@@ -999,16 +989,19 @@ void nr_get_Msg3alloc(module_id_t module_id,
ra->msg3_bwp_start = bwpStart;
}
void fill_msg3_pusch_pdu(nfapi_nr_pusch_pdu_t *pusch_pdu,
NR_ServingCellConfigCommon_t *scc,
int round,
int startSymbolAndLength,
rnti_t rnti, int scs,
int bwp_size, int bwp_start,
int mappingtype, int fh,
int msg3_first_rb, int msg3_nb_rb) {
static void fill_msg3_pusch_pdu(nfapi_nr_pusch_pdu_t *pusch_pdu,
NR_ServingCellConfigCommon_t *scc,
int round,
int startSymbolAndLength,
rnti_t rnti,
int scs,
int bwp_size,
int bwp_start,
int mappingtype,
int fh,
int msg3_first_rb,
int msg3_nb_rb)
{
int start_symbol_index,nr_of_symbols;
SLIV2SL(startSymbolAndLength, &start_symbol_index, &nr_of_symbols);
......@@ -1087,7 +1080,7 @@ void fill_msg3_pusch_pdu(nfapi_nr_pusch_pdu_t *pusch_pdu,
}
}
void nr_add_msg3(module_id_t module_idP, int CC_id, frame_t frameP, sub_frame_t slotP, NR_RA_t *ra, uint8_t *RAR_pdu)
static void nr_add_msg3(module_id_t module_idP, int CC_id, frame_t frameP, sub_frame_t slotP, NR_RA_t *ra, uint8_t *RAR_pdu)
{
gNB_MAC_INST *mac = RC.nrmac[module_idP];
NR_COMMON_channels_t *cc = &mac->common_channels[CC_id];
......@@ -1155,13 +1148,14 @@ void nr_add_msg3(module_id_t module_idP, int CC_id, frame_t frameP, sub_frame_t
nr_fill_rar(module_idP, ra, RAR_pdu, pusch_pdu);
}
void nr_generate_Msg2(module_id_t module_idP,
int CC_id,
frame_t frameP,
sub_frame_t slotP,
NR_RA_t *ra,
nfapi_nr_dl_tti_request_t *DL_req,
nfapi_nr_tx_data_request_t *TX_req)
static void nr_generate_Msg2(module_id_t module_idP,
int CC_id,
frame_t frameP,
sub_frame_t slotP,
NR_RA_t *ra,
nfapi_nr_dl_tti_request_t *DL_req,
nfapi_nr_tx_data_request_t *TX_req)
{
gNB_MAC_INST *nr_mac = RC.nrmac[module_idP];
NR_COMMON_channels_t *cc = &nr_mac->common_channels[CC_id];
......@@ -1427,22 +1421,29 @@ void nr_generate_Msg2(module_id_t module_idP,
}
}
void prepare_dl_pdus(gNB_MAC_INST *nr_mac,
NR_RA_t *ra,
NR_UE_DL_BWP_t *dl_bwp,
nfapi_nr_dl_tti_request_body_t *dl_req,
NR_sched_pucch_t *pucch,
NR_pdsch_dmrs_t dmrs_info,
NR_tda_info_t tda,
int aggregation_level,
int CCEIndex,
int tb_size, int ndi,
int tpc, int delta_PRI,
int current_harq_pid,
int time_domain_assignment,
int CC_id, int rnti, int round,
int mcsIndex, int tb_scaling,
int pduindex, int rbStart, int rbSize)
static void prepare_dl_pdus(gNB_MAC_INST *nr_mac,
NR_RA_t *ra,
NR_UE_DL_BWP_t *dl_bwp,
nfapi_nr_dl_tti_request_body_t *dl_req,
NR_sched_pucch_t *pucch,
NR_pdsch_dmrs_t dmrs_info,
NR_tda_info_t tda,
int aggregation_level,
int CCEIndex,
int tb_size,
int ndi,
int tpc,
int delta_PRI,
int current_harq_pid,
int time_domain_assignment,
int CC_id,
int rnti,
int round,
int mcsIndex,
int tb_scaling,
int pduindex,
int rbStart,
int rbSize)
{
// look up the PDCCH PDU for this CC, BWP, and CORESET. If it does not exist, create it. This is especially
// important if we have multiple RAs, and the DLSCH has to reuse them, so we need to mark them
......@@ -1613,13 +1614,13 @@ void prepare_dl_pdus(gNB_MAC_INST *nr_mac,
LOG_D(NR_MAC,"numDlDci: %i\n", pdcch_pdu_rel15->numDlDci);
}
void nr_generate_Msg3_dcch_dtch_response(module_id_t module_idP,
int CC_id,
frame_t frameP,
sub_frame_t slotP,
NR_RA_t *ra,
nfapi_nr_dl_tti_request_t *DL_req,
nfapi_nr_tx_data_request_t *TX_req)
static void nr_generate_Msg3_dcch_dtch_response(module_id_t module_idP,
int CC_id,
frame_t frameP,
sub_frame_t slotP,
NR_RA_t *ra,
nfapi_nr_dl_tti_request_t *DL_req,
nfapi_nr_tx_data_request_t *TX_req)
{
gNB_MAC_INST *nr_mac = RC.nrmac[module_idP];
......@@ -1799,13 +1800,13 @@ void nr_generate_Msg3_dcch_dtch_response(module_id_t module_idP,
sched_ctrl->ul_failure = 0;
}
void nr_generate_Msg4(module_id_t module_idP,
int CC_id,
frame_t frameP,
sub_frame_t slotP,
NR_RA_t *ra,
nfapi_nr_dl_tti_request_t *DL_req,
nfapi_nr_tx_data_request_t *TX_req)
static void nr_generate_Msg4(module_id_t module_idP,
int CC_id,
frame_t frameP,
sub_frame_t slotP,
NR_RA_t *ra,
nfapi_nr_dl_tti_request_t *DL_req,
nfapi_nr_tx_data_request_t *TX_req)
{
gNB_MAC_INST *nr_mac = RC.nrmac[module_idP];
NR_COMMON_channels_t *cc = &nr_mac->common_channels[CC_id];
......@@ -2056,8 +2057,8 @@ void nr_generate_Msg4(module_id_t module_idP,
}
}
void nr_check_Msg4_Ack(module_id_t module_id, int CC_id, frame_t frame, sub_frame_t slot, NR_RA_t *ra) {
static void nr_check_Msg4_Ack(module_id_t module_id, int CC_id, frame_t frame, sub_frame_t slot, NR_RA_t *ra)
{
NR_UE_info_t *UE = find_nr_UE(&RC.nrmac[module_id]->UE_info, ra->rnti);
const int current_harq_pid = ra->harq_pid;
......@@ -2098,7 +2099,10 @@ void nr_check_Msg4_Ack(module_id_t module_id, int CC_id, frame_t frame, sub_fram
}
}
void nr_clear_ra_proc(module_id_t module_idP, int CC_id, frame_t frameP, NR_RA_t *ra){
void nr_clear_ra_proc(module_id_t module_idP, int CC_id, frame_t frameP, NR_RA_t *ra)
{
/* we assume that this function is mutex-protected from outside */
NR_SCHED_ENSURE_LOCKED(&RC.nrmac[module_idP]->sched_lock);
LOG_D(NR_MAC,"[gNB %d][RAPROC] CC_id %d Frame %d Clear Random access information rnti %x\n", module_idP, CC_id, frameP, ra->rnti);
ra->state = RA_IDLE;
ra->timing_offset = 0;
......@@ -2141,11 +2145,8 @@ void nr_clear_ra_proc(module_id_t module_idP, int CC_id, frame_t frameP, NR_RA_t
// - sending only 1 RAR subPDU
// - UL Grant: hardcoded CSI, TPC, time alloc
// - padding
void nr_fill_rar(uint8_t Mod_idP,
NR_RA_t * ra,
uint8_t * dlsch_buffer,
nfapi_nr_pusch_pdu_t *pusch_pdu){
static void nr_fill_rar(uint8_t Mod_idP, NR_RA_t *ra, uint8_t *dlsch_buffer, nfapi_nr_pusch_pdu_t *pusch_pdu)
{
LOG_D(NR_MAC, "[gNB] Generate RAR MAC PDU frame %d slot %d preamble index %u TA command %d \n", ra->Msg2_frame, ra-> Msg2_slot, ra->preamble_index, ra->timing_offset);
NR_RA_HEADER_BI *rarbi = (NR_RA_HEADER_BI *) dlsch_buffer;
NR_RA_HEADER_RAPID *rarh = (NR_RA_HEADER_RAPID *) (dlsch_buffer + 1);
......@@ -2239,3 +2240,43 @@ void nr_fill_rar(uint8_t Mod_idP,
csi_req,
t_crnti);
}
void nr_schedule_RA(module_id_t module_idP,
frame_t frameP,
sub_frame_t slotP,
nfapi_nr_ul_dci_request_t *ul_dci_req,
nfapi_nr_dl_tti_request_t *DL_req,
nfapi_nr_tx_data_request_t *TX_req)
{
gNB_MAC_INST *mac = RC.nrmac[module_idP];
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
NR_SCHED_ENSURE_LOCKED(&mac->sched_lock);
start_meas(&mac->schedule_ra);
for (int CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
NR_COMMON_channels_t *cc = &mac->common_channels[CC_id];
for (int i = 0; i < NR_NB_RA_PROC_MAX; i++) {
NR_RA_t *ra = &cc->ra[i];
LOG_D(NR_MAC, "RA[state:%d]\n", ra->state);
switch (ra->state) {
case Msg2:
nr_generate_Msg2(module_idP, CC_id, frameP, slotP, ra, DL_req, TX_req);
break;
case Msg3_retransmission:
nr_generate_Msg3_retransmission(module_idP, CC_id, frameP, slotP, ra, ul_dci_req);
break;
case Msg3_dcch_dtch:
nr_generate_Msg3_dcch_dtch_response(module_idP, CC_id, frameP, slotP, ra, DL_req, TX_req);
case Msg4:
nr_generate_Msg4(module_idP, CC_id, frameP, slotP, ra, DL_req, TX_req);
break;
case WAIT_Msg4_ACK:
nr_check_Msg4_Ack(module_idP, CC_id, frameP, slotP, ra);
break;
default:
break;
}
}
}
stop_meas(&mac->schedule_ra);
}
......@@ -51,11 +51,15 @@
extern RAN_CONTEXT_t RC;
void schedule_ssb(frame_t frame, sub_frame_t slot,
NR_ServingCellConfigCommon_t *scc,
nfapi_nr_dl_tti_request_body_t *dl_req,
int i_ssb, uint8_t scoffset, uint16_t offset_pointa, uint32_t payload) {
static void schedule_ssb(frame_t frame,
sub_frame_t slot,
NR_ServingCellConfigCommon_t *scc,
nfapi_nr_dl_tti_request_body_t *dl_req,
int i_ssb,
uint8_t scoffset,
uint16_t offset_pointa,
uint32_t payload)
{
uint8_t beam_index = 0;
nfapi_nr_dl_tti_request_pdu_t *dl_config_pdu = &dl_req->dl_tti_pdu_list[dl_req->nPDUs];
memset((void *) dl_config_pdu, 0,sizeof(nfapi_nr_dl_tti_request_pdu_t));
......@@ -86,12 +90,25 @@ void schedule_ssb(frame_t frame, sub_frame_t slot,
dl_req->nPDUs++;
LOG_D(MAC,"Scheduling ssb %d at frame %d and slot %d\n",i_ssb,frame,slot);
}
static void fill_ssb_vrb_map(NR_COMMON_channels_t *cc, int rbStart, int ssb_subcarrier_offset, uint16_t symStart, int CC_id)
{
AssertFatal(*cc->ServingCellConfigCommon->ssbSubcarrierSpacing !=
NR_SubcarrierSpacing_kHz240,
"240kHZ subcarrier won't work with current VRB map because a single SSB might be across 2 slots\n");
uint16_t *vrb_map = cc[CC_id].vrb_map;
const int extra_prb = ssb_subcarrier_offset > 0;
for (int rb = 0; rb < 20+extra_prb; rb++)
vrb_map[rbStart + rb] = SL_to_bitmap(symStart, 4);
}
void schedule_nr_mib(module_id_t module_idP, frame_t frameP, sub_frame_t slotP, nfapi_nr_dl_tti_request_t *DL_req)
{
gNB_MAC_INST *gNB = RC.nrmac[module_idP];
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
NR_COMMON_channels_t *cc;
nfapi_nr_dl_tti_request_body_t *dl_req;
NR_MIB_t *mib = RC.nrrrc[module_idP]->carrier.mib->message.choice.mib;
......@@ -254,33 +271,15 @@ void schedule_nr_mib(module_id_t module_idP, frame_t frameP, sub_frame_t slotP,
}
}
void schedule_nr_SI(module_id_t module_idP, frame_t frameP, sub_frame_t subframeP) {
//----------------------------------------
}
void fill_ssb_vrb_map (NR_COMMON_channels_t *cc, int rbStart, int ssb_subcarrier_offset, uint16_t symStart, int CC_id) {
AssertFatal(*cc->ServingCellConfigCommon->ssbSubcarrierSpacing !=
NR_SubcarrierSpacing_kHz240,
"240kHZ subcarrier won't work with current VRB map because a single SSB might be across 2 slots\n");
uint16_t *vrb_map = cc[CC_id].vrb_map;
const int extra_prb = ssb_subcarrier_offset > 0;
for (int rb = 0; rb < 20+extra_prb; rb++)
vrb_map[rbStart + rb] = SL_to_bitmap(symStart, 4);
}
uint32_t schedule_control_sib1(module_id_t module_id,
int CC_id,
NR_Type0_PDCCH_CSS_config_t *type0_PDCCH_CSS_config,
int time_domain_allocation,
NR_pdsch_dmrs_t *dmrs_parms,
NR_tda_info_t *tda_info,
uint8_t candidate_idx,
uint16_t num_total_bytes) {
static uint32_t schedule_control_sib1(module_id_t module_id,
int CC_id,
NR_Type0_PDCCH_CSS_config_t *type0_PDCCH_CSS_config,
int time_domain_allocation,
NR_pdsch_dmrs_t *dmrs_parms,
NR_tda_info_t *tda_info,
uint8_t candidate_idx,
uint16_t num_total_bytes)
{
gNB_MAC_INST *gNB_mac = RC.nrmac[module_id];
NR_COMMON_channels_t *cc = &gNB_mac->common_channels[CC_id];
NR_ServingCellConfigCommon_t *scc = cc->ServingCellConfigCommon;
......@@ -377,14 +376,14 @@ uint32_t schedule_control_sib1(module_id_t module_id,
return TBS;
}
void nr_fill_nfapi_dl_sib1_pdu(int Mod_idP,
nfapi_nr_dl_tti_request_body_t *dl_req,
int pdu_index,
NR_Type0_PDCCH_CSS_config_t *type0_PDCCH_CSS_config,
uint32_t TBS,
int StartSymbolIndex,
int NrOfSymbols) {
static void nr_fill_nfapi_dl_sib1_pdu(int Mod_idP,
nfapi_nr_dl_tti_request_body_t *dl_req,
int pdu_index,
NR_Type0_PDCCH_CSS_config_t *type0_PDCCH_CSS_config,
uint32_t TBS,
int StartSymbolIndex,
int NrOfSymbols)
{
gNB_MAC_INST *gNB_mac = RC.nrmac[Mod_idP];
NR_COMMON_channels_t *cc = gNB_mac->common_channels;
NR_ServingCellConfigCommon_t *scc = cc->ServingCellConfigCommon;
......@@ -515,7 +514,6 @@ void nr_fill_nfapi_dl_sib1_pdu(int Mod_idP,
LOG_D(MAC,"ShiftIndex: %i\n", pdcch_pdu_rel15->ShiftIndex);
LOG_D(MAC,"precoderGranularity: %i\n", pdcch_pdu_rel15->precoderGranularity);
LOG_D(MAC,"numDlDci: %i\n", pdcch_pdu_rel15->numDlDci);
}
void schedule_nr_sib1(module_id_t module_idP,
......@@ -524,6 +522,7 @@ void schedule_nr_sib1(module_id_t module_idP,
nfapi_nr_dl_tti_request_t *DL_req,
nfapi_nr_tx_data_request_t *TX_req)
{
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
// TODO: Get these values from RRC
const int CC_id = 0;
uint8_t candidate_idx = 0;
......
......@@ -55,6 +55,7 @@
const int get_dl_tda(const gNB_MAC_INST *nrmac, const NR_ServingCellConfigCommon_t *scc, int slot) {
/* we assume that this function is mutex-protected from outside */
const NR_TDD_UL_DL_Pattern_t *tdd = scc->tdd_UL_DL_ConfigurationCommon ? &scc->tdd_UL_DL_ConfigurationCommon->pattern1 : NULL;
AssertFatal(tdd || nrmac->common_channels->frame_type == FDD, "Dynamic TDD not handled yet\n");
......@@ -76,8 +77,12 @@ int nr_write_ce_dlsch_pdu(module_id_t module_idP,
const NR_UE_sched_ctrl_t *ue_sched_ctl,
unsigned char *mac_pdu,
unsigned char drx_cmd,
unsigned char *ue_cont_res_id) {
unsigned char *ue_cont_res_id)
{
gNB_MAC_INST *gNB = RC.nrmac[module_idP];
/* already mutex protected: called below and in _RA.c */
NR_SCHED_ENSURE_LOCKED(&gNB->sched_lock);
NR_MAC_SUBHEADER_FIXED *mac_pdu_ptr = (NR_MAC_SUBHEADER_FIXED *) mac_pdu;
uint8_t last_size = 0;
int offset = 0, mac_ce_size, i, timing_advance_cmd, tag_id = 0;
......@@ -311,10 +316,8 @@ int nr_write_ce_dlsch_pdu(module_id_t module_idP,
return offset;
}
void nr_store_dlsch_buffer(module_id_t module_id,
frame_t frame,
sub_frame_t slot) {
static void nr_store_dlsch_buffer(module_id_t module_id, frame_t frame, sub_frame_t slot)
{
UE_iterator(RC.nrmac[module_id]->UE_info.list, UE) {
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
sched_ctrl->num_total_bytes = 0;
......@@ -363,8 +366,9 @@ void nr_store_dlsch_buffer(module_id_t module_id,
}
}
void abort_nr_dl_harq(NR_UE_info_t* UE, int8_t harq_pid) {
void abort_nr_dl_harq(NR_UE_info_t* UE, int8_t harq_pid)
{
/* already mutex protected through handle_dl_harq() */
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
NR_UE_harq_t *harq = &sched_ctrl->harq_processes[harq_pid];
......@@ -375,13 +379,13 @@ void abort_nr_dl_harq(NR_UE_info_t* UE, int8_t harq_pid) {
}
bool allocate_dl_retransmission(module_id_t module_id,
frame_t frame,
sub_frame_t slot,
uint16_t *rballoc_mask,
int *n_rb_sched,
NR_UE_info_t *UE,
int current_harq_pid)
static bool allocate_dl_retransmission(module_id_t module_id,
frame_t frame,
sub_frame_t slot,
uint16_t *rballoc_mask,
int *n_rb_sched,
NR_UE_info_t *UE,
int current_harq_pid)
{
int CC_id = 0;
......@@ -549,15 +553,14 @@ static int comparator(const void *p, const void *q) {
return ((UEsched_t*)p)->coef < ((UEsched_t*)q)->coef;
}
void pf_dl(module_id_t module_id,
frame_t frame,
sub_frame_t slot,
NR_UE_info_t **UE_list,
int max_num_ue,
int n_rb_sched,
uint16_t *rballoc_mask)
static void pf_dl(module_id_t module_id,
frame_t frame,
sub_frame_t slot,
NR_UE_info_t **UE_list,
int max_num_ue,
int n_rb_sched,
uint16_t *rballoc_mask)
{
gNB_MAC_INST *mac = RC.nrmac[module_id];
NR_ServingCellConfigCommon_t *scc=mac->common_channels[0].ServingCellConfigCommon;
// UEs that could be scheduled
......@@ -784,7 +787,7 @@ void pf_dl(module_id_t module_id,
}
}
void nr_fr1_dlsch_preprocessor(module_id_t module_id, frame_t frame, sub_frame_t slot)
static void nr_fr1_dlsch_preprocessor(module_id_t module_id, frame_t frame, sub_frame_t slot)
{
NR_UEs_t *UE_info = &RC.nrmac[module_id]->UE_info;
......@@ -843,6 +846,7 @@ void nr_fr1_dlsch_preprocessor(module_id_t module_id, frame_t frame, sub_frame_t
}
nr_pp_impl_dl nr_init_fr1_dlsch_preprocessor(int CC_id) {
/* during initialization: no mutex needed */
/* in the PF algorithm, we have to use the TBsize to compute the coefficient.
* This would include the number of DMRS symbols, which in turn depends on
* the time domain allocation. In case we are in a mixed slot, we do not want
......@@ -876,6 +880,9 @@ void nr_schedule_ue_spec(module_id_t module_id,
nfapi_nr_tx_data_request_t *TX_req)
{
gNB_MAC_INST *gNB_mac = RC.nrmac[module_id];
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
AssertFatal(pthread_mutex_trylock(&gNB_mac->sched_lock) == EBUSY,
"this function should be called with the scheduler mutex locked\n");
if (!is_xlsch_in_slot(gNB_mac->dlsch_slot_bitmap[slot / 64], slot))
return;
......
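The comment and assertion in this hunk spell out the locking convention the rest of the commit relies on: `gNB_dlsch_ulsch_scheduler()` takes `sched_lock` once per slot, and the functions it calls only assert that the lock is already held. A minimal, self-contained sketch of that convention follows; the macro definitions are assumptions for illustration (the trylock/EBUSY check above suggests what `NR_SCHED_ENSURE_LOCKED` amounts to), not the actual OAI definitions.
```
#include <pthread.h>
#include <errno.h>
#include <assert.h>

/* assumed definitions, for illustration only */
#define NR_SCHED_LOCK(m)   do { int rc = pthread_mutex_lock(m); assert(rc == 0); (void)rc; } while (0)
#define NR_SCHED_UNLOCK(m) do { int rc = pthread_mutex_unlock(m); assert(rc == 0); (void)rc; } while (0)
/* callee-side check: the caller must already hold the mutex, so trylock fails with EBUSY */
#define NR_SCHED_ENSURE_LOCKED(m) assert(pthread_mutex_trylock(m) == EBUSY)

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;

/* stands for nr_schedule_ue_spec(), nr_schedule_RA(), nr_csirs_scheduling(), ... */
static void nr_schedule_something(void)
{
  NR_SCHED_ENSURE_LOCKED(&sched_lock); /* do not re-take the lock, just check it */
  /* ... touch shared MAC/scheduler state ... */
}

/* stands for gNB_dlsch_ulsch_scheduler(): the single per-slot entry point */
static void gNB_dlsch_ulsch_scheduler(void)
{
  NR_SCHED_LOCK(&sched_lock);
  nr_schedule_something();
  NR_SCHED_UNLOCK(&sched_lock);
}

int main(void)
{
  gNB_dlsch_ulsch_scheduler();
  return 0;
}
```
Entry points that are not reached from the scheduler (e.g. `nr_mac_config_*()`, `nr_initiate_ra_proc()`, `handle_nr_uci_pucch_0_1()`) take the lock themselves, as the other hunks in this commit show.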
......@@ -50,6 +50,7 @@ void nr_preprocessor_phytest(module_id_t module_id,
frame_t frame,
sub_frame_t slot)
{
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
if (!is_xlsch_in_slot(dlsch_slot_bitmap, slot))
return;
NR_UE_info_t *UE = RC.nrmac[module_id]->UE_info.list[0];
......@@ -192,6 +193,7 @@ uint64_t ulsch_slot_bitmap = (1 << 8);
bool nr_ul_preprocessor_phytest(module_id_t module_id, frame_t frame, sub_frame_t slot)
{
gNB_MAC_INST *nr_mac = RC.nrmac[module_id];
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
NR_COMMON_channels_t *cc = nr_mac->common_channels;
NR_ServingCellConfigCommon_t *scc = cc->ServingCellConfigCommon;
NR_UE_info_t *UE = nr_mac->UE_info.list[0];
......
......@@ -509,7 +509,7 @@ void fill_pdcch_vrb_map(gNB_MAC_INST *mac,
}
}
bool multiple_2_3_5(int rb)
static bool multiple_2_3_5(int rb)
{
while (rb % 2 == 0)
rb /= 2;
......@@ -2369,7 +2369,7 @@ NR_UE_info_t *add_new_nr_ue(gNB_MAC_INST *nr_mac, rnti_t rntiP, NR_CellGroupConf
reset_srs_stats(UE);
pthread_mutex_lock(&UE_info->mutex);
NR_SCHED_LOCK(&UE_info->mutex);
int i;
for(i=0; i<MAX_MOBILES_PER_GNB; i++) {
if (UE_info->list[i] == NULL) {
......@@ -2380,10 +2380,10 @@ NR_UE_info_t *add_new_nr_ue(gNB_MAC_INST *nr_mac, rnti_t rntiP, NR_CellGroupConf
if (i == MAX_MOBILES_PER_GNB) {
LOG_E(NR_MAC,"Try to add UE %04x but the list is full\n", rntiP);
delete_nr_ue_data(UE, nr_mac->common_channels, &UE_info->uid_allocator);
pthread_mutex_unlock(&UE_info->mutex);
NR_SCHED_UNLOCK(&UE_info->mutex);
return NULL;
}
pthread_mutex_unlock(&UE_info->mutex);
NR_SCHED_UNLOCK(&UE_info->mutex);
LOG_D(NR_MAC, "Add NR rnti %x\n", rntiP);
dump_nr_list(UE_info->list);
......@@ -2487,8 +2487,11 @@ void reset_ul_harq_list(NR_UE_sched_ctrl_t *sched_ctrl) {
void mac_remove_nr_ue(gNB_MAC_INST *nr_mac, rnti_t rnti)
{
/* already mutex protected */
NR_SCHED_ENSURE_LOCKED(&nr_mac->sched_lock);
NR_UEs_t *UE_info = &nr_mac->UE_info;
pthread_mutex_lock(&UE_info->mutex);
NR_SCHED_LOCK(&UE_info->mutex);
UE_iterator(UE_info->list, UE) {
if (UE->rnti==rnti)
break;
......@@ -2496,7 +2499,7 @@ void mac_remove_nr_ue(gNB_MAC_INST *nr_mac, rnti_t rnti)
if (!UE) {
LOG_W(NR_MAC,"Call to del rnti %04x, but not existing\n", rnti);
pthread_mutex_unlock(&UE_info->mutex);
NR_SCHED_UNLOCK(&UE_info->mutex);
return;
}
......@@ -2506,7 +2509,7 @@ void mac_remove_nr_ue(gNB_MAC_INST *nr_mac, rnti_t rnti)
if(UE_info->list[i] && UE_info->list[i]->rnti != rnti)
newUEs[newListIdx++]=UE_info->list[i];
memcpy(UE_info->list, newUEs, sizeof(UE_info->list));
pthread_mutex_unlock(&UE_info->mutex);
NR_SCHED_UNLOCK(&UE_info->mutex);
delete_nr_ue_data(UE, nr_mac->common_channels, &UE_info->uid_allocator);
}
......@@ -2525,6 +2528,7 @@ uint8_t nr_get_tpc(int target, uint8_t cqi, int incr) {
int get_pdsch_to_harq_feedback(NR_PUCCH_Config_t *pucch_Config,
nr_dci_format_t dci_format,
uint8_t *pdsch_to_harq_feedback) {
/* already mutex protected: held in nr_acknack_scheduling() */
if (dci_format == NR_DL_DCI_FORMAT_1_0) {
for (int i = 0; i < 8; i++)
......@@ -2545,6 +2549,9 @@ void nr_csirs_scheduling(int Mod_idP, frame_t frame, sub_frame_t slot, int n_slo
int CC_id = 0;
NR_UEs_t *UE_info = &RC.nrmac[Mod_idP]->UE_info;
gNB_MAC_INST *gNB_mac = RC.nrmac[Mod_idP];
NR_SCHED_ENSURE_LOCKED(&gNB_mac->sched_lock);
uint16_t *vrb_map = gNB_mac->common_channels[CC_id].vrb_map;
UE_info->sched_csirs = false;
......@@ -2770,7 +2777,10 @@ void nr_csirs_scheduling(int Mod_idP, frame_t frame, sub_frame_t slot, int n_slo
void nr_mac_update_timers(module_id_t module_id,
frame_t frame,
sub_frame_t slot) {
sub_frame_t slot)
{
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
NR_SCHED_ENSURE_LOCKED(&RC.nrmac[module_id]->sched_lock);
NR_UEs_t *UE_info = &RC.nrmac[module_id]->UE_info;
UE_iterator(UE_info->list, UE) {
......@@ -2824,7 +2834,10 @@ void nr_mac_update_timers(module_id_t module_id,
void schedule_nr_bwp_switch(module_id_t module_id,
frame_t frame,
sub_frame_t slot) {
sub_frame_t slot)
{
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
NR_SCHED_ENSURE_LOCKED(&RC.nrmac[module_id]->sched_lock);
NR_UEs_t *UE_info = &RC.nrmac[module_id]->UE_info;
......@@ -2887,6 +2900,7 @@ void send_initial_ul_rrc_message(gNB_MAC_INST *mac, int rnti, const uint8_t *sdu
LOG_W(MAC, "[RAPROC] Received SDU for CCCH length %d for UE %04x\n", sdu_len, rnti);
NR_UE_info_t *UE = (NR_UE_info_t *)rawUE;
NR_SCHED_ENSURE_LOCKED(&mac->sched_lock);
uint8_t du2cu[1024];
int encoded = encode_cellGroupConfig(UE->CellGroup, du2cu, sizeof(du2cu));
......@@ -2904,6 +2918,7 @@ void send_initial_ul_rrc_message(gNB_MAC_INST *mac, int rnti, const uint8_t *sdu
void prepare_initial_ul_rrc_message(gNB_MAC_INST *mac, NR_UE_info_t *UE)
{
NR_SCHED_ENSURE_LOCKED(&mac->sched_lock);
/* create this UE's initial CellGroup */
/* Note: relying on the RRC is a hack, as we are in the DU; there should be
* no RRC, remove in the future */
......
......@@ -42,7 +42,8 @@ const uint16_t m_SRS[64] = { 4, 8, 12, 16, 16, 20, 24, 24, 28, 32, 36, 40, 48, 4
160, 160, 168, 176, 184, 192, 192, 192, 192, 208, 216, 224, 240, 240, 240, 240, 256, 256,
256, 264, 272, 272, 272 };
uint32_t max4(uint32_t a, uint32_t b,uint32_t c,uint32_t d) {
static uint32_t max4(uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
int x = max(a, b);
x = max(x, c);
x = max(x, d);
......@@ -53,6 +54,9 @@ void nr_srs_ri_computation(const nfapi_nr_srs_normalized_channel_iq_matrix_t *nr
const NR_UE_UL_BWP_t *current_BWP,
uint8_t *ul_ri)
{
/* already mutex protected: held in handle_nr_srs_measurements() */
NR_SCHED_ENSURE_LOCKED(&RC.nrmac[0]->sched_lock);
// If the gNB or UE has 1 antenna, the rank is always 1, i.e., *ul_ri = 0.
// For 2x2 scenario, we compute the rank of channel.
// The computation for 2x4, 4x2, 4x4, ... scenarios are not implemented yet. In these cases, the function sets *ul_ri = 0, which is always a valid value.
......@@ -137,7 +141,14 @@ void nr_srs_ri_computation(const nfapi_nr_srs_normalized_channel_iq_matrix_t *nr
}
void nr_configure_srs(nfapi_nr_srs_pdu_t *srs_pdu, int slot, int module_id, int CC_id, NR_UE_info_t *UE, NR_SRS_ResourceSet_t *srs_resource_set, NR_SRS_Resource_t *srs_resource, int buffer_index)
static void nr_configure_srs(nfapi_nr_srs_pdu_t *srs_pdu,
int slot,
int module_id,
int CC_id,
NR_UE_info_t *UE,
NR_SRS_ResourceSet_t *srs_resource_set,
NR_SRS_Resource_t *srs_resource,
int buffer_index)
{
NR_UE_UL_BWP_t *current_BWP = &UE->current_UL_BWP;
......@@ -196,7 +207,13 @@ void nr_configure_srs(nfapi_nr_srs_pdu_t *srs_pdu, int slot, int module_id, int
vrb_map_UL[i + srs_pdu->bwp_start] |= mask;
}
void nr_fill_nfapi_srs(int module_id, int CC_id, NR_UE_info_t* UE, int frame, int slot, NR_SRS_ResourceSet_t *srs_resource_set, NR_SRS_Resource_t *srs_resource)
static void nr_fill_nfapi_srs(int module_id,
int CC_id,
NR_UE_info_t *UE,
int frame,
int slot,
NR_SRS_ResourceSet_t *srs_resource_set,
NR_SRS_Resource_t *srs_resource)
{
int index = ul_buffer_index(frame, slot, UE->current_UL_BWP.scs, RC.nrmac[module_id]->UL_tti_req_ahead_size);
......@@ -226,8 +243,10 @@ void nr_fill_nfapi_srs(int module_id, int CC_id, NR_UE_info_t* UE, int frame, in
*********************************************************************/
void nr_schedule_srs(int module_id, frame_t frame, int slot)
{
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
gNB_MAC_INST *nrmac = RC.nrmac[module_id];
NR_SCHED_ENSURE_LOCKED(&nrmac->sched_lock);
NR_UEs_t *UE_info = &nrmac->UE_info;
UE_iterator(UE_info->list, UE) {
......
......@@ -144,7 +144,7 @@ static const int diff_rsrp_ssb_csi_meas_10_1_6_1_2[16] = {
-30 // 10 - 15
};
int get_pucch_index(int frame, int slot, int n_slots_frame, const NR_TDD_UL_DL_Pattern_t *tdd, int sched_pucch_size)
static int get_pucch_index(int frame, int slot, int n_slots_frame, const NR_TDD_UL_DL_Pattern_t *tdd, int sched_pucch_size)
{
// PUCCH structures are indexed by slot in the PUCCH period determined by sched_pucch_size number of UL slots
// this function returns the index to the structure for the slot passed to the function
......@@ -167,6 +167,9 @@ void nr_schedule_pucch(gNB_MAC_INST *nrmac,
frame_t frameP,
sub_frame_t slotP)
{
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
NR_SCHED_ENSURE_LOCKED(&nrmac->sched_lock);
if (!is_xlsch_in_slot(nrmac->ulsch_slot_bitmap[slotP / 64], slotP))
return;
......@@ -198,7 +201,10 @@ void nr_csi_meas_reporting(int Mod_idP,
frame_t frame,
sub_frame_t slot)
{
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
gNB_MAC_INST *nrmac = RC.nrmac[Mod_idP];
NR_SCHED_ENSURE_LOCKED(&nrmac->sched_lock);
UE_iterator(nrmac->UE_info.list, UE ) {
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
NR_UE_UL_BWP_t *ul_bwp = &UE->current_UL_BWP;
......@@ -324,8 +330,8 @@ static void handle_dl_harq(NR_UE_info_t * UE,
}
}
int checkTargetSSBInFirst64TCIStates_pdschConfig(int ssb_index_t, NR_UE_info_t * UE) {
static int checkTargetSSBInFirst64TCIStates_pdschConfig(int ssb_index_t, NR_UE_info_t *UE)
{
const NR_PDSCH_Config_t *pdsch_Config = UE->current_DL_BWP.pdsch_Config;
int nb_tci_states = pdsch_Config ? pdsch_Config->tci_StatesToAddModList->list.count : 0;
NR_TCI_State_t *tci =NULL;
......@@ -350,8 +356,8 @@ int checkTargetSSBInFirst64TCIStates_pdschConfig(int ssb_index_t, NR_UE_info_t *
return -1;
}
int checkTargetSSBInTCIStates_pdcchConfig(int ssb_index_t, NR_UE_info_t *UE) {
static int checkTargetSSBInTCIStates_pdcchConfig(int ssb_index_t, NR_UE_info_t *UE)
{
NR_TCI_State_t *tci =NULL;
NR_TCI_StateId_t *tci_id = NULL;
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
......@@ -390,7 +396,8 @@ int checkTargetSSBInTCIStates_pdcchConfig(int ssb_index_t, NR_UE_info_t *UE) {
}
//returns the measured RSRP value (upper limit)
int get_measured_rsrp(uint8_t index) {
static int get_measured_rsrp(uint8_t index)
{
//if index is invalid returning minimum rsrp -140
if(index <= 15 || index >= 114)
return MIN_RSRP_VALUE;
......@@ -399,7 +406,7 @@ int get_measured_rsrp(uint8_t index) {
}
//returns the differential RSRP value (upper limit)
int get_diff_rsrp(uint8_t index, int strongest_rsrp) {
static int get_diff_rsrp(uint8_t index, int strongest_rsrp) {
if(strongest_rsrp != -1) {
return strongest_rsrp + diff_rsrp_ssb_csi_meas_10_1_6_1_2[index];
} else
......@@ -409,8 +416,8 @@ int get_diff_rsrp(uint8_t index, int strongest_rsrp) {
//identifies the target SSB Beam index
//keeps the required data for PDCCH and PDSCH TCI state activation/deactivation CE construction globally
//handles triggering of PDCCH and PDSCH MAC CEs
void tci_handling(NR_UE_info_t *UE, frame_t frame, slot_t slot) {
static void tci_handling(NR_UE_info_t *UE, frame_t frame, slot_t slot)
{
int strongest_ssb_rsrp = 0;
int cqi_idx = 0;
int curr_ssb_beam_index = 0; //ToDo: yet to know how to identify the serving ssb beam index
......@@ -583,24 +590,23 @@ void tci_handling(NR_UE_info_t *UE, frame_t frame, slot_t slot) {
}
}//tci_presentInDCI
}//is-triggering_beam_switch
}//tci handling
} // tci handling
uint8_t pickandreverse_bits(uint8_t *payload, uint16_t bitlen, uint8_t start_bit) {
static uint8_t pickandreverse_bits(uint8_t *payload, uint16_t bitlen, uint8_t start_bit)
{
uint8_t rev_bits = 0;
for (int i=0; i<bitlen; i++)
rev_bits |= ((payload[(start_bit+i)/8]>>((start_bit+i)%8))&0x01)<<(bitlen-i-1);
return rev_bits;
}
void evaluate_rsrp_report(NR_UE_info_t *UE,
NR_UE_sched_ctrl_t *sched_ctrl,
uint8_t csi_report_id,
uint8_t *payload,
int *cumul_bits,
NR_CSI_ReportConfig__reportQuantity_PR reportQuantity_type){
static void evaluate_rsrp_report(NR_UE_info_t *UE,
NR_UE_sched_ctrl_t *sched_ctrl,
uint8_t csi_report_id,
uint8_t *payload,
int *cumul_bits,
NR_CSI_ReportConfig__reportQuantity_PR reportQuantity_type)
{
nr_csi_report_t *csi_report = &UE->csi_report_template[csi_report_id];
uint8_t cri_ssbri_bitlen = csi_report->CSI_report_bitlen.cri_ssbri_bitlen;
uint16_t curr_payload;
......@@ -659,21 +665,18 @@ void evaluate_rsrp_report(NR_UE_info_t *UE,
stats->num_rsrp_meas++;
}
void evaluate_cri_report(uint8_t *payload,
uint8_t cri_bitlen,
int cumul_bits,
NR_UE_sched_ctrl_t *sched_ctrl){
static void evaluate_cri_report(uint8_t *payload, uint8_t cri_bitlen, int cumul_bits, NR_UE_sched_ctrl_t *sched_ctrl)
{
uint8_t temp_cri = pickandreverse_bits(payload, cri_bitlen, cumul_bits);
sched_ctrl->CSI_report.cri_ri_li_pmi_cqi_report.cri = temp_cri;
}
int evaluate_ri_report(uint8_t *payload,
uint8_t ri_bitlen,
uint8_t ri_restriction,
int cumul_bits,
NR_UE_sched_ctrl_t *sched_ctrl){
static int evaluate_ri_report(uint8_t *payload,
uint8_t ri_bitlen,
uint8_t ri_restriction,
int cumul_bits,
NR_UE_sched_ctrl_t *sched_ctrl)
{
uint8_t ri_index = pickandreverse_bits(payload, ri_bitlen, cumul_bits);
int count=0;
for (int i=0; i<8; i++) {
......@@ -689,13 +692,12 @@ int evaluate_ri_report(uint8_t *payload,
AssertFatal(1==0, "Decoded ri %d does not correspond to any valid value in ri_restriction %d\n",ri_index,ri_restriction);
}
void evaluate_cqi_report(uint8_t *payload,
nr_csi_report_t *csi_report,
int cumul_bits,
uint8_t ri,
NR_UE_info_t *UE,
uint8_t cqi_Table)
static void evaluate_cqi_report(uint8_t *payload,
nr_csi_report_t *csi_report,
int cumul_bits,
uint8_t ri,
NR_UE_info_t *UE,
uint8_t cqi_Table)
{
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
......@@ -723,13 +725,12 @@ void evaluate_cqi_report(uint8_t *payload,
sched_ctrl->dl_max_mcs = get_mcs_from_cqi(mcs_table, cqi_Table, cqi_idx);
}
uint8_t evaluate_pmi_report(uint8_t *payload,
nr_csi_report_t *csi_report,
int cumul_bits,
uint8_t ri,
NR_UE_sched_ctrl_t *sched_ctrl){
static uint8_t evaluate_pmi_report(uint8_t *payload,
nr_csi_report_t *csi_report,
int cumul_bits,
uint8_t ri,
NR_UE_sched_ctrl_t *sched_ctrl)
{
int x1_bitlen = csi_report->csi_meas_bitlen.pmi_x1_bitlen[ri];
int x2_bitlen = csi_report->csi_meas_bitlen.pmi_x2_bitlen[ri];
int tot_bitlen = x1_bitlen + x2_bitlen;
......@@ -744,16 +745,14 @@ uint8_t evaluate_pmi_report(uint8_t *payload,
sched_ctrl->CSI_report.cri_ri_li_pmi_cqi_report.pmi_x2);
return tot_bitlen;
}
int evaluate_li_report(uint8_t *payload,
nr_csi_report_t *csi_report,
int cumul_bits,
uint8_t ri,
NR_UE_sched_ctrl_t *sched_ctrl){
static int evaluate_li_report(uint8_t *payload,
nr_csi_report_t *csi_report,
int cumul_bits,
uint8_t ri,
NR_UE_sched_ctrl_t *sched_ctrl)
{
int li_bitlen = csi_report->csi_meas_bitlen.li_bitlen[ri];
if (li_bitlen>0) {
......@@ -762,14 +761,10 @@ int evaluate_li_report(uint8_t *payload,
sched_ctrl->CSI_report.cri_ri_li_pmi_cqi_report.li = temp_li;
}
return li_bitlen;
}
void skip_zero_padding(int *cumul_bits,
nr_csi_report_t *csi_report,
uint8_t ri,
uint16_t max_bitlen) {
static void skip_zero_padding(int *cumul_bits, nr_csi_report_t *csi_report, uint8_t ri, uint16_t max_bitlen)
{
// actual number of reported bits depends on the reported rank
// zero padding bits are added to have a predetermined max bit length to decode
......@@ -783,13 +778,12 @@ void skip_zero_padding(int *cumul_bits,
*cumul_bits+=(max_bitlen-reported_bitlen);
}
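In other words, the decoder always consumes max_bitlen bits per CSI report; whatever the rank-dependent fields did not use is skipped as zero padding. A small illustration with hypothetical lengths:

  // If the fields reported for this rank occupy 43 bits but the padded report
  // is always 49 bits long, the decoder must advance past 6 padding bits
  // before reading the next report.
  int reported_bitlen = 43, max_bitlen = 49;
  int padding = max_bitlen - reported_bitlen; // 6 bits added to *cumul_bits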
void extract_pucch_csi_report(NR_CSI_MeasConfig_t *csi_MeasConfig,
const nfapi_nr_uci_pucch_pdu_format_2_3_4_t *uci_pdu,
frame_t frame,
slot_t slot,
NR_UE_info_t *UE,
NR_ServingCellConfigCommon_t *scc)
static void extract_pucch_csi_report(NR_CSI_MeasConfig_t *csi_MeasConfig,
const nfapi_nr_uci_pucch_pdu_format_2_3_4_t *uci_pdu,
frame_t frame,
slot_t slot,
NR_UE_info_t *UE,
NR_ServingCellConfigCommon_t *scc)
{
/** From Table 6.3.1.1.2-3: RI, LI, CQI, and CRI of codebookType=typeI-SinglePanel */
uint8_t *payload = uci_pdu->csi_part1.csi_part1_payload;
......@@ -929,9 +923,12 @@ void handle_nr_uci_pucch_0_1(module_id_t mod_id,
sub_frame_t slot,
const nfapi_nr_uci_pucch_pdu_format_0_1_t *uci_01)
{
NR_UE_info_t * UE = find_nr_UE(&RC.nrmac[mod_id]->UE_info, uci_01->rnti);
gNB_MAC_INST *nrmac = RC.nrmac[mod_id];
NR_SCHED_LOCK(&nrmac->sched_lock);
NR_UE_info_t * UE = find_nr_UE(&nrmac->UE_info, uci_01->rnti);
if (!UE) {
LOG_E(NR_MAC, "%s(): unknown RNTI %04x in PUCCH UCI\n", __func__, uci_01->rnti);
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return;
}
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
......@@ -941,7 +938,7 @@ void handle_nr_uci_pucch_0_1(module_id_t mod_id,
for (int harq_bit = 0; harq_bit < uci_01->harq->num_harq; harq_bit++) {
const uint8_t harq_value = uci_01->harq->harq_list[harq_bit].harq_value;
const uint8_t harq_confidence = uci_01->harq->harq_confidence_level;
NR_UE_harq_t *harq = find_harq(frame, slot, UE, RC.nrmac[mod_id]->dl_bler.harq_round_max);
NR_UE_harq_t *harq = find_harq(frame, slot, UE, nrmac->dl_bler.harq_round_max);
if (!harq) {
LOG_E(NR_MAC, "Oh no! Could not find a harq in %s!\n", __FUNCTION__);
break;
......@@ -950,13 +947,13 @@ void handle_nr_uci_pucch_0_1(module_id_t mod_id,
const int8_t pid = sched_ctrl->feedback_dl_harq.head;
remove_front_nr_list(&sched_ctrl->feedback_dl_harq);
LOG_D(NR_MAC,"%4d.%2d bit %d pid %d ack/nack %d\n",frame, slot, harq_bit,pid,harq_value);
handle_dl_harq(UE, pid, harq_value == 0 && harq_confidence == 0, RC.nrmac[mod_id]->dl_bler.harq_round_max);
handle_dl_harq(UE, pid, harq_value == 0 && harq_confidence == 0, nrmac->dl_bler.harq_round_max);
if (harq_confidence == 1) UE->mac_stats.pucch0_DTX++;
}
// tpc (power control) only if we received AckNack
if (uci_01->harq->harq_confidence_level==0)
sched_ctrl->tpc1 = nr_get_tpc(RC.nrmac[mod_id]->pucch_target_snrx10, uci_01->ul_cqi, 30);
sched_ctrl->tpc1 = nr_get_tpc(nrmac->pucch_target_snrx10, uci_01->ul_cqi, 30);
else
sched_ctrl->tpc1 = 3;
sched_ctrl->pucch_snrx10 = uci_01->ul_cqi * 5 - 640;
......@@ -974,6 +971,7 @@ void handle_nr_uci_pucch_0_1(module_id_t mod_id,
}
free(uci_01->sr);
}
NR_SCHED_UNLOCK(&nrmac->sched_lock);
}
void handle_nr_uci_pucch_2_3_4(module_id_t mod_id,
......@@ -981,15 +979,21 @@ void handle_nr_uci_pucch_2_3_4(module_id_t mod_id,
sub_frame_t slot,
const nfapi_nr_uci_pucch_pdu_format_2_3_4_t *uci_234)
{
NR_UE_info_t * UE = find_nr_UE(&RC.nrmac[mod_id]->UE_info, uci_234->rnti);
gNB_MAC_INST *nrmac = RC.nrmac[mod_id];
NR_SCHED_LOCK(&nrmac->sched_lock);
NR_UE_info_t * UE = find_nr_UE(&nrmac->UE_info, uci_234->rnti);
if (!UE) {
NR_SCHED_UNLOCK(&nrmac->sched_lock);
LOG_E(NR_MAC, "%s(): unknown RNTI %04x in PUCCH UCI\n", __func__, uci_234->rnti);
return;
}
NR_CSI_MeasConfig_t *csi_MeasConfig = UE->current_UL_BWP.csi_MeasConfig;
if (csi_MeasConfig==NULL)
if (csi_MeasConfig==NULL) {
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return;
}
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
// tpc (power control)
......@@ -1014,13 +1018,13 @@ void handle_nr_uci_pucch_2_3_4(module_id_t mod_id,
DevAssert(harq->is_waiting);
const int8_t pid = sched_ctrl->feedback_dl_harq.head;
remove_front_nr_list(&sched_ctrl->feedback_dl_harq);
handle_dl_harq(UE, pid, uci_234->harq.harq_crc != 1 && acknack, RC.nrmac[mod_id]->dl_bler.harq_round_max);
handle_dl_harq(UE, pid, uci_234->harq.harq_crc != 1 && acknack, nrmac->dl_bler.harq_round_max);
}
free(uci_234->harq.harq_payload);
}
if ((uci_234->pduBitmap >> 2) & 0x01) {
//API to parse the csi report and store it into sched_ctrl
extract_pucch_csi_report(csi_MeasConfig, uci_234, frame, slot, UE, RC.nrmac[mod_id]->common_channels->ServingCellConfigCommon);
extract_pucch_csi_report(csi_MeasConfig, uci_234, frame, slot, UE, nrmac->common_channels->ServingCellConfigCommon);
//TCI handling function
tci_handling(UE,frame, slot);
free(uci_234->csi_part1.csi_part1_payload);
......@@ -1029,9 +1033,10 @@ void handle_nr_uci_pucch_2_3_4(module_id_t mod_id,
//@TODO:Handle CSI Report 2
// nothing to free (yet)
}
NR_SCHED_UNLOCK(&nrmac->sched_lock);
}
void set_pucch_allocation(const NR_UE_UL_BWP_t *ul_bwp, const int r_pucch, const int bwp_size, NR_sched_pucch_t *pucch)
static void set_pucch_allocation(const NR_UE_UL_BWP_t *ul_bwp, const int r_pucch, const int bwp_size, NR_sched_pucch_t *pucch)
{
if(r_pucch<0){
const NR_PUCCH_Resource_t *resource = ul_bwp->pucch_Config->resourceToAddModList->list.array[0];
......@@ -1053,11 +1058,8 @@ void set_pucch_allocation(const NR_UE_UL_BWP_t *ul_bwp, const int r_pucch, const
}
}
bool test_pucch0_vrb_occupation(const NR_sched_pucch_t *pucch,
uint16_t *vrb_map_UL,
const int bwp_start,
const int bwp_size) {
static bool test_pucch0_vrb_occupation(const NR_sched_pucch_t *pucch, uint16_t *vrb_map_UL, const int bwp_start, const int bwp_size)
{
// We assume initial cyclic shift is always 0 so different pucch resources can't overlap
// verifying occupation of PRBs for ACK/NACK on dedicated pucch
......@@ -1076,9 +1078,7 @@ bool test_pucch0_vrb_occupation(const NR_sched_pucch_t *pucch,
return true;
}
void set_pucch0_vrb_occupation(const NR_sched_pucch_t *pucch,
uint16_t *vrb_map_UL,
const int bwp_start)
static void set_pucch0_vrb_occupation(const NR_sched_pucch_t *pucch, uint16_t *vrb_map_UL, const int bwp_start)
{
for (int l=0; l<pucch->nr_of_symb; l++) {
uint16_t symb = SL_to_bitmap(pucch->start_symb+l, 1);
......@@ -1100,6 +1100,8 @@ int nr_acknack_scheduling(gNB_MAC_INST *mac,
int r_pucch,
int is_common)
{
/* we assume that this function is mutex-protected from outside. Since it is
* called often, don't try to lock every time */
const int CC_id = 0;
const int minfbtime = mac->minRXTXTIMEpdsch;
......@@ -1204,6 +1206,9 @@ int nr_acknack_scheduling(gNB_MAC_INST *mac,
void nr_sr_reporting(gNB_MAC_INST *nrmac, frame_t SFN, sub_frame_t slot)
{
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
NR_SCHED_ENSURE_LOCKED(&nrmac->sched_lock);
if (!is_xlsch_in_slot(nrmac->ulsch_slot_bitmap[slot / 64], slot))
return;
const int CC_id = 0;
......
......@@ -39,7 +39,10 @@
//#define SRS_IND_DEBUG
const int get_ul_tda(gNB_MAC_INST *nrmac, const NR_ServingCellConfigCommon_t *scc, int frame, int slot) {
const int get_ul_tda(gNB_MAC_INST *nrmac, const NR_ServingCellConfigCommon_t *scc, int frame, int slot)
{
/* we assume that this function is mutex-protected from outside */
NR_SCHED_ENSURE_LOCKED(&nrmac->sched_lock);
/* there is a mixed slot only when in TDD */
const NR_TDD_UL_DL_Pattern_t *tdd = scc->tdd_UL_DL_ConfigurationCommon ? &scc->tdd_UL_DL_ConfigurationCommon->pattern1 : NULL;
......@@ -61,14 +64,8 @@ const int get_ul_tda(gNB_MAC_INST *nrmac, const NR_ServingCellConfigCommon_t *sc
return 0; // if FDD or not mixed slot in TDD, for now use default TDA (TODO handle CSI-RS slots)
}
int compute_ph_factor(int mu,
int tbs_bits,
int rb,
int n_layers,
int n_symbols,
int n_dmrs,
long *deltaMCS) {
static int compute_ph_factor(int mu, int tbs_bits, int rb, int n_layers, int n_symbols, int n_dmrs, long *deltaMCS)
{
// 38.213 7.1.1
// if the PUSCH transmission is over more than one layer delta_tf = 0
int delta_tf = 0;
......@@ -109,14 +106,14 @@ int compute_ph_factor(int mu,
// F: if 0, the following L field is 8 bits wide; if 1, it is 16 bits wide
// R: Reserved bit, set to zero.
int nr_process_mac_pdu(instance_t module_idP,
NR_UE_info_t* UE,
uint8_t CC_id,
frame_t frameP,
sub_frame_t slot,
uint8_t *pduP,
int pdu_len,
const int8_t harq_pid)
static int nr_process_mac_pdu(instance_t module_idP,
NR_UE_info_t *UE,
uint8_t CC_id,
frame_t frameP,
sub_frame_t slot,
uint8_t *pduP,
int pdu_len,
const int8_t harq_pid)
{
uint8_t done = 0;
......@@ -462,7 +459,7 @@ int nr_process_mac_pdu(instance_t module_idP,
return 0;
}
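The comment above nr_process_mac_pdu() describes the 38.321 UL-SCH subheader layout: octet 0 carries R, F and a 6-bit LCID, optionally followed by an 8- or 16-bit L field for variable-size SDUs. A hedged, standalone sketch of decoding one such subheader — not the function above, whose body is elided here, and with hypothetical names:

  typedef struct { uint8_t lcid; uint16_t length; int hdr_len; } mac_subhdr_t;

  // Decode one subheader with an L field (variable-size SDU case only).
  static mac_subhdr_t decode_subheader(const uint8_t *pdu)
  {
    mac_subhdr_t h = {0};
    const uint8_t F = (pdu[0] >> 6) & 0x01; // F=0: 8-bit L, F=1: 16-bit L
    h.lcid = pdu[0] & 0x3f;                 // 6-bit logical channel ID
    if (F) {
      h.length = ((uint16_t)pdu[1] << 8) | pdu[2];
      h.hdr_len = 3;
    } else {
      h.length = pdu[1];
      h.hdr_len = 2;
    }
    return h;
  }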
void abort_nr_ul_harq(NR_UE_info_t *UE, int8_t harq_pid)
static void abort_nr_ul_harq(NR_UE_info_t *UE, int8_t harq_pid)
{
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
NR_UE_ul_harq_t *harq = &sched_ctrl->ul_harq_processes[harq_pid];
......@@ -479,7 +476,7 @@ void abort_nr_ul_harq(NR_UE_info_t *UE, int8_t harq_pid)
sched_ctrl->sched_ul_bytes = 0;
}
bool get_UE_waiting_CFRA_msg3(const gNB_MAC_INST *gNB_mac, const int CC_id, const frame_t frame, const sub_frame_t slot)
static bool get_UE_waiting_CFRA_msg3(const gNB_MAC_INST *gNB_mac, const int CC_id, const frame_t frame, const sub_frame_t slot)
{
bool UE_waiting_CFRA_msg3 = false;
for (int i = 0; i < NR_NB_RA_PROC_MAX; i++) {
......@@ -498,17 +495,22 @@ void handle_nr_ul_harq(const int CC_idP,
sub_frame_t slot,
const nfapi_nr_crc_t *crc_pdu)
{
NR_UE_info_t *UE = find_nr_UE(&RC.nrmac[mod_id]->UE_info, crc_pdu->rnti);
bool UE_waiting_CFRA_msg3 = get_UE_waiting_CFRA_msg3(RC.nrmac[mod_id], CC_idP, frame, slot);
gNB_MAC_INST *nrmac = RC.nrmac[mod_id];
NR_SCHED_LOCK(&nrmac->sched_lock);
NR_UE_info_t *UE = find_nr_UE(&nrmac->UE_info, crc_pdu->rnti);
bool UE_waiting_CFRA_msg3 = get_UE_waiting_CFRA_msg3(nrmac, CC_idP, frame, slot);
if (!UE || UE_waiting_CFRA_msg3 == true) {
LOG_W(NR_MAC, "handle harq for rnti %04x, in RA process\n", crc_pdu->rnti);
for (int i = 0; i < NR_NB_RA_PROC_MAX; ++i) {
NR_RA_t *ra = &RC.nrmac[mod_id]->common_channels[CC_idP].ra[i];
if (ra->state >= WAIT_Msg3 &&
ra->rnti == crc_pdu->rnti)
NR_RA_t *ra = &nrmac->common_channels[CC_idP].ra[i];
if (ra->state >= WAIT_Msg3 && ra->rnti == crc_pdu->rnti) {
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return;
}
}
NR_SCHED_UNLOCK(&nrmac->sched_lock);
LOG_E(NR_MAC, "%s(): unknown RNTI 0x%04x in PUSCH\n", __func__, crc_pdu->rnti);
return;
}
......@@ -521,8 +523,10 @@ void handle_nr_ul_harq(const int CC_idP,
crc_pdu->harq_id,
harq_pid,
crc_pdu->rnti);
if (harq_pid < 0)
if (harq_pid < 0) {
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return;
}
remove_front_nr_list(&sched_ctrl->feedback_ul_harq);
sched_ctrl->ul_harq_processes[harq_pid].is_waiting = false;
......@@ -562,22 +566,23 @@ void handle_nr_ul_harq(const int CC_idP,
crc_pdu->rnti);
add_tail_nr_list(&sched_ctrl->retrans_ul_harq, harq_pid);
}
NR_SCHED_UNLOCK(&nrmac->sched_lock);
}
/*
* When data are received on PHY and transmitted to MAC
*/
void nr_rx_sdu(const module_id_t gnb_mod_idP,
const int CC_idP,
const frame_t frameP,
const sub_frame_t slotP,
const rnti_t rntiP,
uint8_t *sduP,
const uint16_t sdu_lenP,
const uint16_t timing_advance,
const uint8_t ul_cqi,
const uint16_t rssi){
static void _nr_rx_sdu(const module_id_t gnb_mod_idP,
const int CC_idP,
const frame_t frameP,
const sub_frame_t slotP,
const rnti_t rntiP,
uint8_t *sduP,
const uint16_t sdu_lenP,
const uint16_t timing_advance,
const uint8_t ul_cqi,
const uint16_t rssi)
{
gNB_MAC_INST *gNB_mac = RC.nrmac[gnb_mod_idP];
const int current_rnti = rntiP;
......@@ -841,8 +846,25 @@ void nr_rx_sdu(const module_id_t gnb_mod_idP,
}
}
uint32_t calc_power_complex(const int16_t *x, const int16_t *y, const uint32_t size) {
void nr_rx_sdu(const module_id_t gnb_mod_idP,
const int CC_idP,
const frame_t frameP,
const sub_frame_t slotP,
const rnti_t rntiP,
uint8_t *sduP,
const uint16_t sdu_lenP,
const uint16_t timing_advance,
const uint8_t ul_cqi,
const uint16_t rssi)
{
gNB_MAC_INST *gNB_mac = RC.nrmac[gnb_mod_idP];
NR_SCHED_LOCK(&gNB_mac->sched_lock);
_nr_rx_sdu(gnb_mod_idP, CC_idP, frameP, slotP, rntiP, sduP, sdu_lenP, timing_advance, ul_cqi, rssi);
NR_SCHED_UNLOCK(&gNB_mac->sched_lock);
}
static uint32_t calc_power_complex(const int16_t *x, const int16_t *y, const uint32_t size)
{
// Real part value
int64_t sum_x = 0;
int64_t sum_x2 = 0;
......@@ -864,7 +886,8 @@ uint32_t calc_power_complex(const int16_t *x, const int16_t *y, const uint32_t s
return power_re+power_im;
}
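From the visible accumulators (sum_x, sum_x2 and their imaginary counterparts), calc_power_complex() appears to estimate the AC power of the complex sequence as the sum of the variances of the real and imaginary parts, roughly E[x^2]-E[x]^2 + E[y^2]-E[y]^2. A sketch of that computation under this assumption (helper name hypothetical, size assumed > 0):

  static uint32_t power_estimate(const int16_t *x, const int16_t *y, const uint32_t size)
  {
    int64_t sx = 0, sx2 = 0, sy = 0, sy2 = 0;
    for (uint32_t i = 0; i < size; i++) {
      sx += x[i];  sx2 += (int64_t)x[i] * x[i];
      sy += y[i];  sy2 += (int64_t)y[i] * y[i];
    }
    const uint32_t var_x = sx2 / size - (sx / size) * (sx / size);
    const uint32_t var_y = sy2 / size - (sy / size) * (sy / size);
    return var_x + var_y; // variance of I plus variance of Q
  }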
c16_t nr_h_times_w(c16_t h, char w) {
static c16_t nr_h_times_w(c16_t h, char w)
{
c16_t output;
switch (w) {
case '0': // 0
......@@ -893,11 +916,11 @@ c16_t nr_h_times_w(c16_t h, char w) {
return output;
}
uint8_t get_max_tpmi(const NR_PUSCH_Config_t *pusch_Config,
const uint16_t num_ue_srs_ports,
const uint8_t *nrOfLayers,
int *additional_max_tpmi) {
static uint8_t get_max_tpmi(const NR_PUSCH_Config_t *pusch_Config,
const uint16_t num_ue_srs_ports,
const uint8_t *nrOfLayers,
int *additional_max_tpmi)
{
uint8_t max_tpmi = 0;
if ((pusch_Config && pusch_Config->txConfig != NULL && *pusch_Config->txConfig == NR_PUSCH_Config__txConfig_nonCodebook) ||
......@@ -1056,13 +1079,13 @@ uint8_t get_max_tpmi(const NR_PUSCH_Config_t *pusch_Config,
return max_tpmi;
}
void get_precoder_matrix_coef(char *w,
const uint8_t ul_ri,
const uint16_t num_ue_srs_ports,
const long transform_precoding,
const uint8_t tpmi,
const uint8_t uI,
int layer_idx)
static void get_precoder_matrix_coef(char *w,
const uint8_t ul_ri,
const uint16_t num_ue_srs_ports,
const long transform_precoding,
const uint8_t tpmi,
const uint8_t uI,
int layer_idx)
{
if (ul_ri == 0) {
if (num_ue_srs_ports == 2) {
......@@ -1085,15 +1108,16 @@ void get_precoder_matrix_coef(char *w,
}
}
int nr_srs_tpmi_estimation(const NR_PUSCH_Config_t *pusch_Config,
const long transform_precoding,
const uint8_t *channel_matrix,
const uint8_t normalized_iq_representation,
const uint16_t num_gnb_antenna_elements,
const uint16_t num_ue_srs_ports,
const uint16_t prg_size,
const uint16_t num_prgs,
const uint8_t ul_ri) {
static int nr_srs_tpmi_estimation(const NR_PUSCH_Config_t *pusch_Config,
const long transform_precoding,
const uint8_t *channel_matrix,
const uint8_t normalized_iq_representation,
const uint16_t num_gnb_antenna_elements,
const uint16_t num_ue_srs_ports,
const uint16_t prg_size,
const uint16_t num_prgs,
const uint8_t ul_ri)
{
if (ul_ri > 1) {
LOG_D(NR_MAC, "TPMI computation for ul_ri %i is not implemented yet!\n", ul_ri);
return 0;
......@@ -1173,6 +1197,8 @@ void handle_nr_srs_measurements(const module_id_t module_id,
const sub_frame_t slot,
nfapi_nr_srs_indication_pdu_t *srs_ind)
{
gNB_MAC_INST *nrmac = RC.nrmac[module_id];
NR_SCHED_LOCK(&nrmac->sched_lock);
LOG_D(NR_MAC, "(%d.%d) Received SRS indication for UE %04x\n", frame, slot, srs_ind->rnti);
#ifdef SRS_IND_DEBUG
......@@ -1188,11 +1214,13 @@ void handle_nr_srs_measurements(const module_id_t module_id,
NR_UE_info_t *UE = find_nr_UE(&RC.nrmac[module_id]->UE_info, srs_ind->rnti);
if (!UE) {
LOG_W(NR_MAC, "Could not find UE for RNTI %04x\n", srs_ind->rnti);
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return;
}
if (srs_ind->timing_advance_offset == 0xFFFF) {
LOG_W(NR_MAC, "Invalid timing advance offset for RNTI %04x\n", srs_ind->rnti);
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return;
}
......@@ -1210,6 +1238,7 @@ void handle_nr_srs_measurements(const module_id_t module_id,
if (nr_srs_bf_report.wide_band_snr == 0xFF) {
LOG_W(NR_MAC, "Invalid wide_band_snr for RNTI %04x\n", srs_ind->rnti);
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return;
}
......@@ -1308,12 +1337,14 @@ void handle_nr_srs_measurements(const module_id_t module_id,
default:
AssertFatal(1 == 0, "Invalid SRS usage\n");
}
NR_SCHED_UNLOCK(&nrmac->sched_lock);
}
long get_K2(NR_PUSCH_TimeDomainResourceAllocationList_t *tdaList,
int time_domain_assignment,
int mu) {
/* we assume that this function is mutex-protected from outside */
NR_PUSCH_TimeDomainResourceAllocation_t *tda = tdaList->list.array[time_domain_assignment];
if (tda->k2)
......@@ -1363,7 +1394,7 @@ static bool nr_UE_is_to_be_scheduled(const NR_ServingCellConfigCommon_t *scc, in
return has_data || sched_ctrl->SR || high_inactivity;
}
void update_ul_ue_R_Qm(int mcs, int mcs_table, const NR_PUSCH_Config_t *pusch_Config, uint16_t *R, uint8_t *Qm)
static void update_ul_ue_R_Qm(int mcs, int mcs_table, const NR_PUSCH_Config_t *pusch_Config, uint16_t *R, uint8_t *Qm)
{
*R = nr_get_code_rate_ul(mcs, mcs_table);
*Qm = nr_get_Qm_ul(mcs, mcs_table);
......@@ -1374,7 +1405,14 @@ void update_ul_ue_R_Qm(int mcs, int mcs_table, const NR_PUSCH_Config_t *pusch_Co
}
}
void nr_ue_max_mcs_min_rb(int mu, int ph_limit, NR_sched_pusch_t *sched_pusch, NR_UE_UL_BWP_t *ul_bwp, uint16_t minRb, uint32_t tbs, uint16_t *Rb, uint8_t *mcs)
static void nr_ue_max_mcs_min_rb(int mu,
int ph_limit,
NR_sched_pusch_t *sched_pusch,
NR_UE_UL_BWP_t *ul_bwp,
uint16_t minRb,
uint32_t tbs,
uint16_t *Rb,
uint8_t *mcs)
{
AssertFatal(*Rb >= minRb, "illegal Rb %d < minRb %d\n", *Rb, minRb);
AssertFatal(*mcs >= 0 && *mcs <= 28, "illegal MCS %d\n", *mcs);
......@@ -1577,13 +1615,13 @@ static int comparator(const void *p, const void *q) {
return ((UEsched_t*)p)->coef < ((UEsched_t*)q)->coef;
}
void pf_ul(module_id_t module_id,
frame_t frame,
sub_frame_t slot,
NR_UE_info_t *UE_list[],
int max_num_ue,
int n_rb_sched,
uint16_t *rballoc_mask)
static void pf_ul(module_id_t module_id,
frame_t frame,
sub_frame_t slot,
NR_UE_info_t *UE_list[],
int max_num_ue,
int n_rb_sched,
uint16_t *rballoc_mask)
{
const int CC_id = 0;
......@@ -1870,7 +1908,7 @@ void pf_ul(module_id_t module_id,
}
}
bool nr_fr1_ulsch_preprocessor(module_id_t module_id, frame_t frame, sub_frame_t slot)
static bool nr_fr1_ulsch_preprocessor(module_id_t module_id, frame_t frame, sub_frame_t slot)
{
gNB_MAC_INST *nr_mac = RC.nrmac[module_id];
NR_COMMON_channels_t *cc = nr_mac->common_channels;
......@@ -1971,6 +2009,7 @@ bool nr_fr1_ulsch_preprocessor(module_id_t module_id, frame_t frame, sub_frame_t
nr_pp_impl_ul nr_init_fr1_ulsch_preprocessor(int CC_id)
{
/* during initialization: no mutex needed */
/* in the PF algorithm, we have to use the TBsize to compute the coefficient.
* This would include the number of DMRS symbols, which in turn depends on
* the time domain allocation. In case we are in a mixed slot, we do not want
......@@ -2002,6 +2041,9 @@ nr_pp_impl_ul nr_init_fr1_ulsch_preprocessor(int CC_id)
void nr_schedule_ulsch(module_id_t module_id, frame_t frame, sub_frame_t slot, nfapi_nr_ul_dci_request_t *ul_dci_req)
{
gNB_MAC_INST *nr_mac = RC.nrmac[module_id];
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
NR_SCHED_ENSURE_LOCKED(&nr_mac->sched_lock);
/* Uplink data can ONLY be scheduled when the current slot is a downlink slot,
 * because we have to schedule the DCI0 before scheduling the uplink data */
if (!is_xlsch_in_slot(nr_mac->dlsch_slot_bitmap[slot / 64], slot)) {
......
......@@ -40,8 +40,6 @@ void set_cset_offset(uint16_t);
void mac_top_init_gNB(ngran_node_t node_type);
void config_common(gNB_MAC_INST *nrmac, int pdsch_AntennaPorts, int pusch_AntennaPorts, NR_ServingCellConfigCommon_t *scc);
int nr_mac_enable_ue_rrc_processing_timer(module_id_t Mod_idP,
rnti_t rnti,
NR_SubcarrierSpacing_t subcarrierSpacing,
......@@ -58,7 +56,6 @@ void nr_mac_config_sib1(gNB_MAC_INST *nrmac, NR_BCCH_DL_SCH_Message_t *sib1);
bool nr_mac_add_test_ue(gNB_MAC_INST *nrmac, uint32_t rnti, NR_CellGroupConfig_t *CellGroup);
bool nr_mac_prepare_ra_nsa_ue(gNB_MAC_INST *nrmac, uint32_t rnti, NR_CellGroupConfig_t *CellGroup);
bool nr_mac_update_cellgroup(gNB_MAC_INST *nrmac, uint32_t rnti, NR_CellGroupConfig_t *CellGroup);
bool nr_mac_update_RA(gNB_MAC_INST *nrmac, uint32_t rnti, NR_CellGroupConfig_t *CellGroup);
void clear_nr_nfapi_information(gNB_MAC_INST *gNB,
int CC_idP,
......@@ -87,15 +84,6 @@ void nr_schedule_ue_spec(module_id_t module_id,
nfapi_nr_dl_tti_request_t *DL_req,
nfapi_nr_tx_data_request_t *TX_req);
uint32_t schedule_control_sib1(module_id_t module_id,
int CC_id,
NR_Type0_PDCCH_CSS_config_t *type0_PDCCH_CSS_config,
int time_domain_allocation,
NR_pdsch_dmrs_t *dmrs_parms,
NR_tda_info_t *tda_info,
uint8_t candidate_idx,
uint16_t num_total_bytes);
/* \brief default FR1 DL preprocessor init routine, returns preprocessor to call */
nr_pp_impl_dl nr_init_fr1_dlsch_preprocessor(int CC_id);
......@@ -139,41 +127,6 @@ void nr_clear_ra_proc(module_id_t module_idP, int CC_id, frame_t frameP, NR_RA_t
int nr_allocate_CCEs(int module_idP, int CC_idP, frame_t frameP, sub_frame_t slotP, int test_only);
void nr_get_Msg3alloc(module_id_t module_id,
int CC_id,
NR_ServingCellConfigCommon_t *scc,
sub_frame_t current_subframe,
frame_t current_frame,
NR_RA_t *ra,
int16_t *tdd_beam_association);
void nr_generate_Msg3_retransmission(module_id_t module_idP,
int CC_id,
frame_t frameP,
sub_frame_t slotP,
NR_RA_t *ra,
nfapi_nr_ul_dci_request_t *ul_dci_req);
/* \brief Function in gNB to fill RAR pdu when requested by PHY.
@param ra Instance of RA resources of gNB
@param dlsch_buffer Pointer to RAR input buffer
@param N_RB_UL Number of UL resource blocks
*/
void nr_fill_rar(uint8_t Mod_idP,
NR_RA_t * ra,
uint8_t * dlsch_buffer,
nfapi_nr_pusch_pdu_t *pusch_pdu);
void fill_msg3_pusch_pdu(nfapi_nr_pusch_pdu_t *pusch_pdu,
NR_ServingCellConfigCommon_t *scc,
int round,
int startSymbolAndLength,
rnti_t rnti, int scs,
int bwp_size, int bwp_start,
int mappingtype, int fh,
int msg3_first_rb, int msg3_nb_rb);
void schedule_nr_prach(module_id_t module_idP, frame_t frameP, sub_frame_t slotP);
uint16_t nr_mac_compute_RIV(uint16_t N_RB_DL, uint16_t RBstart, uint16_t Lcrbs);
......@@ -380,39 +333,10 @@ int nr_write_ce_dlsch_pdu(module_id_t module_idP,
unsigned char drx_cmd,
unsigned char *ue_cont_res_id);
void nr_generate_Msg2(module_id_t module_idP,
int CC_id,
frame_t frameP,
sub_frame_t slotP,
NR_RA_t *ra,
nfapi_nr_dl_tti_request_t *dl_req,
nfapi_nr_tx_data_request_t *TX_req);
void nr_generate_Msg4(module_id_t module_idP,
int CC_id,
frame_t frameP,
sub_frame_t slotP,
NR_RA_t *ra,
nfapi_nr_dl_tti_request_t *DL_req,
nfapi_nr_tx_data_request_t *TX_req);
void nr_check_Msg4_Ack(module_id_t module_id, int CC_id, frame_t frame, sub_frame_t slot, NR_RA_t *ra);
void nr_generate_Msg3_dcch_dtch_response(module_id_t module_idP,
int CC_id,
frame_t frameP,
sub_frame_t slotP,
NR_RA_t *ra,
nfapi_nr_dl_tti_request_t *DL_req,
nfapi_nr_tx_data_request_t *TX_req);
int binomial(int n, int k);
bool is_xlsch_in_slot(uint64_t bitmap, sub_frame_t slot);
void fill_ssb_vrb_map (NR_COMMON_channels_t *cc, int rbStart, int ssb_subcarrier_offset, uint16_t symStart, int CC_id);
/* \brief Function to indicate a received SDU on ULSCH.
@param Mod_id Instance ID of gNB
@param CC_id Component carrier index
......@@ -451,13 +375,6 @@ void handle_nr_srs_measurements(const module_id_t module_id,
const sub_frame_t slot,
nfapi_nr_srs_indication_pdu_t *srs_ind);
int16_t ssb_index_from_prach(module_id_t module_idP,
frame_t frameP,
sub_frame_t slotP,
uint16_t preamble_index,
uint8_t freq_index,
uint8_t symbol);
void find_SSB_and_RO_available(gNB_MAC_INST *nrmac);
NR_pdsch_dmrs_t get_dl_dmrs_params(const NR_ServingCellConfigCommon_t *scc,
......
......@@ -22,12 +22,130 @@
#include "mac_rrc_dl_handler.h"
#include "mac_proto.h"
#include "openair2/RRC/NR/rrc_gNB_UE_context.h"
#include "openair2/LAYER2/nr_rlc/nr_rlc_oai_api.h"
#include "openair2/RRC/NR/MESSAGES/asn1_msg.h"
#include "NR_RRCSetup.h"
#include "NR_DL-CCCH-Message.h"
#include "NR_CellGroupConfig.h"
static NR_RLC_BearerConfig_t *get_bearerconfig_from_srb(const f1ap_srb_to_be_setup_t *srb)
{
long priority = srb->srb_id; // high priority for SRB
e_NR_LogicalChannelConfig__ul_SpecificParameters__bucketSizeDuration bucket =
NR_LogicalChannelConfig__ul_SpecificParameters__bucketSizeDuration_ms5;
return get_SRB_RLC_BearerConfig(srb->srb_id, priority, bucket);
}
static void handle_ue_context_srbs_setup(const f1ap_ue_context_setup_t *req,
f1ap_ue_context_setup_t *resp,
NR_CellGroupConfig_t *cellGroupConfig)
{
DevAssert(req != NULL && resp != NULL && cellGroupConfig != NULL);
resp->srbs_to_be_setup_length = req->srbs_to_be_setup_length;
resp->srbs_to_be_setup = calloc(req->srbs_to_be_setup_length, sizeof(*resp->srbs_to_be_setup));
AssertFatal(resp->srbs_to_be_setup != NULL, "out of memory\n");
for (int i = 0; i < req->srbs_to_be_setup_length; i++) {
f1ap_srb_to_be_setup_t *srb = &req->srbs_to_be_setup[i];
NR_RLC_BearerConfig_t *rlc_BearerConfig = get_bearerconfig_from_srb(srb);
nr_rlc_add_srb(req->rnti, srb->srb_id, rlc_BearerConfig);
resp->srbs_to_be_setup[i] = *srb;
int ret = ASN_SEQUENCE_ADD(&cellGroupConfig->rlc_BearerToAddModList->list, rlc_BearerConfig);
DevAssert(ret == 0);
}
}
static NR_RLC_BearerConfig_t *get_bearerconfig_from_drb(const f1ap_drb_to_be_setup_t *drb)
{
const NR_RLC_Config_PR rlc_conf = drb->rlc_mode == RLC_MODE_UM ? NR_RLC_Config_PR_um_Bi_Directional : NR_RLC_Config_PR_am;
long priority = 13; // hardcoded for the moment
return get_DRB_RLC_BearerConfig(3 + drb->drb_id, drb->drb_id, rlc_conf, priority);
}
static void handle_ue_context_drbs_setup(const f1ap_ue_context_setup_t *req,
f1ap_ue_context_setup_t *resp,
NR_CellGroupConfig_t *cellGroupConfig)
{
DevAssert(req != NULL && resp != NULL && cellGroupConfig != NULL);
/* Note: the actual GTP tunnels are created in the F1AP branch of
* ue_context_*_response() */
resp->drbs_to_be_setup_length = req->drbs_to_be_setup_length;
resp->drbs_to_be_setup = calloc(req->drbs_to_be_setup_length, sizeof(*resp->drbs_to_be_setup));
AssertFatal(resp->drbs_to_be_setup != NULL, "out of memory\n");
for (int i = 0; i < req->drbs_to_be_setup_length; i++) {
f1ap_drb_to_be_setup_t *drb = &req->drbs_to_be_setup[i];
NR_RLC_BearerConfig_t *rlc_BearerConfig = get_bearerconfig_from_drb(drb);
nr_rlc_add_drb(req->rnti, drb->drb_id, rlc_BearerConfig);
resp->drbs_to_be_setup[i] = *drb;
int ret = ASN_SEQUENCE_ADD(&cellGroupConfig->rlc_BearerToAddModList->list, rlc_BearerConfig);
DevAssert(ret == 0);
}
}
void ue_context_setup_request(const f1ap_ue_context_setup_t *req)
{
gNB_MAC_INST *mac = RC.nrmac[0];
/* response has same type as request... */
f1ap_ue_context_setup_t resp = {
.gNB_CU_ue_id = req->gNB_CU_ue_id,
.gNB_DU_ue_id = req->gNB_DU_ue_id,
.rnti = req->rnti,
};
if (req->cu_to_du_rrc_information != NULL) {
AssertFatal(req->cu_to_du_rrc_information->cG_ConfigInfo == NULL, "CG-ConfigInfo not handled\n");
AssertFatal(req->cu_to_du_rrc_information->uE_CapabilityRAT_ContainerList == NULL, "UE capabilities not handled yet\n");
AssertFatal(req->cu_to_du_rrc_information->measConfig == NULL, "MeasConfig not handled\n");
}
NR_SCHED_LOCK(&mac->sched_lock);
NR_UE_info_t *UE = find_nr_UE(&RC.nrmac[0]->UE_info, req->rnti);
AssertFatal(UE != NULL, "did not find UE with RNTI %04x, but UE Context Setup Failure is not implemented\n", req->rnti);
if (req->srbs_to_be_setup_length > 0)
handle_ue_context_srbs_setup(req, &resp, UE->CellGroup);
if (req->drbs_to_be_setup_length > 0) {
handle_ue_context_drbs_setup(req, &resp, UE->CellGroup);
}
if (req->rrc_container != NULL)
nr_rlc_srb_recv_sdu(req->rnti, DCCH, req->rrc_container, req->rrc_container_length);
//nr_mac_update_cellgroup()
resp.du_to_cu_rrc_information = calloc(1, sizeof(du_to_cu_rrc_information_t));
AssertFatal(resp.du_to_cu_rrc_information != NULL, "out of memory\n");
resp.du_to_cu_rrc_information->cellGroupConfig = calloc(1,1024);
AssertFatal(resp.du_to_cu_rrc_information->cellGroupConfig != NULL, "out of memory\n");
asn_enc_rval_t enc_rval = uper_encode_to_buffer(&asn_DEF_NR_CellGroupConfig,
NULL,
UE->CellGroup,
resp.du_to_cu_rrc_information->cellGroupConfig,
1024);
AssertFatal(enc_rval.encoded > 0, "Could not encode CellGroup, failed element %s\n", enc_rval.failed_type->name);
resp.du_to_cu_rrc_information->cellGroupConfig_length = (enc_rval.encoded + 7) >> 3;
/* TODO: need to apply after UE context reconfiguration confirmed? */
process_CellGroup(UE->CellGroup, &UE->UE_sched_ctrl);
NR_SCHED_UNLOCK(&mac->sched_lock);
/* some sanity checks, since we use the same type for request and response */
DevAssert(resp.cu_to_du_rrc_information == NULL);
DevAssert(resp.du_to_cu_rrc_information != NULL);
DevAssert(resp.rrc_container == NULL && resp.rrc_container_length == 0);
mac->mac_rrc.ue_context_setup_response(req, &resp);
/* free the memory we allocated above */
free(resp.srbs_to_be_setup);
free(resp.drbs_to_be_setup);
free(resp.du_to_cu_rrc_information->cellGroupConfig);
free(resp.du_to_cu_rrc_information);
}
int dl_rrc_message(module_id_t module_id, const f1ap_dl_rrc_message_t *dl_rrc)
{
......
......@@ -25,6 +25,8 @@
#include "platform_types.h"
#include "f1ap_messages_types.h"
void ue_context_setup_request(const f1ap_ue_context_setup_t *req);
int dl_rrc_message(module_id_t module_id, const f1ap_dl_rrc_message_t *dl_rrc);
#endif /* MAC_RRC_DL_HANDLER_H */
......@@ -25,6 +25,8 @@
#include "platform_types.h"
#include "f1ap_messages_types.h"
typedef void (*ue_context_setup_response_func_t)(const f1ap_ue_context_setup_t* req, const f1ap_ue_context_setup_t *resp);
typedef void (*initial_ul_rrc_message_transfer_func_t)(module_id_t module_id, const f1ap_initial_ul_rrc_message_t *ul_rrc);
struct nr_mac_rrc_ul_if_s;
......
......@@ -24,6 +24,37 @@
#include "mac_rrc_ul.h"
static void ue_context_setup_response_direct(const f1ap_ue_context_setup_t *req, const f1ap_ue_context_setup_t *resp)
{
DevAssert(req->drbs_to_be_setup_length == resp->drbs_to_be_setup_length);
AssertFatal(req->drbs_to_be_setup_length == 0, "not implemented\n");
(void) req; /* we don't need the request -- it is to set up GTP in F1 case */
MessageDef *msg = itti_alloc_new_message (TASK_MAC_GNB, 0, F1AP_UE_CONTEXT_SETUP_RESP);
f1ap_ue_context_setup_t *f1ap_msg = &F1AP_UE_CONTEXT_SETUP_RESP(msg);
/* copy all fields, but reallocate memory buffers! */
*f1ap_msg = *resp;
if (resp->srbs_to_be_setup_length > 0) {
DevAssert(resp->srbs_to_be_setup != NULL);
f1ap_msg->srbs_to_be_setup_length = resp->srbs_to_be_setup_length;
f1ap_msg->srbs_to_be_setup = calloc(f1ap_msg->srbs_to_be_setup_length, sizeof(*f1ap_msg->srbs_to_be_setup));
for (int i = 0; i < f1ap_msg->srbs_to_be_setup_length; ++i)
f1ap_msg->srbs_to_be_setup[i] = resp->srbs_to_be_setup[i];
}
f1ap_msg->du_to_cu_rrc_information = malloc(sizeof(*resp->du_to_cu_rrc_information));
AssertFatal(f1ap_msg->du_to_cu_rrc_information != NULL, "out of memory\n");
f1ap_msg->du_to_cu_rrc_information_length = resp->du_to_cu_rrc_information_length;
du_to_cu_rrc_information_t *du2cu = f1ap_msg->du_to_cu_rrc_information;
du2cu->cellGroupConfig_length = resp->du_to_cu_rrc_information->cellGroupConfig_length;
du2cu->cellGroupConfig = calloc(du2cu->cellGroupConfig_length, sizeof(*du2cu->cellGroupConfig));
AssertFatal(du2cu->cellGroupConfig != NULL, "out of memory\n");
memcpy(du2cu->cellGroupConfig, resp->du_to_cu_rrc_information->cellGroupConfig, du2cu->cellGroupConfig_length);
itti_send_msg_to_task(TASK_RRC_GNB, 0, msg);
}
static void initial_ul_rrc_message_transfer_direct(module_id_t module_id, const f1ap_initial_ul_rrc_message_t *ul_rrc)
{
MessageDef *msg = itti_alloc_new_message(TASK_MAC_GNB, 0, F1AP_INITIAL_UL_RRC_MESSAGE);
......@@ -46,5 +77,6 @@ static void initial_ul_rrc_message_transfer_direct(module_id_t module_id, const
void mac_rrc_ul_direct_init(struct nr_mac_rrc_ul_if_s *mac_rrc)
{
mac_rrc->ue_context_setup_response = ue_context_setup_response_direct;
mac_rrc->initial_ul_rrc_message_transfer = initial_ul_rrc_message_transfer_direct;
}
......@@ -19,11 +19,47 @@
* contact@openairinterface.org
*/
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "nr_mac_gNB.h"
#include "intertask_interface.h"
#include "openair3/ocp-gtpu/gtp_itf.h"
#include "mac_rrc_ul.h"
static void ue_context_setup_response_f1ap(const f1ap_ue_context_setup_t *req, const f1ap_ue_context_setup_t *resp)
{
DevAssert(req->drbs_to_be_setup_length == resp->drbs_to_be_setup_length);
AssertFatal(req->drbs_to_be_setup_length == 0, "not implemented\n");
DevAssert(req->srbs_to_be_setup_length == resp->srbs_to_be_setup_length);
MessageDef *msg = itti_alloc_new_message (TASK_MAC_GNB, 0, F1AP_UE_CONTEXT_SETUP_RESP);
f1ap_ue_context_setup_t *f1ap_msg = &F1AP_UE_CONTEXT_SETUP_RESP(msg);
/* copy all fields, but reallocate memory buffers! */
*f1ap_msg = *resp;
if (resp->srbs_to_be_setup_length > 0) {
DevAssert(resp->srbs_to_be_setup != NULL);
f1ap_msg->srbs_to_be_setup_length = resp->srbs_to_be_setup_length;
f1ap_msg->srbs_to_be_setup = calloc(f1ap_msg->srbs_to_be_setup_length, sizeof(*f1ap_msg->srbs_to_be_setup));
for (int i = 0; i < f1ap_msg->srbs_to_be_setup_length; ++i)
f1ap_msg->srbs_to_be_setup[i] = resp->srbs_to_be_setup[i];
}
f1ap_msg->du_to_cu_rrc_information = malloc(sizeof(*resp->du_to_cu_rrc_information));
AssertFatal(f1ap_msg->du_to_cu_rrc_information != NULL, "out of memory\n");
f1ap_msg->du_to_cu_rrc_information_length = resp->du_to_cu_rrc_information_length;
du_to_cu_rrc_information_t *du2cu = f1ap_msg->du_to_cu_rrc_information;
du2cu->cellGroupConfig_length = resp->du_to_cu_rrc_information->cellGroupConfig_length;
du2cu->cellGroupConfig = calloc(du2cu->cellGroupConfig_length, sizeof(*du2cu->cellGroupConfig));
AssertFatal(du2cu->cellGroupConfig != NULL, "out of memory\n");
memcpy(du2cu->cellGroupConfig, resp->du_to_cu_rrc_information->cellGroupConfig, du2cu->cellGroupConfig_length);
itti_send_msg_to_task(TASK_DU_F1, 0, msg);
}
static void initial_ul_rrc_message_transfer_f1ap(module_id_t module_id, const f1ap_initial_ul_rrc_message_t *ul_rrc)
{
MessageDef *msg = itti_alloc_new_message(TASK_MAC_GNB, 0, F1AP_INITIAL_UL_RRC_MESSAGE);
......@@ -46,6 +82,7 @@ static void initial_ul_rrc_message_transfer_f1ap(module_id_t module_id, const f1
void mac_rrc_ul_f1ap_init(struct nr_mac_rrc_ul_if_s *mac_rrc)
{
mac_rrc->ue_context_setup_response = ue_context_setup_response_f1ap;
mac_rrc->initial_ul_rrc_message_transfer = initial_ul_rrc_message_transfer_f1ap;
}
......@@ -60,7 +60,9 @@ void *nrmac_stats_thread(void *arg) {
while (oai_exit == 0) {
char *p = output;
NR_SCHED_LOCK(&gNB->sched_lock);
p += dump_mac_stats(gNB, p, end - p, false);
NR_SCHED_UNLOCK(&gNB->sched_lock);
p += snprintf(p, end - p, "\n");
p += print_meas_log(&gNB->eNB_scheduler, "DL & UL scheduling timing", NULL, NULL, p, end - p);
p += print_meas_log(&gNB->schedule_dlsch, "dlsch scheduler", NULL, NULL, p, end - p);
......@@ -87,7 +89,11 @@ size_t dump_mac_stats(gNB_MAC_INST *gNB, char *output, size_t strlen, bool reset
const char *begin = output;
const char *end = output + strlen;
pthread_mutex_lock(&gNB->UE_info.mutex);
/* this function is called from gNB_dlsch_ulsch_scheduler(), so it assumes
 * that the scheduler is locked */
NR_SCHED_ENSURE_LOCKED(&gNB->sched_lock);
NR_SCHED_LOCK(&gNB->UE_info.mutex);
UE_iterator(gNB->UE_info.list, UE) {
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
NR_mac_stats_t *stats = &UE->mac_stats;
......@@ -175,7 +181,7 @@ size_t dump_mac_stats(gNB_MAC_INST *gNB, char *output, size_t strlen, bool reset
stats->ul.lc_bytes[lc_id]);
}
}
pthread_mutex_unlock(&gNB->UE_info.mutex);
NR_SCHED_UNLOCK(&gNB->UE_info.mutex);
return output - begin;
}
......@@ -233,6 +239,8 @@ void mac_top_init_gNB(ngran_node_t node_type)
RC.nrmac[i]->first_MIB = true;
pthread_mutex_init(&RC.nrmac[i]->sched_lock, NULL);
pthread_mutex_init(&RC.nrmac[i]->UE_info.mutex, NULL);
uid_linear_allocator_init(&RC.nrmac[i]->UE_info.uid_allocator);
......
......@@ -41,6 +41,25 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#define NR_SCHED_LOCK(lock) \
do { \
int rc = pthread_mutex_lock(lock); \
AssertFatal(rc == 0, "error while locking scheduler mutex\n"); \
} while (0)
#define NR_SCHED_UNLOCK(lock) \
do { \
int rc = pthread_mutex_unlock(lock); \
AssertFatal(rc == 0, "error while unlocking scheduler mutex\n"); \
} while (0)
#define NR_SCHED_ENSURE_LOCKED(lock)\
do {\
int rc = pthread_mutex_trylock(lock); \
AssertFatal(rc == EBUSY, "this function should be called with the scheduler mutex locked\n");\
} while (0)
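These wrappers encode the locking convention used throughout this change: external entry points (UCI, CRC, SRS and SDU indications, F1 handlers) take sched_lock themselves, while functions only reachable from gNB_dlsch_ulsch_scheduler() merely assert that the lock is already held. A minimal illustration of the intended usage (function names hypothetical):

  // entry point called from outside the scheduler: takes the lock itself
  void some_indication_handler(gNB_MAC_INST *nrmac)
  {
    NR_SCHED_LOCK(&nrmac->sched_lock);
    /* ... modify scheduler state ... */
    NR_SCHED_UNLOCK(&nrmac->sched_lock);
  }

  // helper only ever called with the lock held: just checks the invariant
  static void scheduler_internal_helper(gNB_MAC_INST *nrmac)
  {
    NR_SCHED_ENSURE_LOCKED(&nrmac->sched_lock);
    /* ... */
  }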
/* Common */
#include "radio/COMMON/common_lib.h"
......@@ -641,6 +660,7 @@ typedef struct NR_bler_options {
typedef struct nr_mac_rrc_ul_if_s {
/* TODO add other message types as necessary */
ue_context_setup_response_func_t ue_context_setup_response;
initial_ul_rrc_message_transfer_func_t initial_ul_rrc_message_transfer;
} nr_mac_rrc_ul_if_t;
......@@ -795,6 +815,8 @@ typedef struct gNB_MAC_INST_s {
int16_t frame;
int16_t slot;
pthread_mutex_t sched_lock;
} gNB_MAC_INST;
#endif /*__LAYER2_NR_MAC_GNB_H__ */
......@@ -55,12 +55,17 @@ void nr_rrc_mac_remove_ue(rnti_t rntiMaybeUEid)
nr_rlc_remove_ue(rntiMaybeUEid);
gNB_MAC_INST *nrmac = RC.nrmac[0];
NR_SCHED_LOCK(&nrmac->sched_lock);
mac_remove_nr_ue(nrmac, rntiMaybeUEid);
NR_SCHED_UNLOCK(&nrmac->sched_lock);
}
void nr_rrc_mac_update_cellgroup(rnti_t rntiMaybeUEid, NR_CellGroupConfig_t *cgc)
{
nr_mac_update_cellgroup(RC.nrmac[0], rntiMaybeUEid, cgc);
gNB_MAC_INST *nrmac = RC.nrmac[0];
NR_SCHED_LOCK(&nrmac->sched_lock);
nr_mac_update_cellgroup(nrmac, rntiMaybeUEid, cgc);
NR_SCHED_UNLOCK(&nrmac->sched_lock);
}
uint16_t mac_rrc_nr_data_req(const module_id_t Mod_idP,
......
......@@ -25,6 +25,8 @@
#include "platform_types.h"
#include "f1ap_messages_types.h"
typedef void (*ue_context_setup_request_func_t)(const f1ap_ue_context_setup_t *req);
typedef void (*dl_rrc_message_transfer_func_t)(module_id_t module_id, const f1ap_dl_rrc_message_t *dl_rrc);
struct nr_mac_rrc_dl_if_s;
......
......@@ -33,5 +33,6 @@ static void dl_rrc_message_transfer_direct(module_id_t module_id, const f1ap_dl_
void mac_rrc_dl_direct_init(nr_mac_rrc_dl_if_t *mac_rrc)
{
mac_rrc->ue_context_setup_request = ue_context_setup_request;
mac_rrc->dl_rrc_message_transfer = dl_rrc_message_transfer_direct;
}
......@@ -19,9 +19,27 @@
* contact@openairinterface.org
*/
#include <stdlib.h>
#include "mac_rrc_dl.h"
#include "nr_rrc_defs.h"
static void ue_context_setup_request_f1ap(const f1ap_ue_context_setup_t *req)
{
MessageDef *msg = itti_alloc_new_message(TASK_RRC_GNB, 0, F1AP_UE_CONTEXT_SETUP_REQ);
f1ap_ue_context_setup_t *f1ap_msg = &F1AP_UE_CONTEXT_SETUP_REQ(msg);
*f1ap_msg = *req;
AssertFatal(req->cu_to_du_rrc_information == NULL, "cu_to_du_rrc_information not supported yet\n");
f1ap_msg->cu_to_du_rrc_information = malloc(sizeof(*f1ap_msg->cu_to_du_rrc_information));
if (req->rrc_container_length > 0) {
f1ap_msg->rrc_container = calloc(req->rrc_container_length, sizeof(*f1ap_msg->rrc_container));
AssertFatal(f1ap_msg->rrc_container != NULL, "out of memory\n");
f1ap_msg->rrc_container_length = req->rrc_container_length;
memcpy(f1ap_msg->rrc_container, req->rrc_container, req->rrc_container_length);
}
itti_send_msg_to_task(TASK_CU_F1, 0, msg);
}
static void dl_rrc_message_transfer_f1ap(module_id_t module_id, const f1ap_dl_rrc_message_t *dl_rrc)
{
/* TODO call F1AP function directly? no real-time constraint here */
......@@ -40,5 +58,6 @@ static void dl_rrc_message_transfer_f1ap(module_id_t module_id, const f1ap_dl_rr
void mac_rrc_dl_f1ap_init(nr_mac_rrc_dl_if_t *mac_rrc)
{
mac_rrc->ue_context_setup_request = ue_context_setup_request_f1ap;
mac_rrc->dl_rrc_message_transfer = dl_rrc_message_transfer_f1ap;
}
......@@ -2323,6 +2323,8 @@ NR_CellGroupConfig_t *get_initial_cellGroupConfig(int uid,
return cellGroupConfig;
}
#include "common/ran_context.h"
#include "nr_rrc_defs.h"
void update_cellGroupConfig(NR_CellGroupConfig_t *cellGroupConfig,
const int uid,
NR_UE_NR_Capability_t *uecap,
......@@ -2338,6 +2340,13 @@ void update_cellGroupConfig(NR_CellGroupConfig_t *cellGroupConfig,
return;
DevAssert(configuration->scc != NULL);
/* This is a hack and will be removed once the CellGroupConfig is fully
* handled at the DU */
if (NODE_IS_CU(RC.nrrrc[0]->node_type)) {
LOG_W(RRC, "update of CellGroupConfig not yet supported in F1\n");
return;
}
NR_SpCellConfig_t *SpCellConfig = cellGroupConfig->spCellConfig;
NR_ServingCellConfigCommon_t *scc = configuration->scc;
......
......@@ -481,6 +481,7 @@ typedef struct {
typedef struct nr_mac_rrc_dl_if_s {
/* TODO add other message types as necessary */
ue_context_setup_request_func_t ue_context_setup_request;
dl_rrc_message_transfer_func_t dl_rrc_message_transfer;
} nr_mac_rrc_dl_if_t;
......
......@@ -363,7 +363,6 @@ static void rrc_gNB_generate_RRCSetup(instance_t instance,
// remove UE after 10 frames after RRCConnectionRelease is triggered
ue_context_pP->ue_context.ue_release_timer_thres = 1000;
/* TODO: this should go through the E1 interface */
nr_pdcp_add_srbs(true, rnti, ue_context_pP->ue_context.SRB_configList, 0, NULL, NULL);
f1ap_dl_rrc_message_t dl_rrc = {
......@@ -497,6 +496,7 @@ static void rrc_gNB_generate_defaultRRCReconfiguration(const protocol_ctxt_t *co
/******************** Radio Bearer Config ********************/
gNB_RRC_UE_t *ue_p = &ue_context_pP->ue_context;
AssertFatal(ue_p->nb_of_pdusessions == 0, "logic bug: PDU sessions present before RRC Connection established\n");
struct NR_RRCReconfiguration_v1530_IEs__dedicatedNAS_MessageList *dedicatedNAS_MessageList = CALLOC(1, sizeof(*dedicatedNAS_MessageList));
......@@ -2285,72 +2285,12 @@ rrc_gNB_decode_dcch(
ul_dcch_msg);
}
if (!NODE_IS_CU(gnb_rrc_inst->node_type)) {
if (UE->established_pdu_sessions_flag == 1) {
rrc_gNB_generate_dedicatedRRCReconfiguration(ctxt_pP, ue_context_p, NULL);
} else {
rrc_gNB_generate_defaultRRCReconfiguration(ctxt_pP, ue_context_p);
}
} else {
/*Generate a UE context setup request message towards the DU to provide the UE
*capability info and get the updates on master cell group config from the DU*/
MessageDef *message_p;
message_p = itti_alloc_new_message (TASK_RRC_GNB, 0, F1AP_UE_CONTEXT_SETUP_REQ);
f1ap_ue_context_setup_t *req=&F1AP_UE_CONTEXT_SETUP_REQ (message_p);
//UE_IDs will be extracted from F1AP layer
req->gNB_CU_ue_id = 0;
req->gNB_DU_ue_id = 0;
req->rnti = UE->rnti;
req->mcc = gnb_rrc_inst->configuration.mcc[0];
req->mnc = gnb_rrc_inst->configuration.mnc[0];
req->mnc_digit_length = gnb_rrc_inst->configuration.mnc_digit_length[0];
req->nr_cellid = gnb_rrc_inst->nr_cellid;
if (UE->established_pdu_sessions_flag == 1) {
/*Instruction towards the DU for SRB2 configuration*/
req->srbs_to_be_setup = malloc(1*sizeof(f1ap_srb_to_be_setup_t));
req->srbs_to_be_setup_length = 1;
f1ap_srb_to_be_setup_t *SRBs=req->srbs_to_be_setup;
SRBs[0].srb_id = 2;
SRBs[0].lcid = 2;
/*Instruction towards the DU for DRB configuration and tunnel creation*/
req->drbs_to_be_setup = malloc(1*sizeof(f1ap_drb_to_be_setup_t));
req->drbs_to_be_setup_length = 1;
f1ap_drb_to_be_setup_t *DRBs=req->drbs_to_be_setup;
LOG_I(RRC, "Length of DRB list:%d \n", req->drbs_to_be_setup_length);
DRBs[0].drb_id = 1;
DRBs[0].rlc_mode = RLC_MODE_AM;
DRBs[0].up_ul_tnl[0].tl_address = inet_addr(gnb_rrc_inst->eth_params_s.my_addr);
DRBs[0].up_ul_tnl[0].port = gnb_rrc_inst->eth_params_s.my_portd;
DRBs[0].up_ul_tnl_length = 1;
DRBs[0].up_dl_tnl[0].tl_address = inet_addr(gnb_rrc_inst->eth_params_s.remote_addr);
DRBs[0].up_dl_tnl[0].port = gnb_rrc_inst->eth_params_s.remote_portd;
DRBs[0].up_dl_tnl_length = 1;
}
if( ul_dcch_msg->message.choice.c1->choice.ueCapabilityInformation->criticalExtensions.present ==
NR_UECapabilityInformation__criticalExtensions_PR_ueCapabilityInformation ) {
struct NR_UE_CapabilityRAT_ContainerList *ue_CapabilityRAT_ContainerList =
ul_dcch_msg->message.choice.c1->choice.ueCapabilityInformation->criticalExtensions.choice.ueCapabilityInformation->ue_CapabilityRAT_ContainerList;
if(ue_CapabilityRAT_ContainerList!=NULL){
LOG_I(NR_RRC, "ue_CapabilityRAT_ContainerList is present \n");
req->cu_to_du_rrc_information = calloc(1,sizeof(cu_to_du_rrc_information_t));
req->cu_to_du_rrc_information->uE_CapabilityRAT_ContainerList = calloc(1,4096);
asn_enc_rval_t enc_rval = uper_encode_to_buffer(&asn_DEF_NR_UE_CapabilityRAT_ContainerList,
NULL,
ue_CapabilityRAT_ContainerList,
req->cu_to_du_rrc_information->uE_CapabilityRAT_ContainerList,
4096);
AssertFatal (enc_rval.encoded > 0, "ASN1 ue_CapabilityRAT_ContainerList encoding failed (%s, %jd)!\n",
enc_rval.failed_type->name, enc_rval.encoded);
req->cu_to_du_rrc_information->uE_CapabilityRAT_ContainerList_length = (enc_rval.encoded+7)>>3;
}
else{
LOG_I(NR_RRC, "ue_CapabilityRAT_ContainerList is not present \n");
}
}
itti_send_msg_to_task (TASK_CU_F1, ctxt_pP->module_id, message_p);
}
// we send the UE capabilities request before RRC connection is complete,
// so we cannot have a PDU session yet
AssertFatal(UE->established_pdu_sessions_flag == 0, "logic bug: received capabilities while PDU session established\n");
// TODO: send UE context modification response with UE capabilities to
// allow the DU to configure the CellGroupConfig
rrc_gNB_generate_defaultRRCReconfiguration(ctxt_pP, ue_context_p);
break;
......@@ -2525,233 +2465,6 @@ void rrc_gNB_process_dc_overall_timeout(const module_id_t gnb_mod_idP, x2ap_ENDC
rrc_remove_nsa_user(rrc, m->rnti);
}
static void rrc_DU_process_ue_context_setup_request(MessageDef *msg_p, instance_t instance)
{
f1ap_ue_context_setup_t * req=&F1AP_UE_CONTEXT_SETUP_REQ(msg_p);
gNB_RRC_INST *rrc = RC.nrrrc[instance];
gNB_MAC_INST *mac = RC.nrmac[instance];
rrc_gNB_ue_context_t *ue_context_p = rrc_gNB_get_ue_context_by_rnti(rrc, req->rnti);
gNB_RRC_UE_t *UE = &ue_context_p->ue_context;
MessageDef *message_p;
message_p = itti_alloc_new_message (TASK_RRC_GNB, 0, F1AP_UE_CONTEXT_SETUP_RESP);
f1ap_ue_context_setup_t * resp=&F1AP_UE_CONTEXT_SETUP_RESP(message_p);
uint32_t incoming_teid = 0;
if(req->cu_to_du_rrc_information!=NULL){
if(req->cu_to_du_rrc_information->uE_CapabilityRAT_ContainerList!=NULL){
LOG_I(NR_RRC, "Length of ue_CapabilityRAT_ContainerList is: %d \n", (int) req->cu_to_du_rrc_information->uE_CapabilityRAT_ContainerList_length);
struct NR_UE_CapabilityRAT_ContainerList *ue_CapabilityRAT_ContainerList = NULL;
asn_dec_rval_t dec_rval = uper_decode_complete( NULL,
&asn_DEF_NR_UE_CapabilityRAT_ContainerList,
(void **)&ue_CapabilityRAT_ContainerList,
(uint8_t *)req->cu_to_du_rrc_information->uE_CapabilityRAT_ContainerList,
(int) req->cu_to_du_rrc_information->uE_CapabilityRAT_ContainerList_length);
if ((dec_rval.code != RC_OK) && (dec_rval.consumed == 0)) {
AssertFatal(1==0,"UE Capability RAT ContainerList decode error\n");
// free the memory
SEQUENCE_free( &asn_DEF_NR_UE_CapabilityRAT_ContainerList, ue_CapabilityRAT_ContainerList, 1 );
return;
}
//To fill ue_context.UE_Capability_MRDC, ue_context.UE_Capability_nr ...
int NR_index = -1;
for(int i = 0;i < ue_CapabilityRAT_ContainerList->list.count; i++){
if(ue_CapabilityRAT_ContainerList->list.array[i]->rat_Type ==
NR_RAT_Type_nr){
LOG_I(NR_RRC, "DU received NR_RAT_Type_nr UE capabilities Info through the UE Context Setup Request from the CU \n");
if (UE->UE_Capability_nr) {
ASN_STRUCT_FREE(asn_DEF_NR_UE_NR_Capability, UE->UE_Capability_nr);
UE->UE_Capability_nr = 0;
}
dec_rval = uper_decode(NULL,
&asn_DEF_NR_UE_NR_Capability,
(void **)&UE->UE_Capability_nr,
ue_CapabilityRAT_ContainerList->list.array[i]->ue_CapabilityRAT_Container.buf,
ue_CapabilityRAT_ContainerList->list.array[i]->ue_CapabilityRAT_Container.size,
0,
0);
if(LOG_DEBUGFLAG(DEBUG_ASN1)){
xer_fprint(stdout, &asn_DEF_NR_UE_NR_Capability, UE->UE_Capability_nr);
}
if((dec_rval.code != RC_OK) && (dec_rval.consumed == 0)){
LOG_E(NR_RRC, "UE %04x Failed to decode nr UE capabilities (%zu bytes)\n", req->rnti, dec_rval.consumed);
ASN_STRUCT_FREE(asn_DEF_NR_UE_NR_Capability, UE->UE_Capability_nr);
UE->UE_Capability_nr = 0;
}
UE->UE_Capability_size = ue_CapabilityRAT_ContainerList->list.array[i]->ue_CapabilityRAT_Container.size;
if(NR_index != -1){
LOG_E(NR_RRC,"fatal: more than 1 eutra capability\n");
exit(1);
}
NR_index = i;
}
if(ue_CapabilityRAT_ContainerList->list.array[i]->rat_Type ==
NR_RAT_Type_eutra_nr){
LOG_I(NR_RRC, "DU received NR_RAT_Type_eutra_nr UE capabilities Info through the UE Context Setup Request from the CU \n");
if (UE->UE_Capability_MRDC) {
ASN_STRUCT_FREE(asn_DEF_NR_UE_MRDC_Capability, UE->UE_Capability_MRDC);
UE->UE_Capability_MRDC = 0;
}
dec_rval = uper_decode(NULL,
&asn_DEF_NR_UE_MRDC_Capability,
(void **)&UE->UE_Capability_MRDC,
ue_CapabilityRAT_ContainerList->list.array[i]->ue_CapabilityRAT_Container.buf,
ue_CapabilityRAT_ContainerList->list.array[i]->ue_CapabilityRAT_Container.size,
0,
0);
if(LOG_DEBUGFLAG(DEBUG_ASN1)){
xer_fprint(stdout, &asn_DEF_NR_UE_MRDC_Capability, UE->UE_Capability_MRDC);
}
if((dec_rval.code != RC_OK) && (dec_rval.consumed == 0)){
LOG_E(NR_RRC, "UE %04x Failed to decode nr UE capabilities (%zu bytes)\n", req->rnti, dec_rval.consumed);
ASN_STRUCT_FREE(asn_DEF_NR_UE_MRDC_Capability, UE->UE_Capability_MRDC);
UE->UE_Capability_MRDC = 0;
}
UE->UE_MRDC_Capability_size = ue_CapabilityRAT_ContainerList->list.array[i]->ue_CapabilityRAT_Container.size;
}
if(ue_CapabilityRAT_ContainerList->list.array[i]->rat_Type ==
NR_RAT_Type_eutra){
//TODO
}
}
}
}
/* Configure SRB2 */
NR_SRB_ToAddMod_t *SRB2_config = NULL;
NR_SRB_ToAddModList_t *SRB_configList = NULL;
uint8_t SRBs_before_new_addition = 0;
if(req->srbs_to_be_setup_length>0){
if (UE->SRB_configList == NULL) {
LOG_W(NR_RRC, "The SRB list of the UE context is empty before the addition of new SRB at the DU \n");
UE->SRB_configList = CALLOC(1, sizeof(*UE->SRB_configList));
}
SRB_configList = UE->SRB_configList;
SRBs_before_new_addition = SRB_configList->list.count;
for (int i=0; i<req->srbs_to_be_setup_length; i++){
SRB2_config = CALLOC(1, sizeof(*SRB2_config));
SRB2_config->srb_Identity = req->srbs_to_be_setup[i].srb_id;
asn1cSeqAdd(&SRB_configList->list, SRB2_config);
}
}
/* Configure DRB */
NR_DRB_ToAddMod_t *DRB_config = NULL;
NR_DRB_ToAddModList_t *DRB_configList = NULL;
uint8_t drb_id_to_setup_start = 0;
uint8_t nb_drb_to_setup = 0;
long drb_priority[NGAP_MAX_DRBS_PER_UE];
if(req->drbs_to_be_setup_length>0){
if (UE->DRB_configList == NULL) {
UE->DRB_configList = CALLOC(1, sizeof(*UE->DRB_configList));
}
DRB_configList = UE->DRB_configList;
nb_drb_to_setup = req->drbs_to_be_setup_length;
for (int i=0; i<req->drbs_to_be_setup_length; i++){
DRB_config = CALLOC(1, sizeof(*DRB_config));
DRB_config->drb_Identity = req->drbs_to_be_setup[i].drb_id;
if (drb_id_to_setup_start == 0) drb_id_to_setup_start = DRB_config->drb_Identity;
asn1cSeqAdd(&DRB_configList->list, DRB_config);
f1ap_drb_to_be_setup_t drb_p = req->drbs_to_be_setup[i];
transport_layer_addr_t addr;
memcpy(addr.buffer, &drb_p.up_ul_tnl[0].tl_address, sizeof(drb_p.up_ul_tnl[0].tl_address));
addr.length=sizeof(drb_p.up_ul_tnl[0].tl_address)*8;
extern instance_t DUuniqInstance;
incoming_teid = newGtpuCreateTunnel(DUuniqInstance,
req->rnti,
drb_p.drb_id,
drb_p.drb_id,
drb_p.up_ul_tnl[0].teid,
-1, // no qfi
addr,
drb_p.up_ul_tnl[0].port,
DURecvCb,
NULL);
/* TODO: hardcoded to 13 for the time being, to be changed? */
drb_priority[DRB_config->drb_Identity-1] = 13;
}
}
NR_CellGroupConfig_t *cellGroupConfig = calloc(1, sizeof(NR_CellGroupConfig_t));
if (req->srbs_to_be_setup_length > 0 || req->drbs_to_be_setup_length>0)
// FIXME: fill_mastercellGroupConfig() won't fill the right priorities or
// bearer IDs for the DRBs
fill_mastercellGroupConfig(cellGroupConfig, UE->masterCellGroup, rrc->um_on_default_drb, SRB2_config ? 1 : 0, drb_id_to_setup_start, nb_drb_to_setup, drb_priority);
protocol_ctxt_t ctxt = {.rntiMaybeUEid = req->rnti, .module_id = instance, .instance = instance, .enb_flag = 1, .eNB_index = instance};
apply_macrlc_config(rrc, ue_context_p, &ctxt);
if(req->rrc_container_length > 0){
mem_block_t *pdcp_pdu_p = get_free_mem_block(req->rrc_container_length, __func__);
memcpy(&pdcp_pdu_p->data[0], req->rrc_container, req->rrc_container_length);
du_rlc_data_req(&ctxt, 1, 0x00, 1, 1, 0, req->rrc_container_length, pdcp_pdu_p);
LOG_I(F1AP, "Printing RRC Container of UE context setup request: \n");
for (int j=0; j<req->rrc_container_length; j++){
printf("%02x ", pdcp_pdu_p->data[j]);
}
printf("\n");
}
/* Fill the UE context setup response ITTI message to send to F1AP */
resp->gNB_CU_ue_id = req->gNB_CU_ue_id;
resp->rnti = req->rnti;
if(DRB_configList){
if(DRB_configList->list.count > 0){
resp->drbs_to_be_setup = calloc(1,DRB_configList->list.count*sizeof(f1ap_drb_to_be_setup_t));
resp->drbs_to_be_setup_length = DRB_configList->list.count;
for (int i=0; i<DRB_configList->list.count; i++){
resp->drbs_to_be_setup[i].drb_id = DRB_configList->list.array[i]->drb_Identity;
resp->drbs_to_be_setup[i].rlc_mode = RLC_MODE_AM;
resp->drbs_to_be_setup[i].up_dl_tnl[0].teid = incoming_teid;
resp->drbs_to_be_setup[i].up_dl_tnl[0].tl_address = inet_addr(mac->eth_params_n.my_addr);
resp->drbs_to_be_setup[i].up_dl_tnl_length = 1;
}
}
else{
LOG_W(NR_RRC, "No DRB added upon reception of F1 UE context setup request with a DRB to setup list\n");
}
}
if(SRB_configList){
if(SRB_configList->list.count >0 && SRBs_before_new_addition < SRB_configList->list.count){
resp->srbs_to_be_setup = calloc(1,req->srbs_to_be_setup_length*sizeof(f1ap_srb_to_be_setup_t));
resp->srbs_to_be_setup_length = req->srbs_to_be_setup_length;
for (int i=SRBs_before_new_addition; i<SRB_configList->list.count; i++){
resp->srbs_to_be_setup[i-SRBs_before_new_addition].srb_id = SRB_configList->list.array[i]->srb_Identity;
}
}
else{
LOG_W(NR_RRC, "No SRB added upon reception of F1 UE Context setup request at the DU\n");
}
}
else{
LOG_W(NR_RRC, "No SRB added upon reception of F1 UE Context setup request at the DU\n");
}
/* fixme:
* Here we should be encoding the updates on cellgroupconfig based on the content of UE capabilities
*/
resp->du_to_cu_rrc_information = calloc(1, sizeof(du_to_cu_rrc_information_t));
resp->du_to_cu_rrc_information->cellGroupConfig = calloc(1,1024);
asn_enc_rval_t enc_rval = uper_encode_to_buffer(&asn_DEF_NR_CellGroupConfig,
NULL,
UE->masterCellGroup, //(void *)cellGroupConfig,
resp->du_to_cu_rrc_information->cellGroupConfig,
1024);
if (enc_rval.encoded == -1) {
LOG_E(F1AP,"Could not encode ue_context.masterCellGroup, failed element %s\n",enc_rval.failed_type->name);
exit(-1);
}
resp->du_to_cu_rrc_information->cellGroupConfig_length = (enc_rval.encoded+7)>>3;
free(cellGroupConfig);
itti_send_msg_to_task(TASK_DU_F1, instance, message_p);
}
static void rrc_DU_process_ue_context_modification_request(MessageDef *msg_p, instance_t instance)
{
f1ap_ue_context_setup_t * req=&F1AP_UE_CONTEXT_MODIFICATION_REQ(msg_p);
......@@ -2899,54 +2612,29 @@ static void rrc_DU_process_ue_context_modification_request(MessageDef *msg_p, in
static void rrc_CU_process_ue_context_setup_response(MessageDef *msg_p, instance_t instance)
{
f1ap_ue_context_setup_t * resp=&F1AP_UE_CONTEXT_SETUP_RESP(msg_p);
protocol_ctxt_t ctxt = {.rntiMaybeUEid = resp->rnti, .module_id = instance, .instance = instance, .enb_flag = 1, .eNB_index = instance};
gNB_RRC_INST *rrc = RC.nrrrc[ctxt.module_id];
gNB_RRC_INST *rrc = RC.nrrrc[instance];
rrc_gNB_ue_context_t *ue_context_p = rrc_gNB_get_ue_context_by_rnti(rrc, resp->rnti);
gNB_RRC_UE_t *UE = &ue_context_p->ue_context;
NR_CellGroupConfig_t *cellGroupConfig = NULL;
asn_dec_rval_t dec_rval = uper_decode_complete( NULL,
&asn_DEF_NR_CellGroupConfig,
(void **)&cellGroupConfig,
(uint8_t *)resp->du_to_cu_rrc_information->cellGroupConfig,
(int) resp->du_to_cu_rrc_information->cellGroupConfig_length);
if ((dec_rval.code != RC_OK) && (dec_rval.consumed == 0)) {
AssertFatal(1==0,"Cell group config decode error\n");
// free the memory
SEQUENCE_free( &asn_DEF_NR_CellGroupConfig, cellGroupConfig, 1 );
return;
}
//xer_fprint(stdout,&asn_DEF_NR_CellGroupConfig, cellGroupConfig);
NR_CellGroupConfig_t *cellGroupConfig = NULL;
asn_dec_rval_t dec_rval = uper_decode_complete(NULL,
&asn_DEF_NR_CellGroupConfig,
(void **)&cellGroupConfig,
(uint8_t *)resp->du_to_cu_rrc_information->cellGroupConfig,
resp->du_to_cu_rrc_information->cellGroupConfig_length);
AssertFatal(dec_rval.code == RC_OK && dec_rval.consumed > 0, "Cell group config decode error\n");
if (UE->masterCellGroup == NULL) {
UE->masterCellGroup = calloc(1, sizeof(NR_CellGroupConfig_t));
}
if(cellGroupConfig->rlc_BearerToAddModList!=NULL){
if (UE->masterCellGroup->rlc_BearerToAddModList != NULL) {
int ue_ctxt_rlc_Bearers = UE->masterCellGroup->rlc_BearerToAddModList->list.count;
for(int i=ue_ctxt_rlc_Bearers; i<ue_ctxt_rlc_Bearers + cellGroupConfig->rlc_BearerToAddModList->list.count; i++){
asn1cSeqAdd(&UE->masterCellGroup->rlc_BearerToAddModList->list, cellGroupConfig->rlc_BearerToAddModList->list.array[i - ue_ctxt_rlc_Bearers]);
}
} else {
LOG_W(NR_RRC, "Empty rlc_BearerToAddModList at ue_context of the CU before filling the updates from UE context setup response \n");
UE->masterCellGroup->rlc_BearerToAddModList = calloc(1, sizeof(*cellGroupConfig->rlc_BearerToAddModList));
memcpy(UE->masterCellGroup->rlc_BearerToAddModList, cellGroupConfig->rlc_BearerToAddModList, sizeof(*cellGroupConfig->rlc_BearerToAddModList));
}
if (UE->masterCellGroup) {
ASN_STRUCT_FREE(asn_DEF_NR_CellGroupConfig, UE->masterCellGroup);
LOG_I(RRC, "UE %04x replacing existing CellGroupConfig with new one received from DU\n", UE->rnti);
}
if (LOG_DEBUGFLAG(DEBUG_ASN1)) {
UE->masterCellGroup = cellGroupConfig;
if (LOG_DEBUGFLAG(DEBUG_ASN1))
xer_fprint(stdout, &asn_DEF_NR_CellGroupConfig, UE->masterCellGroup);
}
if (UE->established_pdu_sessions_flag == 1) {
fill_DRB_configList(&ctxt, ue_context_p);
rrc_gNB_generate_dedicatedRRCReconfiguration(&ctxt, ue_context_p, cellGroupConfig);
} else {
rrc_gNB_generate_defaultRRCReconfiguration(&ctxt, ue_context_p);
}
free(cellGroupConfig->rlc_BearerToAddModList);
free(cellGroupConfig);
/* at this point, we don't have to do anything: the UE context setup request
* includes the Security Command, whose response will trigger the following
* messages (UE capability, to be specific) */
}
static void rrc_CU_process_ue_context_modification_response(MessageDef *msg_p, instance_t instance)
@@ -3508,10 +3196,6 @@ void *rrc_gnb_task(void *args_p) {
AssertFatal(NODE_IS_CU(RC.nrrrc[instance]->node_type), "should not receive F1AP_SETUP_REQUEST, need call by CU!\n");
rrc_gNB_process_f1_setup_req(&F1AP_SETUP_REQ(msg_p));
break;
case F1AP_UE_CONTEXT_SETUP_REQ:
rrc_DU_process_ue_context_setup_request(msg_p, instance);
break;
case F1AP_UE_CONTEXT_SETUP_RESP:
rrc_CU_process_ue_context_setup_response(msg_p, instance);
@@ -3583,6 +3267,27 @@ void *rrc_gnb_task(void *args_p) {
}
}
static void rrc_deliver_ue_ctxt_setup_req(void *deliver_pdu_data, ue_id_t ue_id, int srb_id, char *buf, int size, int sdu_id)
{
DevAssert(deliver_pdu_data != NULL);
gNB_RRC_INST *rrc = deliver_pdu_data;
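/* deliver-PDU callback for the SecurityModeCommand: the encoded PDCP PDU is carried as the
 * RRC container of an F1 UE Context Setup Request towards the DU */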
f1ap_ue_context_setup_t ue_context_setup_req = {
.gNB_CU_ue_id = 0xffffffff, /* filled by F1 for the moment */
.gNB_DU_ue_id = 0xffffffff, /* filled by F1 for the moment */
.rnti = ue_id,
.mcc = rrc->configuration.mcc[0],
.mnc = rrc->configuration.mnc[0],
.mnc_digit_length = rrc->configuration.mnc_digit_length[0],
.nr_cellid = rrc->nr_cellid,
.servCellId = 0, /* TODO: correct value? */
.srbs_to_be_setup = 0, /* no new SRBs */
.drbs_to_be_setup = 0, /* no new DRBs */
.rrc_container = (uint8_t*) buf, /* security mode command */
.rrc_container_length = size,
};
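/* send via the MAC/RRC interface; in an F1 split this goes out as an F1AP UE CONTEXT SETUP REQUEST */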
rrc->mac_rrc.ue_context_setup_request(&ue_context_setup_req);
}
//-----------------------------------------------------------------------------
void
rrc_gNB_generate_SecurityModeCommand(
@@ -3604,7 +3309,8 @@ rrc_gNB_generate_SecurityModeCommand(
gNB_RRC_INST *rrc = RC.nrrrc[ctxt_pP->module_id];
AssertFatal(!NODE_IS_DU(rrc->node_type), "illegal node type DU!\n");
nr_pdcp_data_req_srb(ctxt_pP->rntiMaybeUEid, DCCH, rrc_gNB_mui++, size, buffer, deliver_pdu_srb_f1, rrc);
/* the callback will fill the UE context setup request and forward it */
nr_pdcp_data_req_srb(ctxt_pP->rntiMaybeUEid, DCCH, rrc_gNB_mui++, size, buffer, rrc_deliver_ue_ctxt_setup_req, rrc);
}
void
......
@@ -401,8 +401,11 @@ void rrc_remove_nsa_user(gNB_RRC_INST *rrc, int rnti) {
rrc_rlc_remove_ue(&ctxt);
// WHAT A RACE CONDITION
// lock the scheduler before removing the UE. Note: mac_remove_nr_ue() checks
// that the scheduler is actually locked!
NR_SCHED_LOCK(&RC.nrmac[rrc->module_id]->sched_lock);
mac_remove_nr_ue(RC.nrmac[rrc->module_id], rnti);
NR_SCHED_UNLOCK(&RC.nrmac[rrc->module_id]->sched_lock);
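/* with the UE removed from the MAC scheduler, its GTP tunnels can be torn down next */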
gtpv1u_enb_delete_tunnel_req_t tmp={0};
tmp.rnti=rnti;
tmp.from_gnb=1;
......