Commit 39a69241 authored by Laurent THOMAS

ue-remove-rx-tx-minimum

parent 14e2d909
@@ -92,10 +92,6 @@
  *
  */
-#define RX_JOB_ID 0x1010
-#define TX_JOB_ID 100
 typedef enum {
   pss = 0,
   pbch = 1,
@@ -505,7 +501,6 @@ static void RU_write(nr_rxtx_thread_data_t *rxtxD) {
   if (mac->phy_config_request_sent &&
       openair0_cfg[0].duplex_mode == duplex_mode_TDD &&
       !get_softmodem_params()->continuous_tx) {
     int slots_frame = UE->frame_parms.slots_per_frame;
     int curr_slot = nr_ue_slot_select(&UE->nrUE_config, slot);
     if (curr_slot != NR_DOWNLINK_SLOT) {
@@ -522,18 +517,17 @@ static void RU_write(nr_rxtx_thread_data_t *rxtxD) {
       flags = TX_BURST_MIDDLE;
   }
-  if (flags || IS_SOFTMODEM_RFSIM)
-    AssertFatal(rxtxD->writeBlockSize ==
-                UE->rfdevice.trx_write_func(&UE->rfdevice,
-                                            proc->timestamp_tx,
-                                            txp,
-                                            rxtxD->writeBlockSize,
-                                            UE->frame_parms.nb_antennas_tx,
-                                            flags),"");
+  AssertFatal(rxtxD->writeBlockSize
+                  == openair0_write_in_order(&UE->rfdevice,
+                                             proc->timestamp_tx,
+                                             txp,
+                                             rxtxD->writeBlockSize,
+                                             UE->frame_parms.nb_antennas_tx,
+                                             flags),
+              "");
   for (int i=0; i<UE->frame_parms.nb_antennas_tx; i++)
     memset(txp[i], 0, rxtxD->writeBlockSize);
 }
 void processSlotTX(void *arg) {
@@ -542,6 +536,11 @@ void processSlotTX(void *arg) {
   UE_nr_rxtx_proc_t *proc = &rxtxD->proc;
   PHY_VARS_NR_UE *UE = rxtxD->UE;
   nr_phy_data_tx_t phy_data = {0};
+  // force waiting until the previous slot has finished
+  // We block to prevent any parallel execution of processSlotTX() as there are race conditions to fix (likely a lot of them)
+  notifiedFIFO_elt_t *res = pullNotifiedFIFO(UE->tx_resume_ind_fifo[proc->nr_slot_tx]);
+  delNotifiedFIFO_elt(res);
   LOG_D(PHY,"%d.%d => slot type %d\n", proc->frame_tx, proc->nr_slot_tx, proc->tx_slot_type);
   if (proc->tx_slot_type == NR_UPLINK_SLOT || proc->tx_slot_type == NR_MIXED_SLOT){
@@ -563,6 +562,7 @@ void processSlotTX(void *arg) {
                                         .slot_rx = proc->nr_slot_rx,
                                         .frame_tx = proc->frame_tx,
                                         .slot_tx = proc->nr_slot_tx,
+                                        .dci_ind = NULL,
                                         .phy_data = &phy_data};
     UE->if_inst->ul_indication(&ul_indication);
@@ -571,7 +571,12 @@ void processSlotTX(void *arg) {
     phy_procedures_nrUE_TX(UE, proc, &phy_data);
   }
+  // check that we have not made a mistake: the queue must be empty now
+  res = pollNotifiedFIFO(UE->tx_resume_ind_fifo[proc->nr_slot_tx]);
+  AssertFatal(res == NULL, "");
+  // unblock the processing of the next slot
+  int next_slot = (proc->nr_slot_tx + 1) % UE->frame_parms.slots_per_frame;
+  send_slot_ind(UE->tx_resume_ind_fifo[next_slot], next_slot);
   RU_write(rxtxD);
 }
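The blocks added above serialize TX slot processing: processSlotTX() first pulls one "resume" token for its own slot, and posts a token for the next slot once it has finished; the DLSCH decoding code (further down in this commit) posts the extra tokens counted in tx_wait_for_dlsch. A minimal, self-contained sketch of that handshake, using plain pthread primitives instead of the OAI notified FIFOs (names and sizes are illustrative only, and the extra waits for pending DLSCH decodes are omitted):

/* Illustrative sketch, not the OAI implementation: one "gate" per slot,
 * each TX slot waits for a token before running and posts one for the next slot. */
#include <pthread.h>
#include <stdio.h>

#define SLOTS_PER_FRAME 20

typedef struct {
  pthread_mutex_t lock;
  pthread_cond_t cond;
  int tokens; /* pending resume indications for this slot */
} slot_gate_t;

static slot_gate_t gate[SLOTS_PER_FRAME];

static void gate_init(void)
{
  for (int i = 0; i < SLOTS_PER_FRAME; i++) {
    pthread_mutex_init(&gate[i].lock, NULL);
    pthread_cond_init(&gate[i].cond, NULL);
    gate[i].tokens = 0;
  }
}

/* counterpart of send_slot_ind(): allow TX processing of 'slot' to proceed */
static void gate_post(int slot)
{
  pthread_mutex_lock(&gate[slot].lock);
  gate[slot].tokens++;
  pthread_cond_signal(&gate[slot].cond);
  pthread_mutex_unlock(&gate[slot].lock);
}

/* counterpart of pullNotifiedFIFO() at the top of processSlotTX() */
static void gate_wait(int slot)
{
  pthread_mutex_lock(&gate[slot].lock);
  while (gate[slot].tokens == 0)
    pthread_cond_wait(&gate[slot].cond, &gate[slot].lock);
  gate[slot].tokens--;
  pthread_mutex_unlock(&gate[slot].lock);
}

static void process_slot_tx(int slot)
{
  gate_wait(slot);                          /* block until the previous slot is done */
  printf("TX processing slot %d\n", slot);  /* ... uplink PHY work would run here ... */
  gate_post((slot + 1) % SLOTS_PER_FRAME);  /* unblock the next slot */
}

int main(void)
{
  gate_init();
  gate_post(0); /* seed the very first slot, as the main thread does when first_tx is true */
  for (int s = 0; s < 5; s++)
    process_slot_tx(s);
  return 0;
}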
@@ -746,9 +751,6 @@ void *UE_thread(void *arg)
   notifiedFIFO_t nf;
   initNotifiedFIFO(&nf);
-  notifiedFIFO_t txFifo;
-  initNotifiedFIFO(&txFifo);
   notifiedFIFO_t freeBlocks;
   initNotifiedFIFO_nothreadSafe(&freeBlocks);
@@ -760,13 +762,13 @@
   int absolute_slot=0, decoded_frame_rx=INT_MAX, trashed_frames=0;
   initNotifiedFIFO(&UE->phy_config_ind);
-  int num_ind_fifo = nb_slot_frame;
-  for(int i=0; i < num_ind_fifo; i++) {
-    UE->tx_wait_for_dlsch[num_ind_fifo] = 0;
+  int tx_wait_for_dlsch[nb_slot_frame];
+  for (int i = 0; i < nb_slot_frame; i++) {
+    tx_wait_for_dlsch[i] = 0;
     UE->tx_resume_ind_fifo[i] = malloc(sizeof(*UE->tx_resume_ind_fifo[i]));
     initNotifiedFIFO(UE->tx_resume_ind_fifo[i]);
   }
+  bool first_tx = true;
   while (!oai_exit) {
     if (syncRunning) {
@@ -836,6 +838,7 @@
     absolute_slot++;
+    const int DURATION_RX_TO_TX = 2;
     int slot_nr = absolute_slot % nb_slot_frame;
     nr_rxtx_thread_data_t curMsg = {0};
@@ -908,32 +911,57 @@
     if (curMsg.proc.nr_slot_tx == 0)
       nr_ue_rrc_timer_trigger(UE->Mod_id, curMsg.proc.frame_tx, curMsg.proc.gNB_id);
-    // Start TX slot processing here. It runs in parallel with RX slot processing
-    notifiedFIFO_elt_t *newElt = newNotifiedFIFO_elt(sizeof(nr_rxtx_thread_data_t), curMsg.proc.nr_slot_tx, &txFifo, processSlotTX);
-    nr_rxtx_thread_data_t *curMsgTx = (nr_rxtx_thread_data_t *) NotifiedFifoData(newElt);
-    curMsgTx->proc = curMsg.proc;
+    UE_nr_rxtx_proc_t proc = {0};
+    // update thread index for received subframe
+    proc.nr_slot_rx = slot_nr;
+    proc.nr_slot_tx = (absolute_slot + DURATION_RX_TO_TX) % nb_slot_frame;
+    proc.frame_rx = (absolute_slot / nb_slot_frame) % MAX_FRAME_NUMBER;
+    proc.frame_tx = ((absolute_slot + DURATION_RX_TO_TX) / nb_slot_frame) % MAX_FRAME_NUMBER;
+    proc.rx_slot_type = nr_ue_slot_select(cfg, proc.nr_slot_rx);
+    proc.tx_slot_type = nr_ue_slot_select(cfg, proc.nr_slot_tx);
+    // proc.frame_number_4lsb = -1;
+    // LOG_I(PHY,"Process slot %d total gain %d\n", slot_nr, UE->rx_total_gain_dB);
+    // Decode DCI
+    notifiedFIFO_elt_t *MsgRx = newNotifiedFIFO_elt(sizeof(nr_rxtx_thread_data_t), proc.nr_slot_rx, NULL, UE_dl_processing);
+    nr_rxtx_thread_data_t *curMsgRx = (nr_rxtx_thread_data_t *)NotifiedFifoData(MsgRx);
+    memset(curMsgRx, 0, sizeof(*curMsgRx));
+    curMsgRx->phy_data = UE_dl_preprocessing(UE, &proc);
+    // From the DCI, note whether a future tx slot will have to wait for the DL decode to be done
+    if (curMsgRx->phy_data.dlsch[0].active && curMsgRx->phy_data.dlsch[0].rnti_type != _RA_RNTI_
+        && curMsgRx->phy_data.dlsch[0].rnti_type != _SI_RNTI_) {
+      // indicate to tx thread to wait for DLSCH decoding
+      const int ack_nack_slot =
+          (proc.nr_slot_rx + curMsgRx->phy_data.dlsch[0].dlsch_config.k1_feedback) % UE->frame_parms.slots_per_frame;
+      tx_wait_for_dlsch[ack_nack_slot]++;
+    }
+    // We have processed the DCI
+    // We have noted down what we have to do in future Tx in tx_wait_for_dlsch[]
+    // now RX slot processing after DCI. We launch and forget.
+    curMsgRx->proc = proc;
+    curMsgRx->UE = UE;
+    pushTpool(&(get_nrUE_params()->Tpool), MsgRx);
+    // Start TX slot processing here. It runs and ends freely, but it will wait for the ACK/NACK information
+    notifiedFIFO_elt_t *MsgTx = newNotifiedFIFO_elt(sizeof(nr_rxtx_thread_data_t), proc.nr_slot_tx, NULL, processSlotTX);
+    nr_rxtx_thread_data_t *curMsgTx = (nr_rxtx_thread_data_t *)NotifiedFifoData(MsgTx);
+    curMsgTx->proc = proc;
     curMsgTx->writeBlockSize = writeBlockSize;
     curMsgTx->proc.timestamp_tx = writeTimestamp;
     curMsgTx->UE = UE;
-    curMsgTx->tx_wait_for_dlsch = UE->tx_wait_for_dlsch[curMsgTx->proc.nr_slot_tx];
-    UE->tx_wait_for_dlsch[curMsgTx->proc.nr_slot_tx] = 0;
-    pushTpool(&(get_nrUE_params()->Tpool), newElt);
-    // RX slot processing. We launch and forget.
-    newElt = newNotifiedFIFO_elt(sizeof(nr_rxtx_thread_data_t), curMsg.proc.nr_slot_rx, NULL, UE_dl_processing);
-    nr_rxtx_thread_data_t *curMsgRx = (nr_rxtx_thread_data_t *) NotifiedFifoData(newElt);
-    curMsgRx->proc = curMsg.proc;
-    curMsgRx->UE = UE;
-    curMsgRx->phy_data = UE_dl_preprocessing(UE, &curMsg.proc);
-    pushTpool(&(get_nrUE_params()->Tpool), newElt);
-    // Wait for TX slot processing to finish
-    notifiedFIFO_elt_t *res;
-    res = pullTpool(&txFifo, &(get_nrUE_params()->Tpool));
-    if (res == NULL)
-      LOG_E(PHY, "Tpool has been aborted\n");
-    else
-      delNotifiedFIFO_elt(res);
+    curMsgTx->tx_wait_for_dlsch = tx_wait_for_dlsch[proc.nr_slot_tx];
+    if (tx_wait_for_dlsch[proc.nr_slot_tx])
+      LOG_D(PHY, "reading launch tx for slot %d total to wait %d\n", proc.nr_slot_tx, tx_wait_for_dlsch[proc.nr_slot_tx]);
+    tx_wait_for_dlsch[proc.nr_slot_tx] = 0;
+    // we always wait for 1 event at the beginning to keep slots in order
+    if (first_tx) {
+      // as we wait for the previous slot to finish, tell the first slot that its "previous" is done
+      first_tx = false;
+      send_slot_ind(UE->tx_resume_ind_fifo[curMsgTx->proc.nr_slot_tx], curMsgTx->proc.nr_slot_tx);
+    }
+    pushTpool(&(get_nrUE_params()->Tpool), MsgTx);
   } // while !oai_exit
   return NULL;
......
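A small worked example of the slot bookkeeping above, with assumed values (20 slots per frame, i.e. 30 kHz SCS; the constants mirror the ones used in UE_thread(), the concrete numbers are only for illustration):

/* Worked example (illustrative values only) of the rx/tx slot and
 * ACK/NACK slot arithmetic used in UE_thread() above. */
#include <stdio.h>

int main(void)
{
  const int nb_slot_frame = 20;     /* slots per frame, assumption for this example */
  const int DURATION_RX_TO_TX = 2;  /* as defined locally in UE_thread() */
  const int MAX_FRAME_NUMBER = 1024;

  int absolute_slot = 20468;        /* arbitrary running slot counter */
  int nr_slot_rx = absolute_slot % nb_slot_frame;                                          /* 8    */
  int nr_slot_tx = (absolute_slot + DURATION_RX_TO_TX) % nb_slot_frame;                    /* 10   */
  int frame_rx = (absolute_slot / nb_slot_frame) % MAX_FRAME_NUMBER;                       /* 1023 */
  int frame_tx = ((absolute_slot + DURATION_RX_TO_TX) / nb_slot_frame) % MAX_FRAME_NUMBER; /* 1023 */

  int k1_feedback = 4;              /* PDSCH-to-HARQ feedback timing taken from the DCI */
  int ack_nack_slot = (nr_slot_rx + k1_feedback) % nb_slot_frame;                          /* 12   */

  printf("rx %d.%d  tx %d.%d  ack/nack waits on slot %d\n",
         frame_rx, nr_slot_rx, frame_tx, nr_slot_tx, ack_nack_slot);
  return 0;
}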
@@ -268,14 +268,6 @@
 #define TDD_CONFIG_NB_FRAMES (2)
 #define NR_MAX_SLOTS_PER_FRAME (160) /* number of slots per frame */
-/* FFS_NR_TODO it defines ue capability which is the number of slots */
-/* - between reception of pdsch and tarnsmission of its acknowlegment */
-/* - between reception of un uplink grant and its related transmission */
-// should be 2 as per NR standard, but current UE is not able to perform this value
-#define NR_UE_CAPABILITY_SLOT_RX_TO_TX (3)
-#define DURATION_RX_TO_TX (NR_UE_CAPABILITY_SLOT_RX_TO_TX)
 #define NR_MAX_ULSCH_HARQ_PROCESSES (NR_MAX_HARQ_PROCESSES) /* cf 38.214 6.1 UE procedure for receiving the physical uplink shared channel */
 #define NR_MAX_DLSCH_HARQ_PROCESSES (NR_MAX_HARQ_PROCESSES) /* cf 38.214 5.1 UE procedure for receiving the physical downlink shared channel */
 #endif
......
@@ -686,7 +686,7 @@ static bool nr_ue_dlsch_procedures(PHY_VARS_NR_UE *ue, UE_nr_rxtx_proc_t *proc,
   if (is_cw0_active != ACTIVE && is_cw1_active != ACTIVE) {
     // don't wait anymore
     const int ack_nack_slot = (proc->nr_slot_rx + dlsch[0].dlsch_config.k1_feedback) % ue->frame_parms.slots_per_frame;
-    send_slot_ind(ue->tx_resume_ind_fifo[ack_nack_slot], proc->nr_slot_rx);
+    send_slot_ind(ue->tx_resume_ind_fifo[ack_nack_slot], ack_nack_slot);
     return false;
   }
@@ -828,7 +828,8 @@ static bool nr_ue_dlsch_procedures(PHY_VARS_NR_UE *ue, UE_nr_rxtx_proc_t *proc,
   // DLSCH decoding finished! don't wait anymore
   const int ack_nack_slot = (proc->nr_slot_rx + dlsch[0].dlsch_config.k1_feedback) % ue->frame_parms.slots_per_frame;
-  send_slot_ind(ue->tx_resume_ind_fifo[ack_nack_slot], proc->nr_slot_rx);
+  if (dlsch[0].rnti_type != _SI_RNTI_ && dlsch[0].rnti_type != _RA_RNTI_)
+    send_slot_ind(ue->tx_resume_ind_fifo[ack_nack_slot], ack_nack_slot);
   if (ue->phy_sim_dlsch_b)
     memcpy(ue->phy_sim_dlsch_b, p_b, dlsch_bytes);
@@ -1099,9 +1100,11 @@ void pdsch_processing(PHY_VARS_NR_UE *ue,
   if (ret_pdsch >= 0)
     nr_ue_dlsch_procedures(ue, proc, dlsch, llr);
-  else
+  else {
     // don't wait anymore
-    send_slot_ind(ue->tx_resume_ind_fifo[(proc->nr_slot_rx + dlsch_config->k1_feedback) % ue->frame_parms.slots_per_frame], proc->nr_slot_rx);
+    int slot = (proc->nr_slot_rx + dlsch[0].dlsch_config.k1_feedback) % ue->frame_parms.slots_per_frame;
+    send_slot_ind(ue->tx_resume_ind_fifo[slot], slot);
+  }
   stop_meas(&ue->dlsch_procedures_stat);
   if (cpumeas(CPUMEAS_GETSTATE)) {
......
@@ -761,12 +761,6 @@ static int nr_ue_process_dci_dl_10(module_id_t module_id,
     return -1;
   }
-  if (dci_ind->rnti != mac->ra.ra_rnti && dci_ind->rnti != SI_RNTI)
-    AssertFatal(1 + dci->pdsch_to_harq_feedback_timing_indicator.val > DURATION_RX_TO_TX,
-                "PDSCH to HARQ feedback time (%d) needs to be higher than DURATION_RX_TO_TX (%d).\n",
-                1 + dci->pdsch_to_harq_feedback_timing_indicator.val,
-                DURATION_RX_TO_TX);
   // set the harq status at MAC for feedback
   set_harq_status(mac,
                   dci->pucch_resource_indicator,
@@ -1078,12 +1072,6 @@ static int nr_ue_process_dci_dl_11(module_id_t module_id,
   // according to TS 38.213 Table 9.2.3-1
   uint8_t feedback_ti = pucch_Config->dl_DataToUL_ACK->list.array[dci->pdsch_to_harq_feedback_timing_indicator.val][0];
-  AssertFatal(feedback_ti > DURATION_RX_TO_TX,
-              "PDSCH to HARQ feedback time (%d) needs to be higher than DURATION_RX_TO_TX (%d). Min feedback time set in config "
-              "file (min_rxtxtime).\n",
-              feedback_ti,
-              DURATION_RX_TO_TX);
   // set the harq status at MAC for feedback
   set_harq_status(mac,
                   dci->pucch_resource_indicator,
......
@@ -884,9 +884,6 @@ void nr_ue_aperiodic_srs_scheduling(NR_UE_MAC_INST_t *mac, long resource_trigger
     return;
   }
-  AssertFatal(slot_offset > DURATION_RX_TO_TX,
-              "Slot offset between DCI and aperiodic SRS (%d) needs to be higher than DURATION_RX_TO_TX (%d)\n",
-              slot_offset, DURATION_RX_TO_TX);
   int n_slots_frame = nr_slots_per_frame[current_UL_BWP->scs];
   int sched_slot = (slot + slot_offset) % n_slots_frame;
   NR_TDD_UL_DL_ConfigCommon_t *tdd_config = mac->tdd_UL_DL_ConfigurationCommon;
@@ -1411,13 +1408,6 @@ int nr_get_sf_retxBSRTimer(uint8_t sf_offset) {
 // Note: Msg3 tx in the uplink symbols of mixed slot
 int nr_ue_pusch_scheduler(NR_UE_MAC_INST_t *mac, uint8_t is_Msg3, frame_t current_frame, int current_slot, frame_t *frame_tx, int *slot_tx, long k2)
 {
-  AssertFatal(k2 > DURATION_RX_TO_TX,
-              "Slot offset K2 (%ld) needs to be higher than DURATION_RX_TO_TX (%d). Please set min_rxtxtime at least to %d in gNB config file or gNBs.[0].min_rxtxtime=%d via command line.\n",
-              k2,
-              DURATION_RX_TO_TX,
-              DURATION_RX_TO_TX,
-              DURATION_RX_TO_TX);
   int delta = 0;
   NR_UE_UL_BWP_t *current_UL_BWP = &mac->current_UL_BWP;
@@ -1446,13 +1436,6 @@ int nr_ue_pusch_scheduler(NR_UE_MAC_INST_t *mac, uint8_t is_Msg3, frame_t curren
       AssertFatal(1 == 0, "Invalid numerology %i\n", mu);
   }
-  AssertFatal((k2 + delta) > DURATION_RX_TO_TX,
-              "Slot offset (%ld) for Msg3 needs to be higher than DURATION_RX_TO_TX (%d). Please set min_rxtxtime at least to %d in gNB config file or gNBs.[0].min_rxtxtime=%d via command line.\n",
-              k2,
-              DURATION_RX_TO_TX,
-              DURATION_RX_TO_TX,
-              DURATION_RX_TO_TX);
   *slot_tx = (current_slot + k2 + delta) % nr_slots_per_frame[mu];
   if (current_slot + k2 + delta >= nr_slots_per_frame[mu]){
     *frame_tx = (current_frame + 1) % 1024;
......
@@ -43,6 +43,7 @@
 //#include "targets/RT/USER/lte-softmodem.h"
 #include "executables/softmodem-common.h"
+#define MAX_GAP 100ULL
 const char *const devtype_names[MAX_RF_DEV_TYPE] =
     {"", "USRP B200", "USRP X300", "USRP N300", "USRP X400", "BLADERF", "LMSSDR", "IRIS", "No HW", "UEDv2", "RFSIMULATOR"};
@@ -173,3 +174,93 @@ int openair0_transport_load(openair0_device *device,
   return rc;
 }
+static void writerEnqueue(re_order_t *ctx, openair0_timestamp timestamp, void **txp, int nsamps, int nbAnt, int flags)
+{
+  pthread_mutex_lock(&ctx->mutex_store);
+  LOG_D(HW, "Enqueue write for TS: %lu\n", timestamp);
+  int i;
+  for (i = 0; i < WRITE_QUEUE_SZ; i++)
+    if (!ctx->queue[i].active) {
+      ctx->queue[i].timestamp = timestamp;
+      ctx->queue[i].active = true;
+      ctx->queue[i].nsamps = nsamps;
+      ctx->queue[i].nbAnt = nbAnt;
+      ctx->queue[i].flags = flags;
+      AssertFatal(nbAnt <= NB_ANTENNAS_TX, "");
+      for (int j = 0; j < nbAnt; j++)
+        ctx->queue[i].txp[j] = txp[j];
+      break;
+    }
+  AssertFatal(i < WRITE_QUEUE_SZ, "Write queue full\n");
+  pthread_mutex_unlock(&ctx->mutex_store);
+}
+
+static void writerProcessWaitingQueue(openair0_device *device)
+{
+  bool found = false;
+  re_order_t *ctx = &device->reOrder;
+  do {
+    found = false;
+    pthread_mutex_lock(&ctx->mutex_store);
+    for (int i = 0; i < WRITE_QUEUE_SZ; i++)
+      if (ctx->queue[i].active && llabs(ctx->queue[i].timestamp - ctx->nextTS) < MAX_GAP) {
+        openair0_timestamp timestamp = ctx->queue[i].timestamp;
+        LOG_D(HW, "Dequeue write for TS: %lu\n", timestamp);
+        int nsamps = ctx->queue[i].nsamps;
+        int nbAnt = ctx->queue[i].nbAnt;
+        int flags = ctx->queue[i].flags;
+        void *txp[NB_ANTENNAS_TX];
+        AssertFatal(nbAnt <= NB_ANTENNAS_TX, "");
+        for (int j = 0; j < nbAnt; j++)
+          txp[j] = ctx->queue[i].txp[j];
+        ctx->queue[i].active = false;
+        pthread_mutex_unlock(&ctx->mutex_store);
+        found = true;
+        if (flags || IS_SOFTMODEM_RFSIM) {
+          int wroteSamples = device->trx_write_func(device, timestamp, txp, nsamps, nbAnt, flags);
+          if (wroteSamples != nsamps)
+            LOG_E(HW, "Failed to write to rf\n");
+        }
+        ctx->nextTS += nsamps;
+        pthread_mutex_lock(&ctx->mutex_store);
+      }
+    pthread_mutex_unlock(&ctx->mutex_store);
+  } while (found);
+}
+
+// We assume the data behind *tx are permanently allocated
+int openair0_write_in_order(openair0_device *device, openair0_timestamp timestamp, void **txp, int nsamps, int nbAnt, int flags)
+{
+  int wroteSamples = 0;
+  re_order_t *ctx = &device->reOrder;
+  LOG_D(HW, "received write order ts: %lu, nb samples %d, flags %d\n", timestamp, nsamps, flags);
+  if (!ctx->initDone) {
+    ctx->nextTS = timestamp;
+    pthread_mutex_init(&ctx->mutex_write, NULL);
+    pthread_mutex_init(&ctx->mutex_store, NULL);
+    ctx->initDone = true;
+  }
+  if (pthread_mutex_trylock(&ctx->mutex_write) == 0) {
+    // We have the write exclusivity
+    if (llabs(timestamp - ctx->nextTS) < MAX_GAP) { // We are writing in sequence of the previous write
+      if (flags || IS_SOFTMODEM_RFSIM)
+        wroteSamples = device->trx_write_func(device, timestamp, txp, nsamps, nbAnt, flags);
+      else
+        wroteSamples = nsamps;
+      ctx->nextTS += nsamps;
+    } else {
+      writerEnqueue(ctx, timestamp, txp, nsamps, nbAnt, flags);
+    }
+    writerProcessWaitingQueue(device);
+    pthread_mutex_unlock(&ctx->mutex_write);
+    return wroteSamples ? wroteSamples : nsamps;
+  }
+  writerEnqueue(ctx, timestamp, txp, nsamps, nbAnt, flags);
+  if (pthread_mutex_trylock(&ctx->mutex_write) == 0) {
+    writerProcessWaitingQueue(device);
+    pthread_mutex_unlock(&ctx->mutex_write);
+  }
+  return nsamps;
+}
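openair0_write_in_order() above lets the per-slot TX jobs complete in any order while the radio still receives samples in timestamp order: a write that directly follows nextTS (within MAX_GAP samples) goes straight to trx_write_func(), anything else is parked in the queue and flushed once the gap has been filled. A rough single-threaded model of that reordering logic, for illustration only (the real function additionally uses mutex_write and mutex_store so that whichever thread currently holds the write lock drains the queue on behalf of the others; values below are made up):

/* Toy model of the in-order write logic; not the OAI code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define QUEUE_SZ 4
#define MAX_GAP  100

typedef struct { bool active; uint64_t ts; int nsamps; } pending_t;
static pending_t queue[QUEUE_SZ];
static uint64_t nextTS;

static void radio_write(uint64_t ts, int nsamps) /* stands in for trx_write_func */
{
  printf("write TS %lu (%d samples)\n", (unsigned long)ts, nsamps);
  nextTS = ts + nsamps;
}

static void flush_queue(void)
{
  bool found;
  do { /* keep draining as long as a parked write is now in sequence */
    found = false;
    for (int i = 0; i < QUEUE_SZ; i++)
      if (queue[i].active && llabs((long long)queue[i].ts - (long long)nextTS) < MAX_GAP) {
        radio_write(queue[i].ts, queue[i].nsamps);
        queue[i].active = false;
        found = true;
      }
  } while (found);
}

static void write_in_order(uint64_t ts, int nsamps)
{
  if (llabs((long long)ts - (long long)nextTS) < MAX_GAP) {
    radio_write(ts, nsamps);            /* in sequence: write immediately */
  } else {
    for (int i = 0; i < QUEUE_SZ; i++)  /* out of order: park it (no overflow check in this toy) */
      if (!queue[i].active) { queue[i] = (pending_t){true, ts, nsamps}; break; }
  }
  flush_queue();                        /* drain anything that is now in sequence */
}

int main(void)
{
  nextTS = 1000;              /* assume the stream starts at TS 1000 */
  write_in_order(2000, 1000); /* arrives early: parked               */
  write_in_order(1000, 1000); /* fills the gap: 1000 then 2000 written */
  return 0;
}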
@@ -362,6 +362,22 @@ typedef struct fhstate_s {
   int active;
 } fhstate_t;
+#define WRITE_QUEUE_SZ 20
+typedef struct {
+  bool initDone;
+  pthread_mutex_t mutex_write;
+  pthread_mutex_t mutex_store;
+  openair0_timestamp nextTS;
+  struct {
+    bool active;
+    openair0_timestamp timestamp;
+    void *txp[NB_ANTENNAS_TX];
+    int nsamps;
+    int nbAnt;
+    int flags;
+  } queue[WRITE_QUEUE_SZ];
+} re_order_t;
 /*!\brief structure holds the parameters to configure USRP devices */
 struct openair0_device_t {
   /*!tx write thread*/
@@ -564,6 +580,7 @@ struct openair0_device_t {
   /* \brief timing statistics for TX fronthaul (ethernet)
   */
   time_stats_t tx_fhaul;
+  re_order_t reOrder;
 };
 /* type of device init function, implemented in shared lib */
@@ -632,6 +649,7 @@ extern int read_recplayconfig(recplay_conf_t **recplay_conf, recplay_state_t **r
 /*! \brief store recorded iqs from memory to file. */
 extern void iqrecorder_end(openair0_device *device);
+int openair0_write_in_order(openair0_device *device, openair0_timestamp timestamp, void **txp, int nsamps, int nbAnt, int flags);
 #include <unistd.h>
 #ifndef gettid
......