Commit 00f23402 authored by Cedric Roux

Merge remote-tracking branch 'origin/bugfixes-2018-w10' into develop_integration_2018_w10

parents 727e29e3 71e7f971
...@@ -391,6 +391,103 @@ int itti_send_msg_to_task(task_id_t destination_task_id, instance_t instance, Me
return 0;
}
/* same as itti_send_msg_to_task but returns -1 in case of failure instead of crashing */
/* TODO: this is a hack - the whole logic needs a proper rework. */
/* look for HACK_RLC_UM_LIMIT for other places related to the hack. Please do not remove this comment. */
int itti_try_send_msg_to_task(task_id_t destination_task_id, instance_t instance, MessageDef *message)
{
thread_id_t destination_thread_id;
task_id_t origin_task_id;
message_list_t *new;
uint32_t priority;
message_number_t message_number;
uint32_t message_id;
AssertFatal (message != NULL, "Message is NULL!\n");
AssertFatal (destination_task_id < itti_desc.task_max, "Destination task id (%d) is out of range (%d)\n", destination_task_id, itti_desc.task_max);
destination_thread_id = TASK_GET_THREAD_ID(destination_task_id);
message->ittiMsgHeader.destinationTaskId = destination_task_id;
message->ittiMsgHeader.instance = instance;
message->ittiMsgHeader.lte_time.frame = itti_desc.lte_time.frame;
message->ittiMsgHeader.lte_time.slot = itti_desc.lte_time.slot;
message_id = message->ittiMsgHeader.messageId;
AssertFatal (message_id < itti_desc.messages_id_max, "Message id (%d) is out of range (%d)!\n", message_id, itti_desc.messages_id_max);
origin_task_id = ITTI_MSG_ORIGIN_ID(message);
priority = itti_get_message_priority (message_id);
/* Increment the global message number */
message_number = itti_increment_message_number ();
itti_dump_queue_message (origin_task_id, message_number, message, itti_desc.messages_info[message_id].name,
sizeof(MessageHeader) + message->ittiMsgHeader.ittiMsgSize);
if (destination_task_id != TASK_UNKNOWN) {
if (itti_desc.threads[destination_thread_id].task_state == TASK_STATE_ENDED) {
ITTI_DEBUG(ITTI_DEBUG_ISSUES, " Message %s, number %lu with priority %d can not be sent from %s to queue (%u:%s), ended destination task!\n",
itti_desc.messages_info[message_id].name,
message_number,
priority,
itti_get_task_name(origin_task_id),
destination_task_id,
itti_get_task_name(destination_task_id));
} else {
/* We cannot send a message if the task is not running */
AssertFatal (itti_desc.threads[destination_thread_id].task_state == TASK_STATE_READY,
"Task %s Cannot send message %s (%d) to thread %d, it is not in ready state (%d)!\n",
itti_get_task_name(origin_task_id),
itti_desc.messages_info[message_id].name,
message_id,
destination_thread_id,
itti_desc.threads[destination_thread_id].task_state);
/* Allocate new list element */
new = (message_list_t *) itti_malloc (origin_task_id, destination_task_id, sizeof(struct message_list_s));
/* Fill in members */
new->msg = message;
new->message_number = message_number;
new->message_priority = priority;
/* Enqueue message in destination task queue */
if (lfds611_queue_enqueue(itti_desc.tasks[destination_task_id].message_queue, new) == 0) {
itti_free(origin_task_id, new);
return -1;
}
{
/* Only use event fd for tasks, subtasks will poll the queue */
if (TASK_GET_PARENT_TASK_ID(destination_task_id) == TASK_UNKNOWN) {
ssize_t write_ret;
eventfd_t sem_counter = 1;
/* Call to write for an event fd must be of 8 bytes */
write_ret = write (itti_desc.threads[destination_thread_id].task_event_fd, &sem_counter, sizeof(sem_counter));
AssertFatal (write_ret == sizeof(sem_counter), "Write to task message FD (%d) failed (%d/%d)\n",
destination_thread_id, (int) write_ret, (int) sizeof(sem_counter));
}
}
ITTI_DEBUG(ITTI_DEBUG_SEND, " Message %s, number %lu with priority %d successfully sent from %s to queue (%u:%s)\n",
itti_desc.messages_info[message_id].name,
message_number,
priority,
itti_get_task_name(origin_task_id),
destination_task_id,
itti_get_task_name(destination_task_id));
}
} else {
/* This is a debug message to TASK_UNKNOWN, we can safely release it */
int result = itti_free(origin_task_id, message);
AssertFatal (result == EXIT_SUCCESS, "Failed to free memory (%d)!\n", result);
}
return 0;
}
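The wake-up path in the branch above relies on plain Linux eventfd semantics: the sender writes an 8-byte counter to the destination task's event fd, and the receiving task's event loop reads it back to learn that messages are pending. A minimal standalone sketch of that contract (not OAI code, just the eventfd behaviour the function depends on):

#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
  int efd = eventfd(0, 0);               /* counting eventfd, initial value 0 */
  if (efd == -1) { perror("eventfd"); return 1; }

  /* sender side: every write must be exactly 8 bytes (an eventfd_t) */
  eventfd_t sem_counter = 1;
  write(efd, &sem_counter, sizeof(sem_counter));
  write(efd, &sem_counter, sizeof(sem_counter));

  /* receiver side: one read returns the accumulated count (2 here) and
   * resets it, so a single wake-up can cover several queued messages */
  eventfd_t pending = 0;
  read(efd, &pending, sizeof(pending));
  printf("pending notifications: %llu\n", (unsigned long long) pending);

  close(efd);
  return 0;
}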
void itti_subscribe_event_fd(task_id_t task_id, int fd)
{
thread_id_t thread_id;
...
...@@ -108,6 +108,18 @@ int itti_send_broadcast_message(MessageDef *message_p);
**/
int itti_send_msg_to_task(task_id_t task_id, instance_t instance, MessageDef *message);
/* TODO: this is a hack. Almost no caller of itti_send_msg_to_task checks
* the return value, so it has been changed to crash the program in case
* of failure instead of returning -1 as the documentation above says.
* The RLC UM code may receive too much data when UDP traffic arrives at a
* higher throughput than the link allows, so for this specific case we need
* a version that actually returns -1 on failure.
*
* This needs to be cleaned up at some point.
*/
/* look for HACK_RLC_UM_LIMIT for other places related to the hack. Please do not remove this comment. */
int itti_try_send_msg_to_task(task_id_t task_id, instance_t instance, MessageDef *message);
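A minimal caller sketch of how this variant is meant to be used, assuming the caller owns message_p and may simply drop it when the destination queue is full (TASK_EXAMPLE and the UDP_DATA_IND payload handling are placeholders; the real usage is in the udp_eNB_receiver change further below):

/* sketch only: TASK_EXAMPLE is a hypothetical origin task */
static void try_forward_packet(void)
{
  MessageDef *message_p = itti_alloc_new_message(TASK_EXAMPLE, UDP_DATA_IND);

  /* ... fill UDP_DATA_IND(message_p) here ... */

  if (itti_try_send_msg_to_task(TASK_UDP, INSTANCE_DEFAULT, message_p) < 0) {
    /* queue full or destination task ended: free the message and carry on */
    itti_free(TASK_EXAMPLE, message_p);
  }
}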
/** \brief Add a new fd to monitor.
* NOTE: it is up to the user to read data associated with the fd
* \param task_id Task ID of the receiving task
...
...@@ -738,6 +738,8 @@ typedef struct RU_t_s{
void (*fh_south_asynch_in)(struct RU_t_s *ru,int *frame, int *subframe);
/// function pointer to initialization function for radio interface
int (*start_rf)(struct RU_t_s *ru);
/// function pointer to release function for radio interface
int (*stop_rf)(struct RU_t_s *ru);
/// function pointer to initialization function for radio interface
int (*start_if)(struct RU_t_s *ru,struct PHY_VARS_eNB_s *eNB);
/// function pointer to RX front-end processing routine (DFTs/prefix removal or NULL)
...
...@@ -154,7 +154,7 @@ schedule_next_dlue(module_id_t module_idP, int CC_id,
}
//------------------------------------------------------------------------------
- unsigned char
+ int
generate_dlsch_header(unsigned char *mac_header,
unsigned char num_sdus,
unsigned short *sdu_lengths,
...@@ -344,7 +344,6 @@ generate_dlsch_header(unsigned char *mac_header,
//msg("After CEs %d\n",(uint8_t*)mac_header_ptr - mac_header);
return ((unsigned char *) mac_header_ptr - mac_header);
}
//------------------------------------------------------------------------------
...@@ -563,33 +562,28 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
frame_t frameP, sub_frame_t subframeP, int *mbsfn_flag)
//------------------------------------------------------------------------------
{
- uint8_t CC_id;
+ int CC_id;
int UE_id;
- unsigned char aggregation;
+ int aggregation;
mac_rlc_status_resp_t rlc_status;
- unsigned char header_len_dcch = 0, header_len_dcch_tmp = 0;
- unsigned char header_len_dtch = 0, header_len_dtch_tmp = 0, header_len_dtch_last = 0;
- unsigned char ta_len = 0;
- unsigned char sdu_lcids[NB_RB_MAX], lcid, offset, num_sdus = 0;
- uint16_t nb_rb, nb_rb_temp, nb_available_rb;
- uint16_t TBS, j, sdu_lengths[NB_RB_MAX], rnti, padding = 0, post_padding = 0;
+ int ta_len = 0;
+ unsigned char sdu_lcids[NB_RB_MAX];
+ int lcid, offset, num_sdus = 0;
+ int nb_rb, nb_rb_temp, nb_available_rb;
+ uint16_t sdu_lengths[NB_RB_MAX];
+ int TBS, j, rnti, padding = 0, post_padding = 0;
unsigned char dlsch_buffer[MAX_DLSCH_PAYLOAD_BYTES];
- unsigned char round = 0;
- unsigned char harq_pid = 0;
+ int round = 0;
+ int harq_pid = 0;
eNB_UE_STATS *eNB_UE_stats = NULL;
- uint16_t sdu_length_total = 0;
+ int sdu_length_total = 0;
eNB_MAC_INST *eNB = RC.mac[module_idP];
COMMON_channels_t *cc = eNB->common_channels;
UE_list_t *UE_list = &eNB->UE_list;
int continue_flag = 0;
int32_t normalized_rx_power, target_rx_power;
- int32_t tpc = 1;
- static int32_t tpc_accumulated = 0;
+ int tpc = 1;
UE_sched_ctrl *ue_sched_ctl;
int mcs;
int i;
...@@ -601,6 +595,8 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
nfapi_dl_config_request_pdu_t *dl_config_pdu;
int tdd_sfa;
int ta_update;
+ int header_length_last;
+ int header_length_total;
#if 0
if (UE_list->head == -1) {
...@@ -792,6 +788,7 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
if (UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status < RRC_CONNECTED) continue;
+ header_length_total = 0;
sdu_length_total = 0;
num_sdus = 0;
...@@ -838,8 +835,7 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
if (cc[CC_id].tdd_Config != NULL) {
UE_list->UE_template[CC_id][UE_id].DAI++;
update_ul_dci(module_idP, CC_id, rnti,
- UE_list->UE_template[CC_id][UE_id].
- DAI);
+ UE_list->UE_template[CC_id][UE_id].DAI);
LOG_D(MAC,
"DAI update: CC_id %d subframeP %d: UE %d, DAI %d\n",
CC_id, subframeP, UE_id,
...@@ -988,13 +984,12 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
module_idP, frameP, CC_id, UE_id);
}
} else { /* This is a potentially new SDU opportunity */
rlc_status.bytes_in_buffer = 0;
// Now check RLC information to compute number of required RBs
// get maximum TBS size for RLC request
- TBS =
- get_TBS_DL(eNB_UE_stats->dlsch_mcs1, nb_available_rb);
- // check first for RLC data on DCCH
+ TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, nb_available_rb);
// add the length for all the control elements (timing adv, drx, etc) : header + payload
if (ue_sched_ctl->ta_timer == 0) {
...@@ -1009,38 +1004,42 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
ta_len = (ta_update != 31) ? 2 : 0;
- header_len_dcch = 2; // 2 bytes DCCH SDU subheader
- if (TBS - ta_len - header_len_dcch > 0) {
- rlc_status = mac_rlc_status_ind(module_idP, rnti, module_idP, frameP, subframeP, ENB_FLAG_YES, MBMS_FLAG_NO, DCCH, (TBS - ta_len - header_len_dcch)); // transport block set size
+ // RLC data on DCCH
+ if (TBS - ta_len - header_length_total - sdu_length_total - 3 > 0) {
+ rlc_status = mac_rlc_status_ind(module_idP, rnti, module_idP, frameP, subframeP, ENB_FLAG_YES, MBMS_FLAG_NO, DCCH,
+ TBS - ta_len - header_length_total - sdu_length_total - 3);
sdu_lengths[0] = 0;
- if (rlc_status.bytes_in_buffer > 0) { // There is DCCH to transmit
- LOG_D(MAC,
- "[eNB %d] SFN/SF %d.%d, DL-DCCH->DLSCH CC_id %d, Requesting %d bytes from RLC (RRC message)\n",
+ if (rlc_status.bytes_in_buffer > 0) {
+ LOG_D(MAC, "[eNB %d] SFN/SF %d.%d, DL-DCCH->DLSCH CC_id %d, Requesting %d bytes from RLC (RRC message)\n",
module_idP, frameP, subframeP, CC_id,
- TBS - header_len_dcch);
- sdu_lengths[0] = mac_rlc_data_req(module_idP, rnti, module_idP, frameP, ENB_FLAG_YES, MBMS_FLAG_NO, DCCH, TBS, //not used
- (char *)
- &dlsch_buffer
- [0]);
+ TBS - ta_len - header_length_total - sdu_length_total - 3);
+ sdu_lengths[0] = mac_rlc_data_req(module_idP, rnti, module_idP, frameP, ENB_FLAG_YES, MBMS_FLAG_NO, DCCH,
+ TBS, //not used
+ (char *)&dlsch_buffer[0]);
T(T_ENB_MAC_UE_DL_SDU, T_INT(module_idP),
T_INT(CC_id), T_INT(rnti), T_INT(frameP),
T_INT(subframeP), T_INT(harq_pid), T_INT(DCCH),
T_INT(sdu_lengths[0]));
- LOG_D(MAC,
- "[eNB %d][DCCH] CC_id %d Got %d bytes from RLC\n",
+ LOG_D(MAC, "[eNB %d][DCCH] CC_id %d Got %d bytes from RLC\n",
module_idP, CC_id, sdu_lengths[0]);
sdu_length_total = sdu_lengths[0];
sdu_lcids[0] = DCCH;
UE_list->eNB_UE_stats[CC_id][UE_id].lcid_sdu[0] = DCCH;
UE_list->eNB_UE_stats[CC_id][UE_id].sdu_length_tx[DCCH] = sdu_lengths[0];
UE_list->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[DCCH] += 1;
UE_list->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[DCCH] += sdu_lengths[0];
+ header_length_last = 1 + 1 + (sdu_lengths[0] >= 128);
+ header_length_total += header_length_last;
num_sdus = 1;
#ifdef DEBUG_eNB_SCHEDULER
LOG_T(MAC,
"[eNB %d][DCCH] CC_id %d Got %d bytes :",
...@@ -1052,26 +1051,25 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
LOG_T(MAC, "\n");
#endif
- } else {
- header_len_dcch = 0;
- sdu_length_total = 0;
}
}
- // check for DCCH1 and update header information (assume 2 byte sub-header)
- if (TBS - ta_len - header_len_dcch - sdu_length_total > 0) {
- rlc_status = mac_rlc_status_ind(module_idP, rnti, module_idP, frameP, subframeP, ENB_FLAG_YES, MBMS_FLAG_NO, DCCH + 1, (TBS - ta_len - header_len_dcch - sdu_length_total)); // transport block set size less allocations for timing advance and
+ // RLC data on DCCH1
+ if (TBS - ta_len - header_length_total - sdu_length_total - 3 > 0) {
+ rlc_status = mac_rlc_status_ind(module_idP, rnti, module_idP, frameP, subframeP, ENB_FLAG_YES, MBMS_FLAG_NO, DCCH + 1,
+ TBS - ta_len - header_length_total - sdu_length_total - 3);
// DCCH SDU
sdu_lengths[num_sdus] = 0;
if (rlc_status.bytes_in_buffer > 0) {
- LOG_D(MAC,
- "[eNB %d], Frame %d, DCCH1->DLSCH, CC_id %d, Requesting %d bytes from RLC (RRC message)\n",
+ LOG_D(MAC, "[eNB %d], Frame %d, DCCH1->DLSCH, CC_id %d, Requesting %d bytes from RLC (RRC message)\n",
module_idP, frameP, CC_id,
- TBS - header_len_dcch - sdu_length_total);
- sdu_lengths[num_sdus] += mac_rlc_data_req(module_idP, rnti, module_idP, frameP, ENB_FLAG_YES, MBMS_FLAG_NO, DCCH + 1, TBS, //not used
- (char *)
- &dlsch_buffer
- [sdu_length_total]);
+ TBS - ta_len - header_length_total - sdu_length_total - 3);
+ sdu_lengths[num_sdus] += mac_rlc_data_req(module_idP, rnti, module_idP, frameP, ENB_FLAG_YES, MBMS_FLAG_NO, DCCH + 1,
+ TBS, //not used
+ (char *)&dlsch_buffer[sdu_length_total]);
T(T_ENB_MAC_UE_DL_SDU, T_INT(module_idP),
T_INT(CC_id), T_INT(rnti), T_INT(frameP),
...@@ -1080,13 +1078,16 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
sdu_lcids[num_sdus] = DCCH1;
sdu_length_total += sdu_lengths[num_sdus];
- header_len_dcch += 2;
UE_list->eNB_UE_stats[CC_id][UE_id].lcid_sdu[num_sdus] = DCCH1;
UE_list->eNB_UE_stats[CC_id][UE_id].sdu_length_tx[DCCH1] = sdu_lengths[num_sdus];
- UE_list->eNB_UE_stats[CC_id][UE_id].
- num_pdu_tx[DCCH1] += 1;
+ UE_list->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[DCCH1] += 1;
UE_list->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[DCCH1] += sdu_lengths[num_sdus];
+ header_length_last = 1 + 1 + (sdu_lengths[num_sdus] >= 128);
+ header_length_total += header_length_last;
num_sdus++;
#ifdef DEBUG_eNB_SCHEDULER
LOG_T(MAC,
"[eNB %d][DCCH1] CC_id %d Got %d bytes :",
...@@ -1098,25 +1099,18 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
LOG_T(MAC, "\n");
#endif
}
}
- // assume the max dtch header size, and adjust it later
- header_len_dtch = 0;
- header_len_dtch_last = 0; // the header length of the last mac sdu
- // lcid has to be sorted before the actual allocation (similar struct as ue_list).
+ // TODO: lcid has to be sorted before the actual allocation (similar struct as ue_list).
for (lcid = NB_RB_MAX - 1; lcid >= DTCH; lcid--) {
- // TBD: check if the lcid is active
- header_len_dtch += 3;
- header_len_dtch_last = 3;
- LOG_D(MAC,
- "[eNB %d], Frame %d, DTCH%d->DLSCH, Checking RLC status (tbs %d, len %d)\n",
+ // TODO: check if the lcid is active
+ LOG_D(MAC, "[eNB %d], Frame %d, DTCH%d->DLSCH, Checking RLC status (tbs %d, len %d)\n",
module_idP, frameP, lcid, TBS,
- TBS - ta_len - header_len_dcch -
- sdu_length_total - header_len_dtch);
+ TBS - ta_len - header_length_total - sdu_length_total - 3);
- if (TBS - ta_len - header_len_dcch - sdu_length_total - header_len_dtch > 0) { // NN: > 2 ?
+ if (TBS - ta_len - header_length_total - sdu_length_total - 3 > 0) {
rlc_status = mac_rlc_status_ind(module_idP,
rnti,
module_idP,
...@@ -1125,25 +1119,22 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
ENB_FLAG_YES,
MBMS_FLAG_NO,
lcid,
- TBS - ta_len -
- header_len_dcch -
- sdu_length_total -
- header_len_dtch);
+ TBS - ta_len - header_length_total - sdu_length_total - 3);
if (rlc_status.bytes_in_buffer > 0) {
LOG_D(MAC,
"[eNB %d][USER-PLANE DEFAULT DRB] Frame %d : DTCH->DLSCH, Requesting %d bytes from RLC (lcid %d total hdr len %d)\n",
module_idP, frameP,
- TBS - header_len_dcch -
- sdu_length_total - header_len_dtch, lcid,
- header_len_dtch);
+ TBS - ta_len - header_length_total - sdu_length_total - 3,
+ lcid,
+ header_length_total);
- sdu_lengths[num_sdus] = mac_rlc_data_req(module_idP, rnti, module_idP, frameP, ENB_FLAG_YES, MBMS_FLAG_NO, lcid, TBS, //not used
- (char
- *)
- &dlsch_buffer
- [sdu_length_total]);
+ sdu_lengths[num_sdus] = mac_rlc_data_req(module_idP, rnti, module_idP, frameP, ENB_FLAG_YES, MBMS_FLAG_NO, lcid,
+ TBS, //not used
+ (char *)&dlsch_buffer[sdu_length_total]);
T(T_ENB_MAC_UE_DL_SDU, T_INT(module_idP),
T_INT(CC_id), T_INT(rnti), T_INT(frameP),
T_INT(subframeP), T_INT(harq_pid),
...@@ -1152,50 +1143,39 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
LOG_D(MAC,
"[eNB %d][USER-PLANE DEFAULT DRB] Got %d bytes for DTCH %d \n",
module_idP, sdu_lengths[num_sdus], lcid);
sdu_lcids[num_sdus] = lcid;
sdu_length_total += sdu_lengths[num_sdus];
- UE_list->
- eNB_UE_stats[CC_id][UE_id].num_pdu_tx[lcid]
- += 1;
+ UE_list->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[lcid]++;
UE_list->eNB_UE_stats[CC_id][UE_id].lcid_sdu[num_sdus] = lcid;
UE_list->eNB_UE_stats[CC_id][UE_id].sdu_length_tx[lcid] = sdu_lengths[num_sdus];
- UE_list->
- eNB_UE_stats[CC_id][UE_id].num_bytes_tx
- [lcid] += sdu_lengths[num_sdus];
- if (sdu_lengths[num_sdus] < 128) {
- header_len_dtch--;
- header_len_dtch_last--;
- }
+ UE_list->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[lcid] += sdu_lengths[num_sdus];
+ header_length_last = 1 + 1 + (sdu_lengths[num_sdus] >= 128);
+ header_length_total += header_length_last;
num_sdus++;
UE_list->UE_sched_ctrl[UE_id].uplane_inactivity_timer = 0;
- } // no data for this LCID
- else {
- header_len_dtch -= 3;
}
- } // no TBS left
- else {
- header_len_dtch -= 3;
+ } else {
+ // no TBS left
break;
}
}
- if (header_len_dtch == 0)
- header_len_dtch_last = 0;
- // there is at least one SDU
+ /* last header does not have length field */
+ if (header_length_total) {
+ header_length_total -= header_length_last;
+ header_length_total++;
+ }
+ // there is at least one SDU or TA command
// if (num_sdus > 0 ){
- if ((sdu_length_total + header_len_dcch +
- header_len_dtch) > 0) {
+ if (ta_len + sdu_length_total + header_length_total > 0) {
// Now compute number of required RBs for total sdu length
// Assume RAH format 2
- // adjust header lengths
- header_len_dcch_tmp = header_len_dcch;
- header_len_dtch_tmp = header_len_dtch;
- if (header_len_dtch == 0) {
- header_len_dcch = (header_len_dcch > 0) ? 1 : 0; //header_len_dcch; // remove length field
- } else {
- header_len_dtch_last -= 1; // now use it to find how many bytes has to be removed for the last MAC SDU
- header_len_dtch = (header_len_dtch > 0) ? header_len_dtch - header_len_dtch_last : header_len_dtch; // remove length field for the last SDU
- }
mcs = eNB_UE_stats->dlsch_mcs1;
...@@ -1207,16 +1187,12 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
TBS = get_TBS_DL(mcs, nb_rb);
- while (TBS <
- (sdu_length_total + header_len_dcch +
- header_len_dtch + ta_len)) {
+ while (TBS < sdu_length_total + header_length_total + ta_len) {
nb_rb += min_rb_unit[CC_id]; //
if (nb_rb > nb_available_rb) { // if we've gone beyond the maximum number of RBs
// (can happen if N_RB_DL is odd)
- TBS =
- get_TBS_DL(eNB_UE_stats->dlsch_mcs1,
- nb_available_rb);
+ TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, nb_available_rb);
nb_rb = nb_available_rb;
break;
}
...@@ -1250,13 +1226,13 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
}
// decrease mcs until TBS falls below required length
- while ((TBS > (sdu_length_total + header_len_dcch + header_len_dtch + ta_len)) && (mcs > 0)) {
+ while ((TBS > sdu_length_total + header_length_total + ta_len) && (mcs > 0)) {
mcs--;
TBS = get_TBS_DL(mcs, nb_rb);
}
// if we have decreased too much or we don't have enough RBs, increase MCS
- while ((TBS < (sdu_length_total + header_len_dcch + header_len_dtch + ta_len))
+ while ((TBS < sdu_length_total + header_length_total + ta_len)
&& (((ue_sched_ctl->dl_pow_off[CC_id] > 0)
&& (mcs < 28))
|| ((ue_sched_ctl->dl_pow_off[CC_id] == 0)
...@@ -1277,23 +1253,14 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
// TBS, sdu_length_total, offset, TBS-sdu_length_total-offset);
#endif
- if ((TBS - header_len_dcch - header_len_dtch - sdu_length_total - ta_len) <= 2) {
- padding = (TBS - header_len_dcch - header_len_dtch - sdu_length_total - ta_len);
+ if (TBS - header_length_total - sdu_length_total - ta_len <= 2) {
+ padding = TBS - header_length_total - sdu_length_total - ta_len;
post_padding = 0;
} else {
padding = 0;
- // adjust the header len
- if (header_len_dtch == 0) {
- header_len_dcch = header_len_dcch_tmp;
- } else { //if (( header_len_dcch==0)&&((header_len_dtch==1)||(header_len_dtch==2)))
- header_len_dtch = header_len_dtch_tmp;
- }
- post_padding = TBS - sdu_length_total - header_len_dcch - header_len_dtch - ta_len; // 1 is for the postpadding header
+ post_padding = 1;
}
offset = generate_dlsch_header((unsigned char *) UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0], num_sdus, //num_sdus
sdu_lengths, //
sdu_lcids, 255, // no drx
...@@ -1304,12 +1271,12 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
//#ifdef DEBUG_eNB_SCHEDULER
if (ta_update != 31) {
LOG_D(MAC,
- "[eNB %d][DLSCH] Frame %d Generate header for UE_id %d on CC_id %d: sdu_length_total %d, num_sdus %d, sdu_lengths[0] %d, sdu_lcids[0] %d => payload offset %d,timing advance value : %d, padding %d,post_padding %d,(mcs %d, TBS %d, nb_rb %d),header_dcch %d, header_dtch %d\n",
+ "[eNB %d][DLSCH] Frame %d Generate header for UE_id %d on CC_id %d: sdu_length_total %d, num_sdus %d, sdu_lengths[0] %d, sdu_lcids[0] %d => payload offset %d,timing advance value : %d, padding %d,post_padding %d,(mcs %d, TBS %d, nb_rb %d),header_length %d\n",
module_idP, frameP, UE_id, CC_id,
sdu_length_total, num_sdus, sdu_lengths[0],
sdu_lcids[0], offset, ta_update, padding,
post_padding, mcs, TBS, nb_rb,
- header_len_dcch, header_len_dtch);
+ header_length_total);
}
//#endif
#ifdef DEBUG_eNB_SCHEDULER
...@@ -1327,11 +1294,16 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
dlsch_buffer, sdu_length_total);
// memcpy(RC.mac[0].DLSCH_pdu[0][0].payload[0][offset],dcch_buffer,sdu_lengths[0]);
+ #if 0
// fill remainder of DLSCH with random data
for (j = 0; j < (TBS - sdu_length_total - offset); j++) {
UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][offset + sdu_length_total + j] = (char) (taus() & 0xff);
}
+ #endif
+ // fill remainder of DLSCH with 0
+ for (j = 0; j < (TBS - sdu_length_total - offset); j++) {
+ UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][offset + sdu_length_total + j] = 0;
+ }
if (opt_enabled == 1) {
trace_pdu(1, (uint8_t *)
...@@ -1348,8 +1320,7 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
T(T_ENB_MAC_UE_DL_PDU_WITH_DATA, T_INT(module_idP),
T_INT(CC_id), T_INT(rnti), T_INT(frameP),
T_INT(subframeP), T_INT(harq_pid),
- T_BUFFER(UE_list->DLSCH_pdu[CC_id][0][UE_id].
- payload[0], TBS));
+ T_BUFFER(UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0], TBS));
UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid] = nb_rb;
...@@ -1378,6 +1349,7 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
UE_list->UE_template[CC_id][UE_id].
DAI);
}
// do PUCCH power control
// this is the normalized RX power
eNB_UE_stats = &UE_list->eNB_UE_stats[CC_id][UE_id];
...@@ -1399,18 +1371,15 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
if (normalized_rx_power > (target_rx_power + 4)) {
tpc = 0; //-1
- tpc_accumulated--;
} else if (normalized_rx_power < (target_rx_power - 4)) {
tpc = 2; //+1
- tpc_accumulated++;
} else {
tpc = 1; //0
}
LOG_D(MAC,
- "[eNB %d] DLSCH scheduler: frame %d, subframe %d, harq_pid %d, tpc %d, accumulated %d, normalized/target rx power %d/%d\n",
- module_idP, frameP, subframeP, harq_pid,
- tpc, tpc_accumulated,
+ "[eNB %d] DLSCH scheduler: frame %d, subframe %d, harq_pid %d, tpc %d, normalized/target rx power %d/%d\n",
+ module_idP, frameP, subframeP, harq_pid, tpc,
normalized_rx_power, target_rx_power);
} // Po_PUCCH has been updated
...@@ -1456,13 +1425,13 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
module_idP, CC_id, harq_pid, mcs);
}
LOG_D(MAC, "Checking feasibility pdu %d (new sdu)\n",
dl_req->number_pdu);
if (!CCE_allocation_infeasible(module_idP, CC_id, 1, subframeP,
dl_config_pdu->dci_dl_pdu.
dci_dl_pdu_rel8.aggregation_level, rnti)) {
ue_sched_ctl->round[CC_id][harq_pid] = 0;
dl_req->number_dci++;
dl_req->number_pdu++;
...@@ -1482,8 +1451,7 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid] = 1 - UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid];
UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid] = mcs;
UE_list->UE_template[CC_id][UE_id].oldmcs2[harq_pid] = 0;
- AssertFatal(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated !=
- NULL,
+ AssertFatal(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated != NULL,
"physicalConfigDedicated is NULL\n");
AssertFatal(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated != NULL,
"physicalConfigDedicated->pdsch_ConfigDedicated is NULL\n");
...@@ -1506,15 +1474,10 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
0, //number of PRBs treated as one subband, not used here
0 // number of beamforming vectors, not used here
);
- eNB->TX_req[CC_id].sfn_sf = fill_nfapi_tx_req(&eNB->
- TX_req
- [CC_id].tx_request_body,
+ eNB->TX_req[CC_id].sfn_sf = fill_nfapi_tx_req(&eNB->TX_req[CC_id].tx_request_body,
(frameP * 10) + subframeP,
TBS, eNB->pdu_index[CC_id],
- eNB->
- UE_list.DLSCH_pdu[CC_id][0][(unsigned char)
- UE_id].payload
- [0]);
+ eNB->UE_list.DLSCH_pdu[CC_id][0][UE_id].payload[0]);
LOG_D(MAC,
"Filled NFAPI configuration for DCI/DLSCH/TXREQ %d, new SDU\n",
...@@ -1523,10 +1486,7 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
eNB->pdu_index[CC_id]++;
program_dlsch_acknak(module_idP, CC_id, UE_id,
frameP, subframeP,
- dl_config_pdu->
- dci_dl_pdu.dci_dl_pdu_rel8.
- cce_idx);
+ dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.cce_idx);
} else {
LOG_W(MAC,
"Frame %d, Subframe %d: Dropping DLSCH allocation for UE %d/%x, infeasible CCE allocations\n",
...@@ -1550,7 +1510,6 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP,
stop_meas(&eNB->schedule_dlsch);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_SCHEDULE_DLSCH, VCD_FUNCTION_OUT);
}
//------------------------------------------------------------------------------
...
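The new header_length accounting matches the LTE MAC (36.321) subheader layout used by generate_dlsch_header: a subheader with an L field costs 2 bytes for an SDU shorter than 128 bytes and 3 bytes otherwise, and the last subheader of the PDU carries no L field, which is what the header_length_total -= header_length_last; header_length_total++; adjustment expresses. A small self-contained sketch of that rule (helper names are illustrative, not part of the merge):

#include <stdio.h>

/* Size in bytes of one R/R/E/LCID/F/L MAC subheader for an SDU of sdu_len bytes:
 * 1 byte of flags/LCID plus 1 or 2 bytes of L field (7-bit vs 15-bit length). */
static int mac_subheader_len(int sdu_len)
{
  return 1 + 1 + (sdu_len >= 128);
}

/* Total header cost for a list of SDUs, with the last subheader stripped of
 * its L field (it is implicit: the last SDU runs to the end of the PDU). */
static int mac_header_total(const int *sdu_len, int num_sdus)
{
  int total = 0, last = 0;
  for (int i = 0; i < num_sdus; i++) {
    last = mac_subheader_len(sdu_len[i]);
    total += last;
  }
  if (total) {
    total -= last;  /* drop the full last subheader ... */
    total += 1;     /* ... and keep only its 1-byte LCID part */
  }
  return total;
}

int main(void)
{
  int sdus[] = { 40, 300, 20 };  /* e.g. one DCCH SDU and two DTCH SDUs */
  printf("header bytes: %d\n", mac_header_total(sdus, 3));  /* 2 + 3 + 1 = 6 */
  return 0;
}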
...@@ -855,15 +855,15 @@ in the DLSCH buffer.
@param post_padding number of bytes for padding at the end of MAC PDU
@returns Number of bytes used for header
*/
- unsigned char generate_dlsch_header(unsigned char *mac_header,
+ int generate_dlsch_header(unsigned char *mac_header,
unsigned char num_sdus,
unsigned short *sdu_lengths,
unsigned char *sdu_lcids,
unsigned char drx_cmd,
unsigned short timing_advance_cmd,
unsigned char *ue_cont_res_id,
unsigned char short_padding,
unsigned short post_padding);
/** \brief RRC eNB Configuration primitive for PHY/MAC. Allows configuration of PHY/MAC resources based on System Information (SI), RRCConnectionSetup and RRCConnectionReconfiguration messages.
@param Mod_id Instance ID of eNB
...
...@@ -437,6 +437,15 @@ rlc_op_status_t rlc_data_req (const protocol_ctxt_t* const ctxt_pP,
break;
case RLC_MODE_UM:
/* TODO: this is a hack, needs a better solution. Let's not use too
* much memory and store at most 5 million bytes.
*/
/* look for HACK_RLC_UM_LIMIT for other places related to the hack. Please do not remove this comment. */
if (rlc_um_get_buffer_occupancy(&rlc_union_p->rlc.um) > 5000000) {
free_mem_block(sdu_pP, __func__);
return RLC_OP_STATUS_OUT_OF_RESSOURCES;
}
new_sdu_p = get_free_mem_block (sdu_sizeP + sizeof (struct rlc_um_data_req_alloc), __func__);
if (new_sdu_p != NULL) {
...
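The 5,000,000-byte cap above is a plain occupancy-based backpressure check: before queueing another SDU, the UM entity's buffer occupancy is compared against a hard limit and the SDU is rejected (and its mem_block freed) once the limit is exceeded. A compact standalone sketch of the same pattern, with the limit named after the HACK_RLC_UM_LIMIT marker used in the comments (names and types are illustrative, not from the merge):

#include <stdbool.h>
#include <stddef.h>

/* Illustrative hard cap on buffered UM SDU bytes, mirroring the 5 MB figure
 * used in the merge; the real code compares rlc_um_get_buffer_occupancy(). */
#define HACK_RLC_UM_LIMIT_BYTES 5000000

struct um_tx_queue {
  size_t bytes_in_queue;   /* running occupancy of queued SDUs */
};

/* Returns true if the SDU was accepted, false if the caller must drop/free it. */
static bool um_enqueue_sdu(struct um_tx_queue *q, size_t sdu_size)
{
  if (q->bytes_in_queue > HACK_RLC_UM_LIMIT_BYTES)
    return false;          /* out of resources: reject instead of growing unbounded */
  q->bytes_in_queue += sdu_size;
  return true;
}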
...@@ -270,10 +270,21 @@ void udp_eNB_receiver(struct udp_socket_desc_s *udp_sock_pP)
n, inet_ntoa(addr.sin_addr), ntohs(addr.sin_port));
#endif
- if (itti_send_msg_to_task(udp_sock_pP->task_id, INSTANCE_DEFAULT, message_p) < 0) {
+ /* TODO: this is a hack. Let's accept failures and do nothing when
+ * it happens. Since itti_send_msg_to_task crashes when the message
+ * queue is full we wrote itti_try_send_msg_to_task that returns -1
+ * if the queue is full.
+ */
+ /* look for HACK_RLC_UM_LIMIT for other places related to the hack. Please do not remove this comment. */
+ //if (itti_send_msg_to_task(udp_sock_pP->task_id, INSTANCE_DEFAULT, message_p) < 0) {
+ if (itti_try_send_msg_to_task(udp_sock_pP->task_id, INSTANCE_DEFAULT, message_p) < 0) {
+ #if 0
LOG_I(UDP_, "Failed to send message %d to task %d\n",
UDP_DATA_IND,
udp_sock_pP->task_id);
+ #endif
+ itti_free(TASK_UDP, message_p);
+ itti_free(TASK_UDP, forwarded_buffer);
return;
}
}
...
...@@ -163,13 +163,6 @@ extern void add_subframe(uint16_t *frameP, uint16_t *subframeP, int offset);
static inline int rxtx(PHY_VARS_eNB *eNB,eNB_rxtx_proc_t *proc, char *thread_name) {
- static double cpu_freq_GHz = 0.0;
- if (cpu_freq_GHz == 0.0)
- cpu_freq_GHz = get_cpu_freq_GHz();
start_meas(&softmodem_stats_rxtx_sf);
// *******************************************************************
...
...@@ -1523,7 +1523,13 @@ static void* ru_thread( void* param ) {
printf( "Exiting ru_thread \n");
if (ru->stop_rf != NULL) {
if (ru->stop_rf(ru) != 0)
LOG_E(HW,"Could not stop the RF device\n");
else LOG_I(PHY,"RU %d rf device stopped\n",ru->idx);
}
ru_thread_status = 0;
return &ru_thread_status;
...@@ -1617,6 +1623,12 @@ int start_rf(RU_t *ru) {
return(ru->rfdevice.trx_start_func(&ru->rfdevice));
}
int stop_rf(RU_t *ru)
{
ru->rfdevice.trx_end_func(&ru->rfdevice);
return 0;
}
extern void fep_full(RU_t *ru);
extern void ru_fep_full_2thread(RU_t *ru);
extern void feptx_ofdm(RU_t *ru);
...@@ -2082,6 +2094,7 @@ void set_function_spec_param(RU_t *ru)
ru->fh_south_in = rx_rf; // local synchronous RF RX
ru->fh_south_out = tx_rf; // local synchronous RF TX
ru->start_rf = start_rf; // need to start the local RF interface
ru->stop_rf = stop_rf;
printf("configuring ru_id %d (start_rf %p)\n", ru->idx, start_rf); printf("configuring ru_id %d (start_rf %p)\n", ru->idx, start_rf);
/* /*
if (ru->function == eNodeB_3GPP) { // configure RF parameters only for 3GPP eNodeB, we need to get them from RAU otherwise if (ru->function == eNodeB_3GPP) { // configure RF parameters only for 3GPP eNodeB, we need to get them from RAU otherwise
...@@ -2113,6 +2126,7 @@ void set_function_spec_param(RU_t *ru) ...@@ -2113,6 +2126,7 @@ void set_function_spec_param(RU_t *ru)
ru->fh_south_asynch_in = NULL; // no asynchronous UL ru->fh_south_asynch_in = NULL; // no asynchronous UL
} }
ru->start_rf = NULL; // no local RF ru->start_rf = NULL; // no local RF
ru->stop_rf = NULL;
ru->start_if = start_if; // need to start if interface for IF5
ru->ifdevice.host_type = RAU_HOST;
ru->ifdevice.eth_params = &ru->eth_params;
...@@ -2137,6 +2151,7 @@ void set_function_spec_param(RU_t *ru)
ru->fh_north_out = NULL;
ru->fh_north_asynch_in = NULL;
ru->start_rf = NULL; // no local RF
ru->stop_rf = NULL;
ru->start_if = start_if; // need to start if interface for IF4p5
ru->ifdevice.host_type = RAU_HOST;
ru->ifdevice.eth_params = &ru->eth_params;
...
...@@ -1005,10 +1005,8 @@ int main( int argc, char **argv )
printf("Runtime table\n");
fill_modeled_runtime_table(runtime_phy_rx,runtime_phy_tx);
- cpuf=get_cpu_freq_GHz();
#ifndef DEADLINE_SCHEDULER
printf("NO deadline scheduler\n");
...