Michael Black / OpenXG-RAN · Commits

Commit 86cf5e70
Authored Aug 25, 2022 by francescomani
Parent: 84611333

    reworking gNB PUCCH scheduling

Showing 5 changed files with 233 additions and 367 deletions (+233 / -367):

  openair2/LAYER2/NR_MAC_gNB/gNB_scheduler.c              +4    -6
  openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_primitives.c   +44   -8
  openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_uci.c          +175  -350
  openair2/LAYER2/NR_MAC_gNB/mac_proto.h                  +5    -2
  openair2/LAYER2/NR_MAC_gNB/nr_mac_gNB.h                 +5    -1
openair2/LAYER2/NR_MAC_gNB/gNB_scheduler.c

@@ -215,8 +215,7 @@ void gNB_dlsch_ulsch_scheduler(module_id_t module_idP,
   // Schedule CSI-RS transmission
   nr_csirs_scheduling(module_idP, frame, slot, nr_slots_per_frame[*scc->ssbSubcarrierSpacing]);
 
-  // Schedule CSI measurement reporting: check in slot 0 for the whole frame
-  if (slot == 0)
-    nr_csi_meas_reporting(module_idP, frame, slot);
+  // Schedule CSI measurement reporting
+  nr_csi_meas_reporting(module_idP, frame, slot);
 
   // Schedule SRS: check in slot 0 for the whole frame
...
@@ -237,10 +236,9 @@ void gNB_dlsch_ulsch_scheduler(module_id_t module_idP,
   nr_schedule_ue_spec(module_idP, frame, slot);
   stop_meas(&gNB->schedule_dlsch);
 
-  nr_schedule_pucch(RC.nrmac[module_idP], frame, slot);
-  // This schedule SR after PUCCH for multiplexing
-  nr_sr_reporting(RC.nrmac[module_idP], frame, slot);
+  nr_sr_reporting(RC.nrmac[module_idP], frame, slot, module_idP);
+  nr_schedule_pucch(RC.nrmac[module_idP], frame, slot);
 
   stop_meas(&RC.nrmac[module_idP]->eNB_scheduler);
...
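Note: the swap above matters because, after this rework, nr_sr_reporting() only marks the SR on the UE's internal sched_pucch list, while nr_schedule_pucch() is the pass that turns matching list entries into nFAPI PUCCH PDUs for the current slot. A minimal two-pass sketch of that pattern, with simplified standalone types (this is an illustration, not the actual OAI structures):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct {
      bool active;
      int frame;
      int ul_slot;
      bool sr_flag;
    } sched_pucch_t;

    /* SR pass: take the first free entry and mark it for this frame/slot. */
    static void sr_pass(sched_pucch_t *list, int size, int frame, int slot)
    {
      for (int i = 0; i < size; i++) {
        if (!list[i].active) {
          list[i] = (sched_pucch_t){.active = true, .frame = frame, .ul_slot = slot, .sr_flag = true};
          return;
        }
      }
    }

    /* PUCCH pass: consume every entry that is due in the current frame/slot. */
    static void pucch_pass(sched_pucch_t *list, int size, int frame, int slot)
    {
      for (int i = 0; i < size; i++) {
        if (list[i].active && list[i].frame == frame && list[i].ul_slot == slot) {
          printf("fill nFAPI PUCCH PDU for %d.%d (sr=%d)\n", frame, slot, list[i].sr_flag);
          list[i] = (sched_pucch_t){0};
        }
      }
    }

    int main(void)
    {
      sched_pucch_t list[4] = {0};
      sr_pass(list, 4, 100, 8);    // must run first, as in the new scheduler order
      pucch_pass(list, 4, 100, 8);
      return 0;
    }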
openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_primitives.c

@@ -1087,7 +1087,7 @@ void nr_configure_pucch(nfapi_nr_pucch_pdu_t* pucch_pdu,
   pucch_pdu->cyclic_prefix = (current_BWP->cyclicprefix == NULL) ? 0 : *current_BWP->cyclicprefix;
 
   NR_PUCCH_Config_t *pucch_Config = current_BWP->pucch_Config;
   if (r_pucch < 0 || pucch_Config) {
     LOG_D(NR_MAC, "pucch_acknak: Filling dedicated configuration for PUCCH\n");
     AssertFatal(pucch_Config->resourceSetToAddModList != NULL,
...
@@ -2375,6 +2375,17 @@ void configure_UE_BWP(gNB_MAC_INST *nr_mac,
     else
       UL_BWP->pucch_ConfigCommon = scc->uplinkConfigCommon->initialUplinkBWP->pucch_ConfigCommon->choice.setup;
 
+    UL_BWP->max_fb_time = 0;
+    if (DL_BWP->dci_format != NR_DL_DCI_FORMAT_1_0 && UL_BWP->pucch_Config) {
+      for (int i = 0; i < UL_BWP->pucch_Config->dl_DataToUL_ACK->list.count; i++) {
+        if (*UL_BWP->pucch_Config->dl_DataToUL_ACK->list.array[i] > UL_BWP->max_fb_time)
+          UL_BWP->max_fb_time = *UL_BWP->pucch_Config->dl_DataToUL_ACK->list.array[i];
+      }
+    }
+    else
+      UL_BWP->max_fb_time = 8; // default value
+
   if (UE) {
     // setting PDCCH related structures for sched_ctrl
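The block above caches the largest configured PDSCH-to-HARQ feedback timing on the UL BWP so the schedulers no longer recompute it on every call. A self-contained sketch of the same rule, with assumed sample K1 values (the real list comes from the RRC-configured dl-DataToUL-ACK):

    #include <stdio.h>

    static int max_feedback_time(const long *dl_data_to_ul_ack, int count)
    {
      if (!dl_data_to_ul_ack || count == 0)
        return 8; // default value, as in the fallback branch above
      int max_fb_time = 0;
      for (int i = 0; i < count; i++)
        if (dl_data_to_ul_ack[i] > max_fb_time)
          max_fb_time = (int)dl_data_to_ul_ack[i];
      return max_fb_time;
    }

    int main(void)
    {
      const long k1[] = {1, 2, 3, 4, 5, 6, 7, 8}; // example PDSCH-to-HARQ timings
      printf("max_fb_time = %d\n", max_feedback_time(k1, 8)); // prints 8
      return 0;
    }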
...
@@ -2410,6 +2421,8 @@ void configure_UE_BWP(gNB_MAC_INST *nr_mac,
     if (UL_BWP->csi_MeasConfig)
       compute_csi_bitlen(UL_BWP->csi_MeasConfig, UE->csi_report_template);
 
+    set_sched_pucch_list(sched_ctrl, UL_BWP, scc);
   }
 
   if (ra) {
...
@@ -2456,6 +2469,7 @@ void configure_UE_BWP(gNB_MAC_INST *nr_mac,
                        NR_RNTI_C,
                        target_ss,
                        false);
 }
 
 void reset_srs_stats(NR_UE_info_t *UE) {
...
@@ -2553,6 +2567,34 @@ NR_UE_info_t *add_new_nr_ue(gNB_MAC_INST *nr_mac, rnti_t rntiP, NR_CellGroupConf
   return (UE);
 }
 
+void set_sched_pucch_list(NR_UE_sched_ctrl_t *sched_ctrl,
+                          NR_UE_UL_BWP_t *ul_bwp,
+                          NR_ServingCellConfigCommon_t *scc) {
+
+  const NR_TDD_UL_DL_Pattern_t *tdd = scc->tdd_UL_DL_ConfigurationCommon ? &scc->tdd_UL_DL_ConfigurationCommon->pattern1 : NULL;
+  const int n_slots_frame = nr_slots_per_frame[ul_bwp->scs];
+  const int nr_slots_period = tdd ? n_slots_frame / get_nb_periods_per_frame(tdd->dl_UL_TransmissionPeriodicity) : n_slots_frame;
+  const int n_ul_slots_period = tdd ? tdd->nrofUplinkSlots + (tdd->nrofUplinkSymbols > 0 ? 1 : 0) : n_slots_frame;
+  const int list_size = n_ul_slots_period << (ul_bwp->max_fb_time / nr_slots_period);
+  if (!sched_ctrl->sched_pucch) {
+    sched_ctrl->sched_pucch = malloc(list_size * sizeof(*sched_ctrl->sched_pucch));
+    sched_ctrl->sched_pucch_size = list_size;
+    for (int i = 0; i < list_size; i++) {
+      NR_sched_pucch_t *curr_pucch = &sched_ctrl->sched_pucch[i];
+      memset(curr_pucch, 0, sizeof(*curr_pucch));
+    }
+  }
+  else if (list_size > sched_ctrl->sched_pucch_size) {
+    sched_ctrl->sched_pucch = realloc(sched_ctrl->sched_pucch, list_size * sizeof(*sched_ctrl->sched_pucch));
+    for (int i = sched_ctrl->sched_pucch_size; i < list_size; i++) {
+      NR_sched_pucch_t *curr_pucch = &sched_ctrl->sched_pucch[i];
+      memset(curr_pucch, 0, sizeof(*curr_pucch));
+    }
+    sched_ctrl->sched_pucch_size = list_size;
+  }
+}
+
 void create_dl_harq_list(NR_UE_sched_ctrl_t *sched_ctrl,
                          const NR_PDSCH_ServingCellConfig_t *pdsch) {
   const int nrofHARQ = pdsch && pdsch->nrofHARQ_ProcessesForPDSCH ?
...
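For a sense of scale, the list dimension above grows with the number of UL slots per TDD period and with how far ahead feedback can land. A small worked example with assumed TDD numbers (30 kHz SCS, 2.5 ms period, one full UL slot plus a mixed slot, max_fb_time = 8; not a real configuration):

    #include <stdio.h>

    int main(void)
    {
      const int nr_slots_period = 5;    // 2.5 ms TDD period at 30 kHz SCS
      const int n_ul_slots_period = 2;  // nrofUplinkSlots + 1 for UL symbols in the mixed slot
      const int max_fb_time = 8;        // largest configured K1

      const int list_size = n_ul_slots_period << (max_fb_time / nr_slots_period);
      printf("sched_pucch entries per UE: %d\n", list_size); // 2 << 1 = 4
      return 0;
    }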
@@ -2672,23 +2714,17 @@ uint8_t nr_get_tpc(int target, uint8_t cqi, int incr) {
 void get_pdsch_to_harq_feedback(NR_PUCCH_Config_t *pucch_Config,
                                 nr_dci_format_t dci_format,
-                                int *max_fb_time,
                                 uint8_t *pdsch_to_harq_feedback) {
 
   if (dci_format == NR_DL_DCI_FORMAT_1_0) {
-    for (int i = 0; i < 8; i++) {
+    for (int i = 0; i < 8; i++)
       pdsch_to_harq_feedback[i] = i + 1;
-      if (pdsch_to_harq_feedback[i] > *max_fb_time)
-        *max_fb_time = pdsch_to_harq_feedback[i];
-    }
   }
   else {
     AssertFatal(pucch_Config != NULL, "pucch_Config shouldn't be null here\n");
     if (pucch_Config->dl_DataToUL_ACK != NULL) {
       for (int i = 0; i < 8; i++) {
         pdsch_to_harq_feedback[i] = *pucch_Config->dl_DataToUL_ACK->list.array[i];
-        if (pdsch_to_harq_feedback[i] > *max_fb_time)
-          *max_fb_time = pdsch_to_harq_feedback[i];
       }
     }
     else
...
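With the max_fb_time bookkeeping removed, this helper only fills the K1 table: for DCI format 1_0 it is the fixed 1..8 sequence, otherwise it is copied from dl-DataToUL-ACK, and the feedback horizon now lives on the UL BWP (see configure_UE_BWP above). A standalone sketch of the table semantics, with assumed dedicated-configuration values:

    #include <stdio.h>
    #include <stdint.h>

    static void fill_k1_table(uint8_t *k1, const long *dl_data_to_ul_ack)
    {
      for (int i = 0; i < 8; i++)
        k1[i] = dl_data_to_ul_ack ? (uint8_t)dl_data_to_ul_ack[i]  // dedicated config
                                  : (uint8_t)(i + 1);              // DCI format 1_0 default
    }

    int main(void)
    {
      const long dedicated[8] = {2, 3, 4, 5, 6, 7, 8, 8}; // assumed example values
      uint8_t k1_common[8], k1_dedicated[8];
      fill_k1_table(k1_common, NULL);
      fill_k1_table(k1_dedicated, dedicated);
      printf("common K1[0..7]: %d..%d, dedicated K1[0]: %d\n", k1_common[0], k1_common[7], k1_dedicated[0]);
      return 0;
    }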
openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_uci.c

@@ -45,8 +45,8 @@ static void nr_fill_nfapi_pucch(gNB_MAC_INST *nrmac,
                                 const NR_sched_pucch_t *pucch,
                                 NR_UE_info_t *UE)
 {
   nfapi_nr_ul_tti_request_t *future_ul_tti_req = &nrmac->UL_tti_req_ahead[0][pucch->ul_slot];
   if (future_ul_tti_req->SFN != pucch->frame || future_ul_tti_req->Slot != pucch->ul_slot)
     LOG_W(MAC,
           "Current %d.%d : future UL_tti_req's frame.slot %4d.%2d does not match PUCCH %4d.%2d\n",
...
@@ -55,6 +55,7 @@ static void nr_fill_nfapi_pucch(gNB_MAC_INST *nrmac,
           future_ul_tti_req->Slot,
           pucch->frame,
           pucch->ul_slot);
 
   // n_pdus is number of pdus, so, in the array, it is the index of the next free element
   if (future_ul_tti_req->n_pdus >= sizeofArray(future_ul_tti_req->pdus_list)) {
     LOG_E(NR_MAC, "future_ul_tti_req->n_pdus %d is full, slot: %d, sr flag %d dropping request\n",
...
@@ -139,24 +140,21 @@ void nr_schedule_pucch(gNB_MAC_INST *nrmac,
   UE_iterator(nrmac->UE_info.list, UE) {
     NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
-    const int n = sizeof(sched_ctrl->sched_pucch) / sizeof(*sched_ctrl->sched_pucch);
-    for (int i = 0; i < n; i++) {
+    for (int i = 0; i < sched_ctrl->sched_pucch_size; i++) {
       NR_sched_pucch_t *curr_pucch = &UE->UE_sched_ctrl.sched_pucch[i];
       const uint16_t O_ack = curr_pucch->dai_c;
       const uint16_t O_csi = curr_pucch->csi_bits;
       const uint8_t O_sr = curr_pucch->sr_flag;
-      if (O_ack + O_csi + O_sr == 0
-          || frameP != curr_pucch->frame
-          || slotP != curr_pucch->ul_slot)
-        continue;
-      if (O_csi > 0)
+      if (curr_pucch->active == true &&
+          frameP == curr_pucch->frame &&
+          slotP == curr_pucch->ul_slot) {
         LOG_D(NR_MAC, "Scheduling PUCCH[%d] RX for UE %04x in %4d.%2d O_ack %d, O_sr %d, O_csi %d\n",
               i, UE->rnti, curr_pucch->frame, curr_pucch->ul_slot, O_ack, O_sr, O_csi);
         nr_fill_nfapi_pucch(nrmac, frameP, slotP, curr_pucch, UE);
         memset(curr_pucch, 0, sizeof(*curr_pucch));
+      }
     }
   }
 }
 
 void nr_csi_meas_reporting(int Mod_idP,
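The loop-bound change matters because sched_pucch is now a heap pointer (see nr_mac_gNB.h below), and the old sizeof-based count only works on a true array member. A minimal standalone demonstration of why the explicit sched_pucch_size is needed:

    #include <stdio.h>

    typedef struct { int frame, ul_slot; } entry_t;

    int main(void)
    {
      entry_t array_member[2];
      entry_t *pointer_member = array_member;

      // On a real array member this yields the element count (2)...
      printf("array:   %zu\n", sizeof(array_member) / sizeof(*array_member));
      // ...but on a pointer it is just sizeof(entry_t*) / sizeof(entry_t),
      // which is why the scheduler now carries sched_pucch_size explicitly.
      printf("pointer: %zu\n", sizeof(pointer_member) / sizeof(*pointer_member));
      return 0;
    }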
...
@@ -186,14 +184,16 @@ void nr_csi_meas_reporting(int Mod_idP,
       if (pucchcsires->uplinkBandwidthPartId != ul_bwp->bwp_id)
         continue;
+      // we schedule CSI reporting max_fb_time slots in advance
       int period, offset;
       csi_period_offset(csirep, NULL, &period, &offset);
-      const int sched_slot = (period + offset) % n_slots_frame;
+      const int sched_slot = (slot + ul_bwp->max_fb_time) % n_slots_frame;
+      const int sched_frame = (frame + ((slot + ul_bwp->max_fb_time) / n_slots_frame)) % 1024;
       // prepare to schedule csi measurement reception according to 5.2.1.4 in 38.214
-      // preparation is done in first slot of tdd period
-      if (frame % (period / n_slots_frame) != offset / n_slots_frame)
+      if ((sched_frame * n_slots_frame + sched_slot - offset) % period != 0)
         continue;
-      LOG_D(NR_MAC, "CSI reporting in frame %d slot %d CSI report ID %ld\n", frame, sched_slot, csirep->reportConfigId);
+      LOG_D(NR_MAC, "CSI reporting in frame %d slot %d CSI report ID %ld\n", sched_frame, sched_slot, csirep->reportConfigId);
 
       const NR_PUCCH_ResourceSet_t *pucchresset = pucch_Config->resourceSetToAddModList->list.array[1]; // set with formats >1
       const int n = pucchresset->resourceList.list.count;
...
@@ -206,17 +206,21 @@ void nr_csi_meas_reporting(int Mod_idP,
       // find free PUCCH that is in order with possibly existing PUCCH
       // schedulings (other CSI, SR)
-      NR_sched_pucch_t *curr_pucch = &sched_ctrl->sched_pucch[1];
-      AssertFatal(curr_pucch->csi_bits == 0
-                  && !curr_pucch->sr_flag
-                  && curr_pucch->dai_c == 0,
-                  "PUCCH not free at index 1 for UE %04x\n",
-                  UE->rnti);
+      NR_sched_pucch_t *curr_pucch = NULL;
+      for (int i = 0; i < sched_ctrl->sched_pucch_size; i++) {
+        curr_pucch = &sched_ctrl->sched_pucch[i];
+        if (!curr_pucch->active)
+          break; // found an available pucch structure
+      }
+      AssertFatal(curr_pucch, "PUCCH structure not found for UE %04x\n", UE->rnti);
       curr_pucch->r_pucch = -1;
-      curr_pucch->frame = frame;
+      curr_pucch->frame = sched_frame;
       curr_pucch->ul_slot = sched_slot;
       curr_pucch->resource_indicator = res_index;
       curr_pucch->csi_bits += nr_get_csi_bitlen(UE->csi_report_template, csi_report_id);
+      curr_pucch->active = true;
 
       int bwp_start = ul_bwp->BWPStart;
...
@@ -235,7 +239,7 @@ void nr_csi_meas_reporting(int Mod_idP,
           len = pucchres->format.choice.format2->nrofPRBs;
           mask = SL_to_bitmap(pucchres->format.choice.format2->startingSymbolIndex, pucchres->format.choice.format2->nrofSymbols);
           curr_pucch->simultaneous_harqcsi = pucch_Config->format2->choice.setup->simultaneousHARQ_ACK_CSI;
-          LOG_D(NR_MAC, "%d.%d Allocating PUCCH format 2, startPRB %d, nPRB %d, simulHARQ %d, num_bits %d\n", frame, sched_slot, start, len, curr_pucch->simultaneous_harqcsi, curr_pucch->csi_bits);
+          LOG_D(NR_MAC, "%d.%d Allocating PUCCH format 2, startPRB %d, nPRB %d, simulHARQ %d, num_bits %d\n", sched_frame, sched_slot, start, len, curr_pucch->simultaneous_harqcsi, curr_pucch->csi_bits);
           break;
         case NR_PUCCH_Resource__format_PR_format3:
           len = pucchres->format.choice.format3->nrofPRBs;
...
@@ -252,7 +256,7 @@ void nr_csi_meas_reporting(int Mod_idP,
       // verify resources are free
       for (int i = start; i < start + len; ++i) {
         if ((vrb_map_UL[i + bwp_start] & mask) != 0) {
-          LOG_E(NR_MAC, "%4d.%2d VRB MAP in %4d.%2d not free. Can't schedule CSI reporting on PUCCH.\n", frame, slot, frame, sched_slot);
+          LOG_E(NR_MAC, "%4d.%2d VRB MAP in %4d.%2d not free. Can't schedule CSI reporting on PUCCH.\n", frame, slot, sched_frame, sched_slot);
           memset(curr_pucch, 0, sizeof(*curr_pucch));
         }
         else
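The reporting occasion is now derived from the current slot plus the feedback horizon, with an explicit frame carry, instead of being anchored to slot 0 of the frame. A small standalone example of the wrap-around arithmetic (assumed numbers):

    #include <stdio.h>

    int main(void)
    {
      const int n_slots_frame = 20; // 30 kHz SCS (assumed)
      const int max_fb_time = 8;
      const int frame = 511, slot = 17; // scheduling point near the frame edge

      const int sched_slot = (slot + max_fb_time) % n_slots_frame;
      const int sched_frame = (frame + (slot + max_fb_time) / n_slots_frame) % 1024;
      printf("CSI PUCCH prepared for %d.%d\n", sched_frame, sched_slot); // 512.5
      return 0;
    }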
...
@@ -993,7 +997,7 @@ void handle_nr_uci_pucch_2_3_4(module_id_t mod_id,
   }
 }
 
-bool test_acknack_vrb_occupation(NR_UE_sched_ctrl_t *sched_ctrl,
+bool test_pucch0_vrb_occupation(NR_UE_sched_ctrl_t *sched_ctrl,
                                  NR_sched_pucch_t *pucch,
                                  uint16_t *vrb_map_UL,
                                  const NR_ServingCellConfigCommon_t *scc,
...
@@ -1004,15 +1008,6 @@ bool test_acknack_vrb_occupation(NR_UE_sched_ctrl_t *sched_ctrl,
   // We assume initial cyclic shift is always 0 so different pucch resources can't overlap
-  NR_sched_pucch_t *csi_pucch = &sched_ctrl->sched_pucch[1];
-  if (csi_pucch &&
-      csi_pucch->csi_bits > 0 &&
-      csi_pucch->frame == pucch->frame &&
-      csi_pucch->ul_slot == pucch->ul_slot &&
-      csi_pucch->simultaneous_harqcsi &&
-      (csi_pucch->csi_bits + csi_pucch->dai_c) < 11)
-    return true; // available resources for csi_pucch already verified
 
   if (r_pucch < 0) {
     const NR_PUCCH_Resource_t *resource = pucch_Config->resourceToAddModList->list.array[0];
     DevAssert(resource->format.present == NR_PUCCH_Resource__format_PR_format0);
...
@@ -1049,15 +1044,15 @@ bool test_acknack_vrb_occupation(NR_UE_sched_ctrl_t *sched_ctrl,
 }
 
 // this function returns an index to NR_sched_pucch structure
-// currently this structure contains PUCCH0 at index 0 and PUCCH2 at index 1
 // if the function returns -1 it was not possible to schedule acknack
+// when current pucch is ready to be scheduled nr_fill_nfapi_pucch is called
 int nr_acknack_scheduling(int mod_id,
                           NR_UE_info_t *UE,
                           frame_t frame,
                           sub_frame_t slot,
                           int r_pucch,
                           int is_common) {
   const int CC_id = 0;
   const int minfbtime = RC.nrmac[mod_id]->minRXTXTIMEpdsch;
   const NR_ServingCellConfigCommon_t *scc = RC.nrmac[mod_id]->common_channels[CC_id].ServingCellConfigCommon;
...
@@ -1066,10 +1061,8 @@ int nr_acknack_scheduling(int mod_id,
   const NR_TDD_UL_DL_Pattern_t *tdd = scc->tdd_UL_DL_ConfigurationCommon ? &scc->tdd_UL_DL_ConfigurationCommon->pattern1 : NULL;
   AssertFatal(tdd || RC.nrmac[mod_id]->common_channels[CC_id].frame_type == FDD, "Dynamic TDD not handled yet\n");
   const int nr_slots_period = tdd ? n_slots_frame / get_nb_periods_per_frame(tdd->dl_UL_TransmissionPeriodicity) : n_slots_frame;
-  const int next_ul_slot = tdd ? tdd->nrofDownlinkSlots + nr_slots_period * (slot / nr_slots_period) : slot + minfbtime;
   const int first_ul_slot_period = tdd ? tdd->nrofDownlinkSlots : 0;
 
   /* for the moment, we consider:
    * * only pucch_sched[0] holds HARQ (and SR)
    * * we do not multiplex with CSI, which is always in pucch_sched[2]
...
@@ -1082,247 +1075,110 @@ int nr_acknack_scheduling(int mod_id,
   int bwp_start = ul_bwp->BWPStart;
   int bwp_size = ul_bwp->BWPSize;
 
-  NR_sched_pucch_t *pucch = &sched_ctrl->sched_pucch[0];
-  LOG_D(NR_MAC, "In %s: %4d.%2d Trying to allocate pucch, current DAI %d\n", __FUNCTION__, frame, slot, pucch->dai_c);
-  pucch->r_pucch = r_pucch;
-  AssertFatal(pucch->csi_bits == 0, "%s(): csi_bits %d in sched_pucch[0]\n", __func__, pucch->csi_bits);
-
-  /* if the currently allocated PUCCH of this UE is full, allocate it */
-  NR_sched_pucch_t *csi_pucch = &sched_ctrl->sched_pucch[1];
-  if (pucch->dai_c == 2) {
-    /* advance the UL slot information in PUCCH by one so we won't schedule in
-     * the same slot again */
-    const int f = pucch->frame;
-    const int s = pucch->ul_slot;
-    LOG_D(NR_MAC, "In %s: %4d.%2d DAI = 2 pucch currently in %4d.%2d, advancing by 1 slot\n", __FUNCTION__, frame, slot, f, s);
-    if (!(csi_pucch && csi_pucch->csi_bits > 0 && csi_pucch->frame == f && csi_pucch->ul_slot == s))
-      nr_fill_nfapi_pucch(RC.nrmac[mod_id], frame, slot, pucch, UE);
-    memset(pucch, 0, sizeof(*pucch));
-    pucch->frame = s == n_slots_frame - 1 ? (f + 1) % 1024 : f;
-    if (((s + 1) % nr_slots_period) == 0)
-      pucch->ul_slot = (s + 1 + first_ul_slot_period) % n_slots_frame;
-    else
-      pucch->ul_slot = (s + 1) % n_slots_frame;
-    // we assume that only two indices over the array sched_pucch exist
-    // skip the CSI PUCCH if it is present and if in the next frame/slot
-    // and if we don't multiplex
-    csi_pucch->r_pucch = -1;
-    if (csi_pucch &&
-        csi_pucch->csi_bits > 0 &&
-        csi_pucch->frame == pucch->frame &&
-        csi_pucch->ul_slot == pucch->ul_slot &&
-        !csi_pucch->simultaneous_harqcsi) {
-      LOG_D(NR_MAC, "Cannot multiplex csi_pucch for %d.%d\n", csi_pucch->frame, csi_pucch->ul_slot);
-      nr_fill_nfapi_pucch(RC.nrmac[mod_id], frame, slot, csi_pucch, UE);
-      memset(csi_pucch, 0, sizeof(*csi_pucch));
-      pucch->frame = pucch->ul_slot == n_slots_frame - 1 ? (pucch->frame + 1) % 1024 : pucch->frame;
-      if (((pucch->ul_slot + 1) % nr_slots_period) == 0)
-        pucch->ul_slot = (pucch->ul_slot + 1 + first_ul_slot_period) % n_slots_frame;
-      else
-        pucch->ul_slot = (pucch->ul_slot + 1) % n_slots_frame;
-    }
-  }
-
-  LOG_D(NR_MAC, "In %s: pucch_acknak 1. DL %4d.%2d, UL_ACK %4d.%2d, DAI_C %d\n", __FUNCTION__, frame, slot, pucch->frame, pucch->ul_slot, pucch->dai_c);
-
   nr_dci_format_t dci_format = NR_DL_DCI_FORMAT_1_0;
   if (is_common == 0)
     dci_format = UE->current_DL_BWP.dci_format;
   uint8_t pdsch_to_harq_feedback[8];
-  int max_fb_time = 0;
-  get_pdsch_to_harq_feedback(pucch_Config, dci_format, &max_fb_time, pdsch_to_harq_feedback);
+  get_pdsch_to_harq_feedback(pucch_Config, dci_format, pdsch_to_harq_feedback);
 
-  LOG_D(NR_MAC, "In %s: 1b. DL %4d.%2d, UL_ACK %4d.%2d, DAI_C %d\n", __FUNCTION__, frame, slot, pucch->frame, pucch->ul_slot, pucch->dai_c);
-
-  /* there is a HARQ. Check whether we can use it for this ACKNACK */
-  if (pucch->dai_c > 0) {
-    /* this UE already has a PUCCH occasion */
-    // Find the right timing_indicator value.
-    int i = 0;
-    while (i < 8) {
-      int diff = pucch->ul_slot - slot;
-      if (diff < 0)
-        diff += n_slots_frame;
-      if (pdsch_to_harq_feedback[i] == diff &&
-          pdsch_to_harq_feedback[i] >= minfbtime)
-        break;
-      ++i;
-    }
-    if (i >= 8) {
-      // we cannot reach this timing anymore, allocate and try again
-      const int f = pucch->frame;
-      const int s = pucch->ul_slot;
-      LOG_D(NR_MAC, "In %s: %4d.%2d DAI > 0, cannot reach timing for pucch in %4d.%2d, advancing slot by 1 and trying again\n", __FUNCTION__, frame, slot, f, s);
-      if (!(csi_pucch &&
-            csi_pucch->csi_bits > 0 &&
-            csi_pucch->frame == f &&
-            csi_pucch->ul_slot == s))
-        nr_fill_nfapi_pucch(RC.nrmac[mod_id], frame, slot, pucch, UE);
-      memset(pucch, 0, sizeof(*pucch));
-      pucch->frame = s == n_slots_frame - 1 ? (f + 1) % 1024 : f;
-      if (((s + 1) % nr_slots_period) == 0)
-        pucch->ul_slot = (s + 1 + first_ul_slot_period) % n_slots_frame;
-      else
-        pucch->ul_slot = (s + 1) % n_slots_frame;
-      return nr_acknack_scheduling(mod_id, UE, frame, slot, r_pucch, is_common);
-    }
-    pucch->timing_indicator = i;
-    pucch->dai_c++;
-    // if there is CSI in this slot update the HARQ information for that one too
-    if (csi_pucch &&
-        csi_pucch->csi_bits > 0 &&
-        csi_pucch->frame == pucch->frame &&
-        csi_pucch->ul_slot == pucch->ul_slot) {
-      csi_pucch->timing_indicator = i;
-      csi_pucch->dai_c++;
-    }
-    // retain old resource indicator, and we are good
-    LOG_D(NR_MAC, "In %s: %4d.%2d. DAI > 0, pucch allocated for %4d.%2d (index %d)\n", __FUNCTION__, frame, slot, pucch->frame, pucch->ul_slot, pucch->timing_indicator);
-    return 0;
-  }
-
-  LOG_D(NR_MAC, "In %s: %4d.%2d DAI = 0, looking for new pucch occasion\n", __FUNCTION__, frame, slot);
-  /* we need to find a new PUCCH occasion */
-  /*(Re)Inizialization of timing information*/
-  if ((pucch->frame == 0 && pucch->ul_slot == 0) ||
-      ((pucch->frame * n_slots_frame + pucch->ul_slot) < (frame * n_slots_frame + slot))) {
-    AssertFatal(pucch->sr_flag + pucch->dai_c == 0,
-                "expected no SR/AckNack for UE %04x in %4d.%2d, but has %d/%d for %4d.%2d\n",
-                UE->rnti, frame, slot, pucch->sr_flag, pucch->dai_c, pucch->frame, pucch->ul_slot);
-    const int s = next_ul_slot;
-    pucch->frame = s < n_slots_frame ? frame : (frame + 1) % 1024;
-    pucch->ul_slot = s % n_slots_frame;
-  }
-
-  // Find the right timing_indicator value.
-  int ind_found = -1;
-  uint16_t *vrb_map_UL;
-  // while we are within the feedback limits
-  while ((n_slots_frame + pucch->ul_slot - slot) % n_slots_frame <= max_fb_time) {
-    // checking if in ul_slot the resources potentially to be assigned to this PUCCH are available
-    vrb_map_UL = &RC.nrmac[mod_id]->common_channels[CC_id].vrb_map_UL[pucch->ul_slot * MAX_BWP_SIZE];
-    bool ret = test_acknack_vrb_occupation(sched_ctrl,
-                                           pucch,
-                                           vrb_map_UL,
-                                           scc,
-                                           pucch_Config,
-                                           r_pucch,
-                                           bwp_start,
-                                           bwp_size);
-    if (ret) {
-      int i = 0;
-      while (i < 8) {
-        LOG_D(NR_MAC, "pdsch_to_harq_feedback[%d] = %d (pucch->ul_slot %d - slot %d)\n", i, pdsch_to_harq_feedback[i], pucch->ul_slot, slot);
-        int diff = pucch->ul_slot - slot;
-        if (diff < 0)
-          diff += n_slots_frame;
-        if (pdsch_to_harq_feedback[i] == diff &&
-            pdsch_to_harq_feedback[i] >= minfbtime) {
-          ind_found = i;
-          break;
-        }
-        ++i;
-      }
-      if (ind_found !=-1)
-        break;
-    }
-    // advance to the next ul slot
-    const int f = pucch->frame;
-    const int s = pucch->ul_slot;
-    pucch->frame = s == n_slots_frame - 1 ? (f + 1) % 1024 : f;
-    if (((s + 1) % nr_slots_period) == 0)
-      pucch->ul_slot = (s + 1 + first_ul_slot_period) % n_slots_frame;
-    else
-      pucch->ul_slot = (s + 1) % n_slots_frame;
-  }
-  if (ind_found ==-1) {
-    LOG_D(NR_MAC,
-          "%4d.%2d could not find pdsch_to_harq_feedback for UE %04x: earliest "
-          "ack slot %d\n",
-          frame, slot, UE->rnti, pucch->ul_slot);
-    return -1;
-  }
-
-  if (csi_pucch &&
-      csi_pucch->csi_bits > 0 &&
-      csi_pucch->frame == pucch->frame &&
-      csi_pucch->ul_slot == pucch->ul_slot) {
-    // skip the CSI PUCCH if it is present and if in the next frame/slot
-    // and if we don't multiplex
-    /* FIXME currently we support at most 11 bits in pucch2 so skip also in that case.
-       We need to set the limit to 10 because SR scheduling has been moved afterwards */
-    if (!csi_pucch->simultaneous_harqcsi
-        || ((csi_pucch->csi_bits + csi_pucch->dai_c) >= 10)) {
-      LOG_D(NR_MAC, "Cannot multiplex csi_pucch %d +csi_pucch->dai_c %d for %d.%d\n", csi_pucch->csi_bits, csi_pucch->dai_c, csi_pucch->frame, csi_pucch->ul_slot);
-      nr_fill_nfapi_pucch(RC.nrmac[mod_id], frame, slot, csi_pucch, UE);
-      memset(csi_pucch, 0, sizeof(*csi_pucch));
-      /* advance the UL slot information in PUCCH by one so we won't schedule in
-       * the same slot again */
-      const int f = pucch->frame;
-      const int s = pucch->ul_slot;
-      memset(pucch, 0, sizeof(*pucch));
-      pucch->frame = s == n_slots_frame - 1 ? (f + 1) % 1024 : f;
-      if (((s + 1) % nr_slots_period) == 0)
-        pucch->ul_slot = (s + 1 + first_ul_slot_period) % n_slots_frame;
-      else
-        pucch->ul_slot = (s + 1) % n_slots_frame;
-      return nr_acknack_scheduling(mod_id, UE, frame, slot, r_pucch, is_common);
-    }
-    // multiplexing harq and csi in a pucch
-    else {
-      csi_pucch->timing_indicator = ind_found;
-      csi_pucch->dai_c++;
-      memset(pucch, 0, sizeof(*pucch));
-      LOG_D(NR_MAC, "multiplexing csi_pucch %d +csi_pucch->dai_c %d for %d.%d\n", csi_pucch->csi_bits, csi_pucch->dai_c, csi_pucch->frame, csi_pucch->ul_slot);
-      return 1;
-    }
-  }
-
-  pucch->timing_indicator = ind_found; // index in the list of timing indicators
-  LOG_D(NR_MAC, "In %s: 2. DAI 0 DL %4d.%2d, UL_ACK %4d.%2d (index %d)\n", __FUNCTION__, frame, slot, pucch->frame, pucch->ul_slot, pucch->timing_indicator);
-  pucch->dai_c++;
-  pucch->resource_indicator = 0; // each UE has dedicated PUCCH resources
-  pucch->r_pucch = r_pucch;
-  vrb_map_UL = &RC.nrmac[mod_id]->common_channels[CC_id].vrb_map_UL[pucch->ul_slot * MAX_BWP_SIZE];
-  for (int l = 0; l < pucch->nr_of_symb; l++) {
-    uint16_t symb = SL_to_bitmap(pucch->start_symb + l, 1);
-    int prb;
-    if (l == 1 && pucch->second_hop_prb != 0)
-      prb = pucch->second_hop_prb;
-    else
-      prb = pucch->prb_start;
-    vrb_map_UL[bwp_start + prb] |= symb;
-  }
-  return 0;
+  for (int f = 0; f < 8; f++) {
+    // can't schedule ACKNACK before minimum feedback time
+    if (pdsch_to_harq_feedback[f] < minfbtime)
+      continue;
+    const int pucch_slot = (slot + pdsch_to_harq_feedback[f]) % n_slots_frame;
+    // check if the slot is UL
+    if (pucch_slot % nr_slots_period < first_ul_slot_period)
+      continue;
+    const int pucch_frame = frame + ((slot + pdsch_to_harq_feedback[f]) / n_slots_frame);
+    int inactive_pucch = -1;
+    NR_sched_pucch_t *curr_pucch = NULL;
+    for (int i = 0; i < sched_ctrl->sched_pucch_size; i++) {
+      curr_pucch = &sched_ctrl->sched_pucch[i];
+      // if there is already an active pucch for this frame and slot
+      if (curr_pucch->active &&
+          curr_pucch->frame == pucch_frame &&
+          curr_pucch->ul_slot == pucch_slot) {
+        LOG_D(NR_MAC, "In %s: pucch_acknak DL %4d.%2d, UL_ACK %4d.%2d Bits already in current PUCCH: DAI_C %d CSI %d\n",
+              __FUNCTION__, frame, slot, pucch_frame, pucch_slot, curr_pucch->dai_c, curr_pucch->csi_bits);
+        // we can't schedule if short pucch is already full
+        if (curr_pucch->csi_bits == 0 &&
+            curr_pucch->dai_c == 2)
+          continue;
+        // if there is CSI but simultaneous HARQ+CSI is disable we can't schedule
+        if (!curr_pucch->simultaneous_harqcsi &&
+            curr_pucch->csi_bits > 0)
+          continue;
+        // TODO we can't schedule more than 11 bits in PUCCH2 for now
+        if (curr_pucch->csi_bits + curr_pucch->dai_c >= 10)
+          continue;
+        // otherwise we can schedule in this active PUCCH
+        // no need to check VRB occupation because already done when PUCCH has been activated
+        curr_pucch->timing_indicator = f;
+        curr_pucch->dai_c++;
+        LOG_D(NR_MAC, "In %s: DL %4d.%2d, UL_ACK %4d.%2d Scheduling ACK/NACK in PUCCH %d with timing indicator %d DAI %d CSI %d\n",
+              __FUNCTION__, frame, slot, curr_pucch->frame, curr_pucch->ul_slot, i, f, curr_pucch->dai_c, curr_pucch->csi_bits);
+        return i; // index of current PUCCH structure
+      }
+      else if (!curr_pucch->active)
+        inactive_pucch = i;
+    }
+    if (inactive_pucch > 0) {
+      curr_pucch = &sched_ctrl->sched_pucch[inactive_pucch];
+      // checking if in ul_slot the resources potentially to be assigned to this PUCCH are available
+      uint16_t *vrb_map_UL = &RC.nrmac[mod_id]->common_channels[CC_id].vrb_map_UL[pucch_slot * MAX_BWP_SIZE];
+      bool ret = test_pucch0_vrb_occupation(sched_ctrl,
+                                            curr_pucch,
+                                            vrb_map_UL,
+                                            scc,
+                                            pucch_Config,
+                                            r_pucch,
+                                            bwp_start,
+                                            bwp_size);
+      if (!ret) {
+        LOG_D(NR_MAC, "In %s: DL %4d.%2d, UL_ACK %4d.%2d PRB resources for this occasion are already occupied, move to the following occasion\n",
+              __FUNCTION__, frame, slot, curr_pucch->frame, curr_pucch->ul_slot);
+        continue;
+      }
+      // allocating a new PUCCH structure for this occasion
+      curr_pucch->active = true;
+      curr_pucch->frame = pucch_frame;
+      curr_pucch->ul_slot = pucch_slot;
+      curr_pucch->timing_indicator = f; // index in the list of timing indicators
+      curr_pucch->dai_c++;
+      curr_pucch->resource_indicator = 0; // each UE has dedicated PUCCH resources
+      curr_pucch->r_pucch = r_pucch;
+      LOG_D(NR_MAC, "In %s: DL %4d.%2d, UL_ACK %4d.%2d Scheduling ACK/NACK in PUCCH %d with timing indicator %d DAI %d\n",
+            __FUNCTION__, frame, slot, curr_pucch->frame, curr_pucch->ul_slot, inactive_pucch, f, curr_pucch->dai_c);
+      // blocking resources for current PUCCH in VRB map
+      for (int l = 0; l < curr_pucch->nr_of_symb; l++) {
+        uint16_t symb = SL_to_bitmap(curr_pucch->start_symb + l, 1);
+        int prb;
+        if (l == 1 && curr_pucch->second_hop_prb != 0)
+          prb = curr_pucch->second_hop_prb;
+        else
+          prb = curr_pucch->prb_start;
+        vrb_map_UL[bwp_start + prb] |= symb;
+      }
+      return inactive_pucch; // index of current PUCCH structure
+    }
+  }
+  LOG_D(NR_MAC, "In %s: DL %4d.%2d, Couldn't find scheduling occasion for this HARQ process\n", __FUNCTION__, frame, slot);
+  return -1;
 }
 
-void nr_sr_reporting(gNB_MAC_INST *nrmac, frame_t SFN, sub_frame_t slot)
+void nr_sr_reporting(gNB_MAC_INST *nrmac, frame_t SFN, sub_frame_t slot, int mod_id)
 {
   if (!is_xlsch_in_slot(nrmac->ulsch_slot_bitmap[slot / 64], slot))
     return;
+  int CC_id = 0;
+  const NR_ServingCellConfigCommon_t *scc = nrmac->common_channels[CC_id].ServingCellConfigCommon;
   UE_iterator(nrmac->UE_info.list, UE) {
     NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
     NR_UE_UL_BWP_t *ul_bwp = &UE->current_UL_BWP;
...
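In the reworked ACK/NACK path, each candidate K1 value is mapped to a concrete UL slot and skipped if it is below the minimum feedback time or falls outside the UL part of the TDD period; only then is a list entry reused or activated. A standalone sketch of the slot-selection step only, with assumed TDD numbers:

    #include <stdio.h>

    int main(void)
    {
      const int n_slots_frame = 20, nr_slots_period = 5, first_ul_slot_period = 3;
      const int minfbtime = 2;                  // assumed minimum PDSCH-to-HARQ distance
      const int k1[8] = {1, 2, 3, 4, 5, 6, 7, 8};
      const int frame = 100, slot = 4;

      for (int f = 0; f < 8; f++) {
        if (k1[f] < minfbtime)
          continue;                             // too early to feed back
        const int pucch_slot = (slot + k1[f]) % n_slots_frame;
        if (pucch_slot % nr_slots_period < first_ul_slot_period)
          continue;                             // not an UL slot in this period
        const int pucch_frame = frame + (slot + k1[f]) / n_slots_frame;
        printf("K1=%d -> ACK/NACK on %d.%d (timing indicator %d)\n",
               k1[f], pucch_frame, pucch_slot, f);
      }
      return 0;
    }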
@@ -1349,95 +1205,64 @@ void nr_sr_reporting(gNB_MAC_INST *nrmac, frame_t SFN, sub_frame_t slot)
       LOG_D(NR_MAC, "%4d.%2d Scheduling Request identified\n", SFN, slot);
       NR_PUCCH_ResourceId_t *PucchResourceId = SchedulingRequestResourceConfig->resource;
 
-      int found = -1;
+      int idx = -1;
       NR_PUCCH_ResourceSet_t *pucchresset = pucch_Config->resourceSetToAddModList->list.array[0]; // set with formats 0,1
       int n_list = pucchresset->resourceList.list.count;
       for (int i = 0; i < n_list; i++) {
         if (*pucchresset->resourceList.list.array[i] == *PucchResourceId)
-          found = i;
+          idx = i;
       }
-      AssertFatal(found > -1, "SR resource not found among PUCCH resources");
+      AssertFatal(idx > -1, "SR resource not found among PUCCH resources");
 
-      /* loop through nFAPI PUCCH messages: if the UEs is in there in this slot
-       * with the resource_indicator, it means we already allocated that PUCCH
-       * resource for AckNack (e.g., the UE has been scheduled often), and we
-       * just need to add the SR_flag. Otherwise, just allocate in the internal
-       * PUCCH resource, and nr_schedule_pucch() will handle the rest */
-      NR_PUCCH_Resource_t *pucch_res = pucch_Config->resourceToAddModList->list.array[found];
-      /* for the moment, can only handle SR on PUCCH Format 0 */
-      DevAssert(pucch_res->format.present == NR_PUCCH_Resource__format_PR_format0);
-      nfapi_nr_ul_tti_request_t *ul_tti_req = &nrmac->UL_tti_req_ahead[0][slot];
-      bool nfapi_allocated = false;
-      for (int i = 0; i < ul_tti_req->n_pdus; ++i) {
-        if (ul_tti_req->pdus_list[i].pdu_type != NFAPI_NR_UL_CONFIG_PUCCH_PDU_TYPE)
-          continue;
-        nfapi_nr_pucch_pdu_t *pdu = &ul_tti_req->pdus_list[i].pucch_pdu;
-        /* check that it is our PUCCH F0. Assuming there can be only one */
-        if (pdu->rnti == UE->rnti
-            && pdu->format_type == 0 // does not use NR_PUCCH_Resource__format_PR_format0
-            && pdu->initial_cyclic_shift == pucch_res->format.choice.format0->initialCyclicShift
-            && pdu->nr_of_symbols == pucch_res->format.choice.format0->nrofSymbols
-            && pdu->start_symbol_index == pucch_res->format.choice.format0->startingSymbolIndex) {
-          LOG_D(NR_MAC, "%4d.%2d adding SR_flag 1 to PUCCH format 0 nFAPI SR for RNTI %04x\n", SFN, slot, pdu->rnti);
-          pdu->sr_flag = 1;
-          nfapi_allocated = true;
-          break;
-        } else if (pdu->rnti == UE->rnti
-                   && pdu->format_type == 2 // does not use NR_PUCCH_Resource__format_PR_format0
-                   && pdu->nr_of_symbols == pucch_res->format.choice.format2->nrofSymbols
-                   && pdu->start_symbol_index == pucch_res->format.choice.format2->startingSymbolIndex) {
-          LOG_D(NR_MAC, "%4d.%2d adding SR_flag 1 to PUCCH format 2 nFAPI SR for RNTI %04x\n", SFN, slot, pdu->rnti);
-          pdu->sr_flag = 1;
-          nfapi_allocated = true;
-          break;
-        } else if (pdu->rnti == UE->rnti
-                   && pdu->format_type == 1 // does not use NR_PUCCH_Resource__format_PR_format0
-                   && pdu->nr_of_symbols == pucch_res->format.choice.format1->nrofSymbols
-                   && pdu->start_symbol_index == pucch_res->format.choice.format1->startingSymbolIndex) {
-          LOG_D(NR_MAC, "%4d.%2d adding SR_flag 1 to PUCCH format 1 nFAPI SR for RNTI %04x\n", SFN, slot, pdu->rnti);
-          pdu->sr_flag = 1;
-          nfapi_allocated = true;
-          break;
-        } else if (pdu->rnti == UE->rnti
-                   && pdu->format_type == 3 // does not use NR_PUCCH_Resource__format_PR_format0
-                   && pdu->nr_of_symbols == pucch_res->format.choice.format3->nrofSymbols
-                   && pdu->start_symbol_index == pucch_res->format.choice.format3->startingSymbolIndex) {
-          LOG_D(NR_MAC, "%4d.%2d adding SR_flag 1 to PUCCH format 3 nFAPI SR for RNTI %04x\n", SFN, slot, pdu->rnti);
-          pdu->sr_flag = 1;
-          nfapi_allocated = true;
-          break;
-        } else if (pdu->rnti == UE->rnti
-                   && pdu->format_type == 4 // does not use NR_PUCCH_Resource__format_PR_format0
-                   && pdu->nr_of_symbols == pucch_res->format.choice.format4->nrofSymbols
-                   && pdu->start_symbol_index == pucch_res->format.choice.format4->startingSymbolIndex) {
-          LOG_D(NR_MAC, "%4d.%2d adding SR_flag 1 to PUCCH format 4 nFAPI SR for RNTI %04x\n", SFN, slot, pdu->rnti);
-          pdu->sr_flag = 1;
-          nfapi_allocated = true;
-          break;
-        }
-      }
-
-      if (nfapi_allocated)  // break scheduling resource loop, continue next UE
-        break;
-
-      /* we did not find it: check if current PUCCH is for the current slot, in
-       * which case we add the SR to it; otherwise, allocate SR separately */
-      NR_sched_pucch_t *curr_pucch = &sched_ctrl->sched_pucch[0];
-      if (curr_pucch->frame == SFN && curr_pucch->ul_slot == slot) {
-        if (curr_pucch->resource_indicator != found) {
-          LOG_W(NR_MAC, "%4d.%2d expected PUCCH in this slot to have resource indicator of SR (%d), skipping SR\n", SFN, slot, found);
-          continue;
-        }
-        curr_pucch->sr_flag = true;
-      } else {
-        NR_sched_pucch_t sched_sr = {
-          .frame = SFN,
-          .ul_slot = slot,
-          .sr_flag = true,
-          .resource_indicator = found,
-          .r_pucch = -1
-        };
-        nr_fill_nfapi_pucch(nrmac, SFN, slot, &sched_sr, UE);
-      }
+      NR_sched_pucch_t *curr_pucch = NULL;
+      bool found = false;
+      int free_pucch = -1;
+      for (int i = 0; i < sched_ctrl->sched_pucch_size; i++) {
+        curr_pucch = &sched_ctrl->sched_pucch[i];
+        if (curr_pucch->active &&
+            curr_pucch->frame == SFN &&
+            curr_pucch->ul_slot == slot &&
+            curr_pucch->resource_indicator == found) {
+          curr_pucch->sr_flag = true;
+          found = true;
+          break;
+        }
+        else if (!curr_pucch->active)
+          free_pucch = i; // found an available pucch structure
+      }
+      AssertFatal(found || free_pucch > 0, "Coulnd't find an available PUCCH resource to schedule SR\n");
+      if (!found) {
+        uint16_t *vrb_map_UL = &RC.nrmac[mod_id]->common_channels[CC_id].vrb_map_UL[slot * MAX_BWP_SIZE];
+        int bwp_start = ul_bwp->BWPStart;
+        int bwp_size = ul_bwp->BWPSize;
+        NR_sched_pucch_t *sched_sr = &sched_ctrl->sched_pucch[free_pucch];
+        bool ret = test_pucch0_vrb_occupation(sched_ctrl,
+                                              sched_sr,
+                                              vrb_map_UL,
+                                              scc,
+                                              pucch_Config,
+                                              -1,
+                                              bwp_start,
+                                              bwp_size);
+        if (!ret) {
+          LOG_E(NR_MAC, "Cannot schedule SR. PRBs not available\n");
+          continue;
+        }
+        sched_sr->frame = SFN;
+        sched_sr->ul_slot = slot;
+        sched_sr->sr_flag = true;
+        sched_sr->resource_indicator = idx;
+        sched_sr->r_pucch = -1;
+        sched_sr->active = true;
+        for (int l = 0; l < sched_sr->nr_of_symb; l++) {
+          uint16_t symb = SL_to_bitmap(sched_sr->start_symb + l, 1);
+          int prb;
+          if (l == 1 && sched_sr->second_hop_prb != 0)
+            prb = sched_sr->second_hop_prb;
+          else
+            prb = sched_sr->prb_start;
+          vrb_map_UL[bwp_start + prb] |= symb;
+        }
+      }
     }
   }
 }
...
openair2/LAYER2/NR_MAC_gNB/mac_proto.h

@@ -218,7 +218,6 @@ int nr_acknack_scheduling(int Mod_idP,
 void get_pdsch_to_harq_feedback(NR_PUCCH_Config_t *pucch_Config,
                                 nr_dci_format_t dci_format,
-                                int *max_fb_time,
                                 uint8_t *pdsch_to_harq_feedback);
 
 void nr_configure_css_dci_initial(nfapi_nr_dl_tti_pdcch_pdu_rel15_t *pdcch_pdu,
...
@@ -466,6 +465,10 @@ uint8_t get_mcs_from_cqi(int mcs_table, int cqi_table, int cqi_idx);
 uint8_t get_dl_nrOfLayers(const NR_UE_sched_ctrl_t *sched_ctrl, const nr_dci_format_t dci_format);
+
+void set_sched_pucch_list(NR_UE_sched_ctrl_t *sched_ctrl,
+                          NR_UE_UL_BWP_t *ul_bwp,
+                          NR_ServingCellConfigCommon_t *scc);
 const int get_dl_tda(const gNB_MAC_INST *nrmac, const NR_ServingCellConfigCommon_t *scc, int slot);
 const int get_ul_tda(const gNB_MAC_INST *nrmac, const NR_ServingCellConfigCommon_t *scc, int slot);
...
@@ -490,7 +493,7 @@ int get_mcs_from_bler(const NR_bler_options_t *bler_options,
 void UL_tti_req_ahead_initialization(gNB_MAC_INST *gNB, NR_ServingCellConfigCommon_t *scc, int n, int CCid);
 
-void nr_sr_reporting(gNB_MAC_INST *nrmac, frame_t frameP, sub_frame_t slotP);
+void nr_sr_reporting(gNB_MAC_INST *nrmac, frame_t frameP, sub_frame_t slotP, int mod_id);
 
 size_t dump_mac_stats(gNB_MAC_INST *gNB, char *output, size_t strlen, bool reset_rsrp);
...
openair2/LAYER2/NR_MAC_gNB/nr_mac_gNB.h

@@ -123,6 +123,7 @@ typedef struct NR_UE_UL_BWP {
   uint8_t transform_precoding;
   uint8_t mcs_table;
   nr_dci_format_t dci_format;
+  int max_fb_time;
 } NR_UE_UL_BWP_t;
 
 typedef enum {
...
@@ -354,6 +355,7 @@ typedef struct UE_info {
 } NR_UE_mac_ce_ctrl_t;
 
 typedef struct NR_sched_pucch {
+  bool active;
   int frame;
   int ul_slot;
   bool sr_flag;
...
@@ -560,9 +562,11 @@ typedef struct {
   /// corresponding to the sched_pusch/sched_pdsch structures below
   int cce_index;
   uint8_t aggregation_level;
 
   /// PUCCH scheduling information. Array of two: HARQ+SR in the first field,
   /// CSI in second. This order is important for nr_acknack_scheduling()!
-  NR_sched_pucch_t sched_pucch[2];
+  NR_sched_pucch_t *sched_pucch;
+  int sched_pucch_size;
 
   /// Sched PUSCH: scheduling decisions, copied into HARQ and cleared every TTI
   NR_sched_pusch_t sched_pusch;
...
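With these header changes, each UE carries a heap-allocated list of PUCCH occasions plus its size, and every occasion has an explicit active flag marking it as in use until nr_schedule_pucch() consumes it. A reduced sketch of the resulting ownership (simplified, not the full OAI structs):

    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct {
      bool active;   // entry is in use until the PUCCH pass consumes it
      int frame;
      int ul_slot;
      bool sr_flag;
      int csi_bits;
      int dai_c;
    } sched_pucch_t;

    typedef struct {
      sched_pucch_t *sched_pucch; // was a fixed two-element array before this commit
      int sched_pucch_size;
    } sched_ctrl_t;

    /* Grow (or first allocate) the per-UE list, zeroing only the new tail. */
    static void resize_pucch_list(sched_ctrl_t *ctrl, int list_size)
    {
      if (!ctrl->sched_pucch) {
        ctrl->sched_pucch = calloc(list_size, sizeof(*ctrl->sched_pucch));
        ctrl->sched_pucch_size = list_size;
      } else if (list_size > ctrl->sched_pucch_size) {
        ctrl->sched_pucch = realloc(ctrl->sched_pucch, list_size * sizeof(*ctrl->sched_pucch));
        memset(ctrl->sched_pucch + ctrl->sched_pucch_size, 0,
               (list_size - ctrl->sched_pucch_size) * sizeof(*ctrl->sched_pucch));
        ctrl->sched_pucch_size = list_size;
      }
    }

    int main(void)
    {
      sched_ctrl_t ctrl = {0};
      resize_pucch_list(&ctrl, 4); // e.g. after (re)configuring the UL BWP
      free(ctrl.sched_pucch);
      return 0;
    }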