Commit 97dc9997 authored by mir's avatar mir

First commit

parent ef06b145
...@@ -244,7 +244,7 @@ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -ggdb2 -Wl,-rpath -Wl,${C ...@@ -244,7 +244,7 @@ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -ggdb2 -Wl,-rpath -Wl,${C
# these changes are related to hardcoded path to include .h files # these changes are related to hardcoded path to include .h files
set(debugOpt "-ggdb2 -DMALLOC_CHECK_=3 -fno-delete-null-pointer-checks") set(debugOpt "-ggdb2 -DMALLOC_CHECK_=3 -fno-delete-null-pointer-checks")
set(CMAKE_C_FLAGS_DEBUG "${debugOpt} -O0") set(CMAKE_C_FLAGS_DEBUG "${debugOpt} -O0")
set(CMAKE_C_FLAGS_RELWITHDEBINFO "${debugOpt} -O2") set(CMAKE_C_FLAGS_RELWITHDEBINFO "${debugOpt} -O3")
set(CMAKE_C_FLAGS_RELEASE "-O3") set(CMAKE_C_FLAGS_RELEASE "-O3")
# Enable assert() for RelWithDebInfo builds # Enable assert() for RelWithDebInfo builds
...@@ -2230,6 +2230,8 @@ add_executable(nr-softmodem ...@@ -2230,6 +2230,8 @@ add_executable(nr-softmodem
${nr_rrc_h} ${nr_rrc_h}
${s1ap_h} ${s1ap_h}
# ${OPENAIR_BIN_DIR}/messages_xml.h # ${OPENAIR_BIN_DIR}/messages_xml.h
${OPENAIR_DIR}/common/utils/thread_pool/task_manager.c
${OPENAIR_DIR}/executables/nr-gnb.c ${OPENAIR_DIR}/executables/nr-gnb.c
${OPENAIR_DIR}/executables/nr-ru.c ${OPENAIR_DIR}/executables/nr-ru.c
${OPENAIR_DIR}/executables/nr-softmodem.c ${OPENAIR_DIR}/executables/nr-softmodem.c
...@@ -2401,6 +2403,7 @@ target_link_libraries(ldpctest PRIVATE ...@@ -2401,6 +2403,7 @@ target_link_libraries(ldpctest PRIVATE
) )
add_executable(nr_dlschsim add_executable(nr_dlschsim
${OPENAIR_DIR}/common/utils/thread_pool/task_manager.c
${OPENAIR1_DIR}/SIMULATION/NR_PHY/dlschsim.c ${OPENAIR1_DIR}/SIMULATION/NR_PHY/dlschsim.c
${OPENAIR1_DIR}/SIMULATION/NR_PHY/nr_dummy_functions.c ${OPENAIR1_DIR}/SIMULATION/NR_PHY/nr_dummy_functions.c
${OPENAIR_DIR}/common/utils/nr/nr_common.c ${OPENAIR_DIR}/common/utils/nr/nr_common.c
...@@ -2461,6 +2464,7 @@ target_link_libraries(nr_dlsim PRIVATE ...@@ -2461,6 +2464,7 @@ target_link_libraries(nr_dlsim PRIVATE
target_link_libraries(nr_dlsim PRIVATE asn1_nr_rrc_hdrs asn1_lte_rrc_hdrs) target_link_libraries(nr_dlsim PRIVATE asn1_nr_rrc_hdrs asn1_lte_rrc_hdrs)
add_executable(nr_prachsim add_executable(nr_prachsim
${OPENAIR_DIR}/common/utils/thread_pool/task_manager.c
${OPENAIR1_DIR}/SIMULATION/NR_PHY/prachsim.c ${OPENAIR1_DIR}/SIMULATION/NR_PHY/prachsim.c
${OPENAIR1_DIR}/SIMULATION/NR_PHY/nr_dummy_functions.c ${OPENAIR1_DIR}/SIMULATION/NR_PHY/nr_dummy_functions.c
${OPENAIR_DIR}/common/utils/nr/nr_common.c ${OPENAIR_DIR}/common/utils/nr/nr_common.c
...@@ -2486,6 +2490,7 @@ target_link_libraries(nr_ulschsim PRIVATE ...@@ -2486,6 +2490,7 @@ target_link_libraries(nr_ulschsim PRIVATE
target_link_libraries(nr_ulschsim PRIVATE asn1_nr_rrc_hdrs asn1_lte_rrc_hdrs) target_link_libraries(nr_ulschsim PRIVATE asn1_nr_rrc_hdrs asn1_lte_rrc_hdrs)
add_executable(nr_ulsim add_executable(nr_ulsim
${OPENAIR_DIR}/common/utils/thread_pool/task_manager.c
${OPENAIR1_DIR}/SIMULATION/NR_PHY/ulsim.c ${OPENAIR1_DIR}/SIMULATION/NR_PHY/ulsim.c
${OPENAIR1_DIR}/SIMULATION/NR_PHY/nr_dummy_functions.c ${OPENAIR1_DIR}/SIMULATION/NR_PHY/nr_dummy_functions.c
${OPENAIR_DIR}/common/utils/nr/nr_common.c ${OPENAIR_DIR}/common/utils/nr/nr_common.c
......
Thread pool implemented in C, following Sean Parent's 2016 talk "Better Code: Concurrency".
#include <assert.h>
#include <fcntl.h>
#include <inttypes.h> /* PRId64 for portable int64_t printing */
#include <pthread.h>  /* pthread_self (was missing) */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>     /* clock_gettime (was missing) */
#include <unistd.h>

#include "task_manager.h"
#define NUM_THREADS 4
#define NUM_JOBS 1024
/* Current CLOCK_MONOTONIC_RAW time in microseconds, rounded to the
 * nearest microsecond. Returns -1 if the clock cannot be read. */
int64_t time_now_us(void)
{
  struct timespec ts;
  if (clock_gettime(CLOCK_MONOTONIC_RAW, &ts) != 0)
    return -1;

  int64_t const ns = ts.tv_nsec;
  int64_t us = (int64_t)ts.tv_sec * 1000000 + ns / 1000;
  /* Round half up on the sub-microsecond remainder. */
  if (ns % 1000 >= 500)
    ++us;
  return us;
}
// Per-job payload handed to do_work: `a` offsets the Fibonacci argument,
// `time` is a scratch timing slot (always written as 0 by main).
typedef struct{
  int64_t a;
  int64_t time;
} pair_t;
/* Intentionally naive exponential-time Fibonacci, used purely as CPU
 * burn for the benchmark. The assert guards against absurd inputs. */
static inline
int64_t naive_fibonnacci(int64_t a)
{
  assert(a < 1000);
  return (a < 2) ? a : naive_fibonnacci(a - 1) + naive_fibonnacci(a - 2);
}
//static _Thread_local int64_t counter = 0;
// File descriptor for the trace-marker sink; currently unused because the
// write_marker_ft_mir call in do_work is commented out.
static
int marker_fd;
/* Worker task: burns CPU with a naive Fibonacci computation and formats
 * the elapsed time into a local buffer (the output sinks are disabled). */
void do_work(void* arg)
{
  int64_t const start = time_now_us();
  pair_t* a = (pair_t*)arg;
  naive_fibonnacci(23 + a->a);
  int64_t const stop = time_now_us();

  char buffer[100] = {0};
  /* BUG FIX: pthread_t is opaque (unsigned long on glibc) and int64_t is
   * not `long` everywhere; the old "%lu"/"%ld" specifiers were a
   * printf type mismatch (UB) on some platforms. Cast pthread_self()
   * explicitly and use PRId64 for the timings. */
  int ret = snprintf(buffer, sizeof(buffer),
                     "ID %lu Fib elapsed %" PRId64 " start-stop %" PRId64 " - %" PRId64 " \n",
                     (unsigned long)pthread_self(), stop - start, start, stop);
  assert(ret > 0 && ret < (int)sizeof(buffer));
  (void)buffer; /* kept for the disabled sinks below */
  // write_marker_ft_mir(marker_fd, buffer);
  // puts(buffer);
}
/* Benchmark driver: submits NUM_JOBS fibonacci tasks in batches of 8,
 * waits for each batch, and prints per-batch and total timings. */
int main()
{
  task_manager_t man = {0};
  init_task_manager(&man, NUM_THREADS);
  usleep(100);

  pair_t* arr = calloc(NUM_JOBS, sizeof(pair_t));
  assert(arr != NULL);

  int64_t const begin = time_now_us();
  for(int k = 0; k < NUM_JOBS/8; ++k){
    for(int i = 0; i < 8; ++i){
      /* BUG FIX: the original indexed arr[i], so only the first 8 of the
       * NUM_JOBS allocated elements were ever used; give each job its
       * own element. */
      pair_t* pa = &arr[k*8 + i];
      pa->a = 0; //i%10;
      pa->time = 0;
      /* BUG FIX: `{.args = pa, t.func = do_work}` assigned through `t`
       * while `t` was still being initialized; use a proper designator. */
      task_t t = {.args = pa, .func = do_work};
      async_task_manager(&man, t);
    }
    printf("Waiting %" PRId64 " \n", time_now_us());
    trigger_and_wait_all_task_manager(&man);
    printf("Done %" PRId64 " \n", time_now_us());
  }

  free_task_manager(&man, NULL);
  printf("Total elapsed %" PRId64 " \n", time_now_us() - begin);
  free(arr);
  return EXIT_SUCCESS;
}
#!/bin/bash
# Prepare an isolated-CPU benchmarking environment: herd all movable tasks
# and IRQs onto CPU 0, quiesce periodic kernel work, run the benchmark
# under ftrace, and dump per-CPU traces for CPUs 4-7.
# NOTE(review): must be run as root; writes directly into /proc and /sys.

# Full dyntick CPU on which we'll run the user loop,
# it must be part of nohz_full kernel parameter
# NOTE(review): TARGET is never referenced below — the taskset call uses
# CPUs 3-7; confirm which is intended.
TARGET=4
# Migrate all possible tasks to CPU 0
# (kernel-pinned tasks will refuse; taskset errors are ignored)
for P in $(ls /proc)
do
if [ -x "/proc/$P/task/" ]
then
echo $P
taskset -acp 0 $P
fi
done
# Migrate irqs to CPU 0 (IRQ 0 and unmovable IRQs are skipped/fail silently)
for D in $(ls /proc/irq)
do
if [[ -x "/proc/irq/$D" && $D != "0" ]]
then
echo $D
echo 1 > /proc/irq/$D/smp_affinity
fi
done
# Delay the annoying vmstat timer far away
sysctl vm.stat_interval=120
# Shutdown nmi watchdog as it uses perf events
sysctl -w kernel.watchdog=0
# Remove -rt task runtime limit
echo -1 > /proc/sys/kernel/sched_rt_runtime_us
# Pin the writeback workqueue to CPU0
echo 1 > /sys/bus/workqueue/devices/writeback/cpumask
# Reset and configure ftrace before enabling it
DIR=/sys/kernel/debug/tracing
echo > $DIR/trace
echo 0 > $DIR/tracing_on
# Uncomment the below for more details on what disturbs the CPU
echo 0 > $DIR/events/irq/enable
echo 1 > $DIR/events/sched/sched_switch/enable
echo 1 > $DIR/events/workqueue/workqueue_queue_work/enable
echo 1 > $DIR/events/workqueue/workqueue_execute_start/enable
echo 1 > $DIR/events/timer/hrtimer_expire_entry/enable
echo 1 > $DIR/events/timer/tick_stop/enable
echo nop > $DIR/current_tracer
echo 1 > $DIR/tracing_on
# Run a 10 secs user loop on target
# NOTE(review): a.out is the thread-pool benchmark; cores 3-7 match the
# idx+4 pinning done inside worker_thread plus the submitting thread.
taskset -c 3-7 ./a.out
#sleep 20
#killall a.out
# Checkout the trace in trace.* file
cat /sys/kernel/debug/tracing/per_cpu/cpu4/trace > trace.4
cat /sys/kernel/debug/tracing/per_cpu/cpu5/trace > trace.5
cat /sys/kernel/debug/tracing/per_cpu/cpu6/trace > trace.6
cat /sys/kernel/debug/tracing/per_cpu/cpu7/trace > trace.7
#ifndef TASK_WORK_STEALING_THREAD_POOL_H
#define TASK_WORK_STEALING_THREAD_POOL_H

/* One unit of work for the thread pool: a worker invokes func(args).
 * Ownership of args stays with the submitter unless the task frees it. */
typedef struct{
  void* args;
  void (*func)(void* args);
} task_t;

#endif
#define _GNU_SOURCE /* gettid, CPU_SET, sched_setaffinity */
#include "task_manager.h"

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <poll.h>
#include <sched.h>        /* sched_setaffinity (was pulled in indirectly) */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>  /* Definition of SYS_* constants */
#include <sys/types.h>
#include <time.h>         /* clock_gettime (was missing) */
#include <unistd.h>

#include <linux/futex.h>  /* Definition of FUTEX_* constants */
/* Spin-wait hint: PAUSE on x86, YIELD on aarch64. Reduces power draw and
 * SMT sibling contention inside busy-wait loops; compile error elsewhere. */
#if defined (__i386__) || defined(__x86_64__)
#define pause_or_yield __builtin_ia32_pause
#elif __aarch64__
#define pause_or_yield() asm volatile("yield" ::: "memory")
#else
static_assert(0!=0, "Unknown CPU architecture");
#endif
/* Current CLOCK_MONOTONIC_RAW time in microseconds (nearest-microsecond
 * rounding), or -1 on clock failure. */
static
int64_t time_now_us(void)
{
  struct timespec ts;
  if (clock_gettime(CLOCK_MONOTONIC_RAW, &ts) != 0)
    return -1;

  int64_t const nanos = ts.tv_nsec;
  int64_t micros = (int64_t)ts.tv_sec * 1000000 + nanos / 1000;
  /* Round half up. */
  if (nanos % 1000 >= 500)
    micros += 1;
  return micros;
}
/* Pin the calling thread to the given CPU core; aborts on failure. */
static
void pin_thread_to_core(int core_num)
{
  cpu_set_t set = {0};
  CPU_ZERO(&set);
  CPU_SET(core_num, &set);

  int ret = sched_setaffinity(gettid(), sizeof(set), &set);
  assert(ret != -1);

  /* BUG FIX: pthread_t is unsigned (unsigned long on glibc); "%ld" was a
   * signed/unsigned printf mismatch. Also fixes the "Pining" typo. */
  printf("Pinning into core %d id %lu \n", core_num, (unsigned long)pthread_self());
}
//////////////////////////////
//////////////////////////////
////////// RING //
//////////////////////////////
//////////////////////////////
//////////////////////////////
// Growable ring buffer of tasks. Capacity is kept a power of two so the
// monotonically increasing head/tail counters can be mapped onto array
// slots with a cheap bitmask (see mask()); head == tail means empty.
typedef struct seq_ring_buf_s
{
  // const size_t elt_size;
  task_t* array;
  size_t cap;           // slot count, always a power of two
  uint32_t head;        // monotonically increasing write counter
  uint32_t tail;        // monotonically increasing read counter
  _Atomic uint64_t sz;  // cached element count, mirrors head - tail
} seq_ring_task_t;

// Optional per-task cleanup callback used when destroying a ring.
typedef void (*seq_free_func)(task_t*);

// For working correctly, maintain the default elements to a multiple of 2
#define DEFAULT_ELM 32
// Number of queued elements. head and tail only ever grow, so unsigned
// wrap-around keeps their difference correct.
static
size_t size_seq_ring_task(seq_ring_task_t* r)
{
  assert(r != NULL);
  uint32_t const used = r->head - r->tail;
  return used;
}
// Map a monotonically increasing counter onto a power-of-two capacity.
inline static
uint32_t mask(uint32_t cap, uint32_t val)
{
  uint32_t const slot = val & (cap - 1u);
  return slot;
}
// The ring is full when cap-1 slots are used: one slot is sacrificed to
// distinguish "full" from "empty".
static
bool full(seq_ring_task_t* r)
{
  size_t const used = size_seq_ring_task(r);
  return used == r->cap - 1;
}
// Double the ring's capacity, linearizing the live range [tail, head)
// to the front of the new array. Only called when the ring is full,
// i.e. exactly cap-1 elements are present.
static
void enlarge_buffer(seq_ring_task_t* r)
{
  assert(r != NULL);
  assert(full(r));

  const uint32_t factor = 2;
  task_t* tmp_buffer = calloc(r->cap * factor, sizeof(task_t) );
  assert(tmp_buffer != NULL);

  const uint32_t head_pos = mask(r->cap, r->head);
  const uint32_t tail_pos = mask(r->cap, r->tail);

  if(head_pos > tail_pos){
    // live elements are contiguous in the old array
    memcpy(tmp_buffer, r->array + tail_pos , (head_pos-tail_pos)*sizeof(task_t) );
  } else {
    // live elements wrap around; copy the two halves back-to-back
    memcpy(tmp_buffer, r->array + tail_pos, (r->cap-tail_pos)*sizeof(task_t));
    memcpy(tmp_buffer + (r->cap-tail_pos), r->array, head_pos*sizeof(task_t));
  }

  r->cap *= factor;
  free(r->array);
  r->array = tmp_buffer;
  // The cap/2 - 1 (old capacity minus one) live elements now occupy the
  // front of the new array.
  r->tail = 0;
  r->head = r->cap/2 - 1;
}
// Allocate the ring with its default power-of-two capacity, empty.
static
void init_seq_ring_task(seq_ring_task_t* r)
{
  assert(r != NULL);

  task_t* storage = calloc(DEFAULT_ELM, sizeof(task_t));
  assert(storage != NULL);

  r->array = storage;
  r->cap = DEFAULT_ELM;
  r->head = 0;
  r->tail = 0;
  r->sz = 0;
}
// Release the ring's storage. Generalized: a non-NULL cleanup callback is
// now invoked on any tasks still queued (previously fp had to be NULL,
// enforced by an assert, so passing NULL keeps the old behavior exactly).
static
void free_seq_ring_task(seq_ring_task_t* r, seq_free_func fp)
{
  assert(r != NULL);

  if(fp != NULL){
    // Drain the live range [tail, head), applying the cleanup callback.
    while(r->head != r->tail){
      fp(&r->array[mask(r->cap, r->tail)]);
      r->tail += 1;
    }
  }

  free(r->array);
}
// Append one task, growing the storage first if the ring is full.
static
void push_back_seq_ring_task(seq_ring_task_t* r, task_t t)
{
  assert(r != NULL);

  if(full(r))
    enlarge_buffer(r);

  uint32_t const slot = mask(r->cap, r->head);
  r->array[slot] = t;
  r->head += 1;
  r->sz += 1;
}
// Remove and return the oldest task; the ring must not be empty.
static
task_t pop_seq_ring_task(seq_ring_task_t* r )
{
  assert(r != NULL);
  assert(size_seq_ring_task(r) > 0);

  uint32_t const slot = mask(r->cap, r->tail);
  task_t out = r->array[slot];
  r->tail += 1;
  r->sz -= 1;
  return out;
}
//////////////////////////////
//////////////////////////////
////////// END RING //
//////////////////////////////
//////////////////////////////
//////////////////////////////
//////////////////////////////
//////////////////////////////
////////// Start Notification Queue //
//////////////////////////////
//////////////////////////////
//////////////////////////////
// Per-worker notification queue: a mutex-protected task ring plus
// pointers to the manager-wide futex word and "waiting" flag that decide
// whether idle workers sleep (futex) or spin.
typedef struct {
  pthread_mutex_t mtx;
  pthread_cond_t cv;       // initialized but not waited on in this file
  seq_ring_task_t r;       // task storage, guarded by mtx
  _Atomic int32_t* futex;  // shared futex word owned by task_manager_t
  _Atomic bool* waiting;   // shared spin-mode flag owned by task_manager_t
  _Atomic int done;        // set to 1 once the queue is shut down
} not_q_t;

// Result of a try_pop: t is only valid when success is true.
typedef struct{
  task_t t;
  bool success;
} ret_try_t;
// Initialize a notification queue, wiring it to the manager's shared
// futex word and waiting flag.
static
void init_not_q(not_q_t* q, _Atomic int32_t* futex, _Atomic bool* waiting)
{
  assert(q != NULL);

  q->done = 0;
  q->waiting = waiting;

  init_seq_ring_task(&q->r);

  // BUG FIX: POSIX requires pthread_mutexattr_init before any attribute
  // use; a zero-initialized pthread_mutexattr_t is not a valid attribute
  // object. Also destroy it after the mutex is created.
  pthread_mutexattr_t attr;
  int rc = pthread_mutexattr_init(&attr);
  assert(rc == 0);
#ifdef _DEBUG
  int const rc_mtx = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
  assert(rc_mtx == 0);
#endif
  rc = pthread_mutex_init(&q->mtx, &attr);
  assert(rc == 0 && "Error while creating the mtx");
  rc = pthread_mutexattr_destroy(&attr);
  assert(rc == 0);

  pthread_condattr_t* c_attr = NULL;
  rc = pthread_cond_init(&q->cv, c_attr);
  assert(rc == 0);

  q->futex = futex;
}
// Destroy a queue. Precondition: done_not_q() was called and the owning
// worker has exited, so no thread can still touch mtx/cv.
static
void free_not_q(not_q_t* q, void (*clean)(task_t*) )
{
  assert(q != NULL);
  assert(q->done == 1);

  free_seq_ring_task(&q->r, clean);

  int ret = pthread_mutex_destroy(&q->mtx);
  assert(ret == 0);
  ret = pthread_cond_destroy(&q->cv);
  assert(ret == 0);
}
// Non-blocking enqueue: gives up and returns false if the queue mutex is
// currently held; returns true once the task is stored.
static
bool try_push_not_q(not_q_t* q, task_t t)
{
  assert(q != NULL);
  assert(q->done == 0 || q->done ==1);
  assert(t.func != NULL);
  assert(t.args != NULL);

  bool const locked = pthread_mutex_trylock(&q->mtx) == 0;
  if(!locked)
    return false;

  push_back_seq_ring_task(&q->r, t);

  int const ret = pthread_mutex_unlock(&q->mtx);
  assert(ret == 0);
  return true;
}
// Blocking enqueue: waits for the queue mutex if needed.
static
void push_not_q(not_q_t* q, task_t t)
{
  assert(q != NULL);
  assert(q->done == 0 || q->done ==1);
  assert(t.func != NULL);

  int rc = pthread_mutex_lock(&q->mtx);
  assert(rc == 0);

  push_back_seq_ring_task(&q->r, t);

  // BUG FIX: the unlock result was silently ignored here while every
  // other lock/unlock in this file is asserted.
  rc = pthread_mutex_unlock(&q->mtx);
  assert(rc == 0);
}
// Non-blocking pop: returns success == false when the mutex is contended
// or the ring is empty; callers are expected to move on to another queue
// (this is the work-stealing path in worker_thread).
static
ret_try_t try_pop_not_q(not_q_t* q)
{
  assert(q != NULL);

  ret_try_t ret = {.success = false};

  int rc = pthread_mutex_trylock(&q->mtx);
  assert(rc == 0 || rc == EBUSY);
  if(rc == EBUSY)
    return ret;

  assert(q->done == 0 || q->done ==1);

  size_t sz = size_seq_ring_task(&q->r);
  if(sz == 0){
    rc = pthread_mutex_unlock(&q->mtx);
    assert(rc == 0);
    return ret;
  }

  ret.t = pop_seq_ring_task(&q->r);

  rc = pthread_mutex_unlock(&q->mtx);
  assert(rc == 0);

  ret.success = true;
  return ret;
}
// Blocking pop. Returns true with *out filled, or false once the queue
// has been shut down (done == 1). Idle strategy: spin while the manager's
// "waiting" flag is set, otherwise sleep on the shared futex word.
static
bool pop_not_q(not_q_t* q, ret_try_t* out)
{
  assert(q != NULL);
  assert(out != NULL);
  assert(q->done == 0 || q->done ==1);

label:
  // Spin-phase: while the manager keeps workers in spin mode, busy-wait.
  // Let's be conservative and not use memory_order_relaxed.
  while (atomic_load_explicit(q->waiting, memory_order_seq_cst) == true){
    // X86 PAUSE / ARM YIELD reduces contention between hyper-threads.
    pause_or_yield();
  }

  int rc = pthread_mutex_lock(&q->mtx);
  assert(rc == 0);

  if(size_seq_ring_task(&q->r) == 0 && q->done == 0){
    rc = pthread_mutex_unlock(&q->mtx);
    assert(rc == 0);

    // FUTEX_WAIT re-checks the word against val atomically in the kernel,
    // so a wake between the read below and the syscall is not lost.
    int val = *q->futex;
    long r = syscall(SYS_futex, q->futex, FUTEX_WAIT_PRIVATE, val, NULL, 0);
    // BUG FIX: FUTEX_WAIT legitimately returns -1 with EAGAIN when the
    // futex word changed before the kernel's check, and with EINTR on
    // signal delivery; the previous bare assert(r != -1) aborted on
    // these benign races.
    assert(r != -1 || errno == EAGAIN || errno == EINTR);
    goto label;
  }

  assert(q->done == 0 || q->done ==1);

  if(q->done == 1){
    rc = pthread_mutex_unlock(&q->mtx);
    assert(rc == 0);
    return false;
  }

  out->t = pop_seq_ring_task(&q->r);

  rc = pthread_mutex_unlock(&q->mtx);
  assert(rc == 0);
  return true;
}
// Mark the queue as finished and wake sleeping workers so they can exit.
static
void done_not_q(not_q_t* q)
{
  assert(q != NULL);

  int rc = pthread_mutex_lock(&q->mtx);
  assert(rc == 0);

  q->done = 1;

  // BUG FIX: all queues share a single futex word, so waking just one
  // waiter may resume a worker whose own queue is not done yet; that
  // worker re-sleeps and has consumed the wake-up, which can leave
  // another worker blocked forever during shutdown. Waking every waiter
  // (INT_MAX) makes shutdown reliable; spurious wake-ups are harmless
  // because pop_not_q re-checks its condition under the mutex.
  long r = syscall(SYS_futex, q->futex, FUTEX_WAKE_PRIVATE, INT_MAX, NULL, NULL, 0);
  assert(r != -1);

  rc = pthread_mutex_unlock(&q->mtx);
  assert(rc == 0);
}
//////////////////////////////
//////////////////////////////
////////// END Notification Queue //
//////////////////////////////
//////////////////////////////
//////////////////////////////
//static int marker_fd;
// Startup payload for a worker thread; heap-allocated by
// init_task_manager and freed by the worker itself on exit.
typedef struct{
  task_manager_t* man;
  int idx;  // worker index, also selects this worker's own queue
} task_thread_args_t;
// Worker main loop. First scans the queues starting at its own with
// non-blocking pops (the work-stealing path); when nothing is found it
// publishes the count of tasks it completed and blocks on its own queue.
// Exits when its queue is shut down via done_not_q().
static
void* worker_thread(void* arg)
{
  assert(arg != NULL);

  task_thread_args_t* args = (task_thread_args_t*)arg;
  int const idx = args->idx;
  // NOTE(review): the +4 core offset is hardcoded to match the tracing
  // script's CPU layout (workers on CPUs 4-7) — confirm before reusing
  // on a differently-provisioned machine.
  pin_thread_to_core(idx+4);

  task_manager_t* man = args->man;
  uint32_t const len = man->len_thr;
  // Scan window: starts at this worker's queue and wraps around the
  // others (i % len), visiting each queue at least once.
  uint32_t const num_it = 2*(man->len_thr + idx);
  not_q_t* q_arr = (not_q_t*)man->q_arr;

  // Tasks completed since the last time num_task was updated; batched to
  // avoid hammering the shared atomic counter on every task.
  int acc_num_task = 0;
  for(;;){
    ret_try_t ret = {.success = false};

    for(uint32_t i = idx; i < num_it; ++i){
      ret = try_pop_not_q(&q_arr[i%len]);
      if(ret.success == true){
        break;
      }
    }

    if(ret.success == false){
      // Nothing to steal: flush the completion count (num_task hitting 0
      // is what releases trigger_and_wait_all_task_manager), then block
      // on our own queue until new work or shutdown arrives.
      man->num_task -= acc_num_task;
      acc_num_task = 0;
      if(pop_not_q(&q_arr[idx], &ret) == false)
        break;  // queue shut down
    }

    ret.t.func(ret.t.args);
    acc_num_task +=1;
  }

  free(args);
  return NULL;
}
// Create num_threads SCHED_RR workers (priority 94, pinned by
// worker_thread), one notification queue per worker, and the shared
// futex/waiting machinery. Aborts on any initialization failure.
void init_task_manager(task_manager_t* man, uint32_t num_threads)
{
  assert(man != NULL);
  assert(num_threads > 0 && num_threads < 33 && "Do you have zero or more than 32 processors??");

  man->q_arr = calloc(num_threads, sizeof(not_q_t));
  assert(man->q_arr != NULL && "Memory exhausted");
  man->futex = 0;
  man->waiting = false;

  not_q_t* q_arr = (not_q_t*)man->q_arr;
  for(uint32_t i = 0; i < num_threads; ++i){
    init_not_q(&q_arr[i], &man->futex, &man->waiting);
  }

  man->t_arr = calloc(num_threads, sizeof(pthread_t));
  assert(man->t_arr != NULL && "Memory exhausted" );
  man->len_thr = num_threads;

  for(uint32_t i = 0; i < num_threads; ++i){
    task_thread_args_t* args = malloc(sizeof(task_thread_args_t) );
    assert(args != NULL && "Memory exhausted"); // BUG FIX: was unchecked
    args->idx = i;
    args->man = man;

    pthread_attr_t attr;
    int ret = pthread_attr_init(&attr);
    assert(ret == 0);
    ret = pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
    assert(ret == 0);
    ret = pthread_attr_setschedpolicy(&attr, SCHED_RR);
    assert(ret == 0);
    struct sched_param sparam = {0};
    sparam.sched_priority = 94; // NOTE(review): needs CAP_SYS_NICE/root
    ret = pthread_attr_setschedparam(&attr, &sparam);
    assert(ret == 0); // BUG FIX: was unchecked

    int rc = pthread_create(&man->t_arr[i], &attr, worker_thread, args);
    assert(rc == 0);

    // BUG FIX: release the attribute object (was leaked per thread).
    rc = pthread_attr_destroy(&attr);
    assert(rc == 0);
  }

  man->index = 0;

  // BUG FIX: POSIX requires pthread_mutexattr_init before use; a
  // zero-initialized attribute object is invalid. Destroy it afterwards.
  pthread_mutexattr_t attr;
  int rc = pthread_mutexattr_init(&attr);
  assert(rc == 0);
#ifdef _DEBUG
  int const rc_mtx = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
  assert(rc_mtx == 0);
#endif
  rc = pthread_mutex_init(&man->wait_mtx, &attr);
  assert(rc == 0 && "Error while creating the mtx");
  rc = pthread_mutexattr_destroy(&attr);
  assert(rc == 0);

  pthread_condattr_t* c_attr = NULL;
  rc = pthread_cond_init(&man->wait_cv, c_attr);
  assert(rc == 0);
  //pin_thread_to_core(3);
}
// Shut down every worker, join them, and release all resources.
// `clean` is forwarded to each queue's destructor.
void free_task_manager(task_manager_t* man, void (*clean)(task_t*))
{
  // BUG FIX (consistency): every other public entry point asserts man.
  assert(man != NULL);

  not_q_t* q_arr = (not_q_t*)man->q_arr;

  // Leave spin mode so idle workers fall through to the futex wait,
  // where done_not_q's wake-up can reach them.
  atomic_store(&man->waiting, false);
  for(uint32_t i = 0; i < man->len_thr; ++i){
    done_not_q(&q_arr[i]);
  }

  for(uint32_t i = 0; i < man->len_thr; ++i){
    int rc = pthread_join(man->t_arr[i], NULL);
    assert(rc == 0);
  }

  for(uint32_t i = 0; i < man->len_thr; ++i){
    free_not_q(&q_arr[i], clean);
  }
  free(man->q_arr);
  free(man->t_arr);

  int rc = pthread_mutex_destroy(&man->wait_mtx);
  assert(rc == 0);
  rc = pthread_cond_destroy(&man->wait_cv);
  assert(rc == 0);
}
// Submit one task. Rotates over the queues starting from a round-robin
// cursor, trying non-blocking pushes first; if every queue's mutex is
// contended, falls back to a blocking push on the cursor's queue.
void async_task_manager(task_manager_t* man, task_t t)
{
  assert(man != NULL);
  assert(t.func != NULL);
  //assert(t.args != NULL);

  uint64_t const index = man->index++;
  uint32_t const num_q = man->len_thr;
  not_q_t* q_arr = (not_q_t*)man->q_arr;

  bool stored = false;
  for(uint32_t i = 0; i < num_q && !stored; ++i){
    stored = try_push_not_q(&q_arr[(i+index) % num_q], t);
  }
  if(!stored){
    push_not_q(&q_arr[index % num_q], t);
  }

  man->num_task += 1;
}
// Put workers into spin mode and wake any of them sleeping on the futex.
void trigger_and_spin(task_manager_t* man)
{
  assert(man != NULL);

  atomic_store(&man->waiting, true);
  // FUTEX_WAKE_PRIVATE
  man->futex = 0;

  long const woken = syscall(SYS_futex, &man->futex, FUTEX_WAKE_PRIVATE, INT_MAX, NULL, NULL, 0);
  if(woken == -1){
    printf("%d errno \n", errno);
  }
  assert(woken != -1);
}
// Wake all workers, then busy-wait until num_task drops to zero, i.e.
// every task submitted via async_task_manager has completed.
void trigger_and_wait_all_task_manager(task_manager_t* man)
{
  assert(man != NULL);
  trigger_and_spin(man);
  // Clear spin mode so workers proceed past pop_not_q's spin loop and
  // start draining the queues.
  atomic_store(&man->waiting, false);
  // GCC thread sanitizer complains, while Clang does not. Let's be conservative and pay some cycles
  // memory_order_relaxed
  while (atomic_load_explicit(&man->num_task, memory_order_relaxed)){
    // Issue X86 PAUSE or ARM YIELD instruction to reduce contention between
    // hyper-threads
    pause_or_yield();
  }
  // Re-arm spin mode: workers that go idle now spin in pop_not_q instead
  // of futex-sleeping — presumably to cut wake-up latency for the next
  // batch; NOTE(review): confirm this is intended, it burns CPU between
  // batches.
  atomic_store(&man->waiting, true);
}
// Busy-wait until every submitted task has been executed (num_task == 0).
void wait_all_task_manager(task_manager_t* man)
{
  assert(man != NULL);

  for(;;){
    uint_fast64_t const pending = atomic_load_explicit(&man->num_task, memory_order_relaxed);
    if(pending == 0)
      break;
    // X86 PAUSE / ARM YIELD reduces contention between hyper-threads.
    pause_or_yield();
  }
}
#ifndef TASK_MANAGER_WORKING_STEALING_H
#define TASK_MANAGER_WORKING_STEALING_H

// Work-stealing thread pool; implementation in task_manager.c.
// Comment for deactivating ws tpool
//#define TASK_MANAGER

#include "task.h"
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct{
  pthread_t* t_arr;               // worker threads
  size_t len_thr;                 // number of workers (== number of queues)
  atomic_uint_fast64_t index;     // round-robin cursor for submissions
  void* q_arr;                    // opaque array of per-worker queues (not_q_t)
  atomic_uint_fast64_t num_task;  // tasks submitted but not yet completed
  pthread_cond_t wait_cv;         // initialized but not waited on currently
  pthread_mutex_t wait_mtx;       // initialized but not used currently
  _Atomic int32_t futex;          // futex word idle workers sleep on
  _Atomic bool waiting;           // true => idle workers spin instead of sleeping
} task_manager_t;

// Spawn num_threads workers (valid range 1..32); aborts on failure.
void init_task_manager(task_manager_t* man, uint32_t num_threads);

// Shut down and join all workers; clean is forwarded to queue destruction.
void free_task_manager(task_manager_t* man, void (*clean)(task_t* args) );

// Submit one task; non-blocking in the common (uncontended) case.
void async_task_manager(task_manager_t* man, task_t t);

// Wake all workers and put them into spin mode.
void trigger_and_spin(task_manager_t* man);

// Wake all workers, then busy-wait until every submitted task has run.
void trigger_and_wait_all_task_manager(task_manager_t* man);

// Busy-wait until every submitted task has run (no wake-up issued).
void wait_all_task_manager(task_manager_t* man);

#endif
...@@ -47,6 +47,41 @@ ...@@ -47,6 +47,41 @@
#include "common/utils/LOG/vcd_signal_dumper.h" #include "common/utils/LOG/vcd_signal_dumper.h"
#include "common/utils/LOG/log.h" #include "common/utils/LOG/log.h"
#include <syscall.h> #include <syscall.h>
//#include "SCHED_NR/phy_procedures_nr_gNB.h"
#include "common/utils/thread_pool/task_manager.h"
#include <stdint.h>
#include <time.h>
#include <stdalign.h>
//#include "nr_ulsch_decoding.h"
static inline
int64_t time_now_us(void)
{
struct timespec tms;
/* The C11 way */
/* if (! timespec_get(&tms, TIME_UTC)) */
/* POSIX.1-2008 way */
if (clock_gettime(CLOCK_REALTIME,&tms)) {
return -1;
}
/* seconds, multiplied with 1 million */
int64_t micros = tms.tv_sec * 1000000;
/* Add full microseconds */
micros += tms.tv_nsec/1000;
/* round up if necessary */
if (tms.tv_nsec % 1000 >= 500) {
++micros;
}
return micros;
}
//#define DEBUG_ULSCH_DECODING //#define DEBUG_ULSCH_DECODING
//#define gNB_DEBUG_TRACE //#define gNB_DEBUG_TRACE
...@@ -117,142 +152,142 @@ NR_gNB_ULSCH_t new_gNB_ulsch(uint8_t max_ldpc_iterations, uint16_t N_RB_UL) ...@@ -117,142 +152,142 @@ NR_gNB_ULSCH_t new_gNB_ulsch(uint8_t max_ldpc_iterations, uint16_t N_RB_UL)
static uint32_t prnt_crc_cnt = 0; static uint32_t prnt_crc_cnt = 0;
#endif #endif
void nr_processULSegment(void *arg) void nr_processULSegment(void *arg)
{ {
ldpcDecode_t *rdata = (ldpcDecode_t *)arg; ldpcDecode_t *rdata = (ldpcDecode_t *)arg;
PHY_VARS_gNB *phy_vars_gNB = rdata->gNB; PHY_VARS_gNB *phy_vars_gNB = rdata->gNB;
NR_UL_gNB_HARQ_t *ulsch_harq = rdata->ulsch_harq; NR_UL_gNB_HARQ_t *ulsch_harq = rdata->ulsch_harq;
t_nrLDPC_dec_params *p_decoderParms = &rdata->decoderParms; t_nrLDPC_dec_params *p_decoderParms = &rdata->decoderParms;
int length_dec; int length_dec;
int no_iteration_ldpc; int no_iteration_ldpc;
int Kr; int Kr;
int Kr_bytes; int Kr_bytes;
int K_bits_F; int K_bits_F;
uint8_t crc_type; uint8_t crc_type;
int i; int i;
int j; int j;
int r = rdata->segment_r; int r = rdata->segment_r;
int A = rdata->A; int A = rdata->A;
int E = rdata->E; int E = rdata->E;
int Qm = rdata->Qm; int Qm = rdata->Qm;
int rv_index = rdata->rv_index; int rv_index = rdata->rv_index;
int r_offset = rdata->r_offset; int r_offset = rdata->r_offset;
uint8_t kc = rdata->Kc; uint8_t kc = rdata->Kc;
short *ulsch_llr = rdata->ulsch_llr; short *ulsch_llr = rdata->ulsch_llr;
int max_ldpc_iterations = p_decoderParms->numMaxIter; int max_ldpc_iterations = p_decoderParms->numMaxIter;
int8_t llrProcBuf[OAI_UL_LDPC_MAX_NUM_LLR] __attribute__((aligned(32))); int8_t llrProcBuf[OAI_UL_LDPC_MAX_NUM_LLR] __attribute__((aligned(32)));
int16_t z[68 * 384 + 16] __attribute__((aligned(16))); int16_t z[68 * 384 + 16] __attribute__((aligned(16)));
int8_t l[68 * 384 + 16] __attribute__((aligned(16))); int8_t l[68 * 384 + 16] __attribute__((aligned(16)));
__m128i *pv = (__m128i *)&z; __m128i *pv = (__m128i *)&z;
__m128i *pl = (__m128i *)&l; __m128i *pl = (__m128i *)&l;
Kr = ulsch_harq->K; Kr = ulsch_harq->K;
Kr_bytes = Kr >> 3; Kr_bytes = Kr >> 3;
K_bits_F = Kr - ulsch_harq->F; K_bits_F = Kr - ulsch_harq->F;
t_nrLDPC_time_stats procTime = {0}; t_nrLDPC_time_stats procTime = {0};
t_nrLDPC_time_stats *p_procTime = &procTime; t_nrLDPC_time_stats *p_procTime = &procTime;
// start_meas(&phy_vars_gNB->ulsch_deinterleaving_stats); // start_meas(&phy_vars_gNB->ulsch_deinterleaving_stats);
//////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////// nr_deinterleaving_ldpc /////////////////////////////////// ///////////////////////////////// nr_deinterleaving_ldpc ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////// ulsch_llr =====> ulsch_harq->e ////////////////////////////// //////////////////////////// ulsch_llr =====> ulsch_harq->e //////////////////////////////
/// code blocks after bit selection in rate matching for LDPC code (38.212 V15.4.0 section 5.4.2.1) /// code blocks after bit selection in rate matching for LDPC code (38.212 V15.4.0 section 5.4.2.1)
int16_t harq_e[E]; int16_t harq_e[E];
nr_deinterleaving_ldpc(E, Qm, harq_e, ulsch_llr + r_offset); nr_deinterleaving_ldpc(E, Qm, harq_e, ulsch_llr + r_offset);
// for (int i =0; i<16; i++) // for (int i =0; i<16; i++)
// printf("rx output deinterleaving w[%d]= %d r_offset %d\n", i,ulsch_harq->w[r][i], r_offset); // printf("rx output deinterleaving w[%d]= %d r_offset %d\n", i,ulsch_harq->w[r][i], r_offset);
stop_meas(&phy_vars_gNB->ulsch_deinterleaving_stats); stop_meas(&phy_vars_gNB->ulsch_deinterleaving_stats);
////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////// nr_rate_matching_ldpc_rx //////////////////////////////// //////////////////////////////// nr_rate_matching_ldpc_rx ////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////
///////////////////////// ulsch_harq->e =====> ulsch_harq->d ///////////////////////// ///////////////////////// ulsch_harq->e =====> ulsch_harq->d /////////////////////////
// start_meas(&phy_vars_gNB->ulsch_rate_unmatching_stats); // start_meas(&phy_vars_gNB->ulsch_rate_unmatching_stats);
if (nr_rate_matching_ldpc_rx(rdata->tbslbrm, if (nr_rate_matching_ldpc_rx(rdata->tbslbrm,
p_decoderParms->BG, p_decoderParms->BG,
p_decoderParms->Z, p_decoderParms->Z,
ulsch_harq->d[r], ulsch_harq->d[r],
harq_e, harq_e,
ulsch_harq->C, ulsch_harq->C,
rv_index, rv_index,
ulsch_harq->d_to_be_cleared[r], ulsch_harq->d_to_be_cleared[r],
E, E,
ulsch_harq->F, ulsch_harq->F,
Kr - ulsch_harq->F - 2 * (p_decoderParms->Z)) Kr - ulsch_harq->F - 2 * (p_decoderParms->Z))
== -1) { == -1) {
stop_meas(&phy_vars_gNB->ulsch_rate_unmatching_stats); stop_meas(&phy_vars_gNB->ulsch_rate_unmatching_stats);
LOG_E(PHY, "ulsch_decoding.c: Problem in rate_matching\n"); LOG_E(PHY, "ulsch_decoding.c: Problem in rate_matching\n");
rdata->decodeIterations = max_ldpc_iterations + 1; rdata->decodeIterations = max_ldpc_iterations + 1;
return; return;
} else { } else {
stop_meas(&phy_vars_gNB->ulsch_rate_unmatching_stats); stop_meas(&phy_vars_gNB->ulsch_rate_unmatching_stats);
} }
ulsch_harq->d_to_be_cleared[r] = false; ulsch_harq->d_to_be_cleared[r] = false;
memset(ulsch_harq->c[r], 0, Kr_bytes); memset(ulsch_harq->c[r], 0, Kr_bytes);
if (ulsch_harq->C == 1) { if (ulsch_harq->C == 1) {
if (A > 3824) if (A > 3824)
crc_type = CRC24_A; crc_type = CRC24_A;
else else
crc_type = CRC16; crc_type = CRC16;
length_dec = ulsch_harq->B; length_dec = ulsch_harq->B;
} else { } else {
crc_type = CRC24_B; crc_type = CRC24_B;
length_dec = (ulsch_harq->B + 24 * ulsch_harq->C) / ulsch_harq->C; length_dec = (ulsch_harq->B + 24 * ulsch_harq->C) / ulsch_harq->C;
} }
// start_meas(&phy_vars_gNB->ulsch_ldpc_decoding_stats); // start_meas(&phy_vars_gNB->ulsch_ldpc_decoding_stats);
// set first 2*Z_c bits to zeros // set first 2*Z_c bits to zeros
memset(&z[0], 0, 2 * ulsch_harq->Z * sizeof(int16_t)); memset(&z[0], 0, 2 * ulsch_harq->Z * sizeof(int16_t));
// set Filler bits // set Filler bits
memset((&z[0] + K_bits_F), 127, ulsch_harq->F * sizeof(int16_t)); memset((&z[0] + K_bits_F), 127, ulsch_harq->F * sizeof(int16_t));
// Move coded bits before filler bits // Move coded bits before filler bits
memcpy((&z[0] + 2 * ulsch_harq->Z), ulsch_harq->d[r], (K_bits_F - 2 * ulsch_harq->Z) * sizeof(int16_t)); memcpy((&z[0] + 2 * ulsch_harq->Z), ulsch_harq->d[r], (K_bits_F - 2 * ulsch_harq->Z) * sizeof(int16_t));
// skip filler bits // skip filler bits
memcpy((&z[0] + Kr), ulsch_harq->d[r] + (Kr - 2 * ulsch_harq->Z), (kc * ulsch_harq->Z - Kr) * sizeof(int16_t)); memcpy((&z[0] + Kr), ulsch_harq->d[r] + (Kr - 2 * ulsch_harq->Z), (kc * ulsch_harq->Z - Kr) * sizeof(int16_t));
// Saturate coded bits before decoding into 8 bits values // Saturate coded bits before decoding into 8 bits values
for (i = 0, j = 0; j < ((kc * ulsch_harq->Z) >> 4) + 1; i += 2, j++) { for (i = 0, j = 0; j < ((kc * ulsch_harq->Z) >> 4) + 1; i += 2, j++) {
pl[j] = _mm_packs_epi16(pv[i], pv[i + 1]); pl[j] = _mm_packs_epi16(pv[i], pv[i + 1]);
} }
////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////// nrLDPC_decoder ///////////////////////////////////// ///////////////////////////////////// nrLDPC_decoder /////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// pl =====> llrProcBuf ////////////////////////////////// ////////////////////////////////// pl =====> llrProcBuf //////////////////////////////////
p_decoderParms->block_length = length_dec; p_decoderParms->block_length = length_dec;
no_iteration_ldpc = nrLDPC_decoder(p_decoderParms, (int8_t *)&pl[0], llrProcBuf, p_procTime); no_iteration_ldpc = nrLDPC_decoder(p_decoderParms, (int8_t *)&pl[0], llrProcBuf, p_procTime);
if (check_crc((uint8_t *)llrProcBuf, length_dec, ulsch_harq->F, crc_type)) { if (check_crc((uint8_t *)llrProcBuf, length_dec, ulsch_harq->F, crc_type)) {
#ifdef PRINT_CRC_CHECK #ifdef PRINT_CRC_CHECK
LOG_I(PHY,"Segment %d CRC OK, iterations %d/%d\n",r,no_iteration_ldpc,max_ldpc_iterations); LOG_I(PHY,"Segment %d CRC OK, iterations %d/%d\n",r,no_iteration_ldpc,max_ldpc_iterations);
#endif #endif
rdata->decodeIterations = no_iteration_ldpc; rdata->decodeIterations = no_iteration_ldpc;
if (rdata->decodeIterations > p_decoderParms->numMaxIter) rdata->decodeIterations--; if (rdata->decodeIterations > p_decoderParms->numMaxIter) rdata->decodeIterations--;
} else { } else {
#ifdef PRINT_CRC_CHECK #ifdef PRINT_CRC_CHECK
LOG_I(PHY,"CRC NOK\n"); LOG_I(PHY,"CRC NOK\n");
#endif #endif
rdata->decodeIterations = max_ldpc_iterations + 1; rdata->decodeIterations = max_ldpc_iterations + 1;
} }
...@@ -261,8 +296,13 @@ NR_gNB_ULSCH_t new_gNB_ulsch(uint8_t max_ldpc_iterations, uint16_t N_RB_UL) ...@@ -261,8 +296,13 @@ NR_gNB_ULSCH_t new_gNB_ulsch(uint8_t max_ldpc_iterations, uint16_t N_RB_UL)
ulsch_harq->c[r][m]= (uint8_t) llrProcBuf[m]; ulsch_harq->c[r][m]= (uint8_t) llrProcBuf[m];
} }
#ifdef TASK_MANAGER
if( phy_vars_gNB->ldpc_offload_flag)
nr_postDecode(rdata->gNB, rdata);
#endif
//stop_meas(&phy_vars_gNB->ulsch_ldpc_decoding_stats); //stop_meas(&phy_vars_gNB->ulsch_ldpc_decoding_stats);
} }
uint32_t nr_ulsch_decoding(PHY_VARS_gNB *phy_vars_gNB, uint32_t nr_ulsch_decoding(PHY_VARS_gNB *phy_vars_gNB,
uint8_t ULSCH_id, uint8_t ULSCH_id,
...@@ -566,11 +606,21 @@ uint32_t nr_ulsch_decoding(PHY_VARS_gNB *phy_vars_gNB, ...@@ -566,11 +606,21 @@ uint32_t nr_ulsch_decoding(PHY_VARS_gNB *phy_vars_gNB,
else { else {
dtx_det = 0; dtx_det = 0;
#ifdef TASK_MANAGER
ldpcDecode_t* arr = calloc(harq_process->C, sizeof(ldpcDecode_t));
int idx_arr = 0;
#endif
for (int r = 0; r < harq_process->C; r++) { for (int r = 0; r < harq_process->C; r++) {
int E = nr_get_E(G, harq_process->C, Qm, n_layers, r); int E = nr_get_E(G, harq_process->C, Qm, n_layers, r);
#ifdef TASK_MANAGER
ldpcDecode_t* rdata = &arr[idx_arr];
++idx_arr;
#else
union ldpcReqUnion id = {.s = {ulsch->rnti, frame, nr_tti_rx, 0, 0}}; union ldpcReqUnion id = {.s = {ulsch->rnti, frame, nr_tti_rx, 0, 0}};
notifiedFIFO_elt_t *req = newNotifiedFIFO_elt(sizeof(ldpcDecode_t), id.p, &phy_vars_gNB->respDecode, &nr_processULSegment); notifiedFIFO_elt_t *req = newNotifiedFIFO_elt(sizeof(ldpcDecode_t), id.p, &phy_vars_gNB->respDecode, &nr_processULSegment);
ldpcDecode_t *rdata = (ldpcDecode_t *)NotifiedFifoData(req); ldpcDecode_t *rdata = (ldpcDecode_t *)NotifiedFifoData(req);
#endif
decParams.R = nr_get_R_ldpc_decoder(pusch_pdu->pusch_data.rv_index, E, decParams.BG, decParams.Z, &harq_process->llrLen, harq_process->round); decParams.R = nr_get_R_ldpc_decoder(pusch_pdu->pusch_data.rv_index, E, decParams.BG, decParams.Z, &harq_process->llrLen, harq_process->round);
rdata->gNB = phy_vars_gNB; rdata->gNB = phy_vars_gNB;
rdata->ulsch_harq = harq_process; rdata->ulsch_harq = harq_process;
...@@ -590,13 +640,24 @@ uint32_t nr_ulsch_decoding(PHY_VARS_gNB *phy_vars_gNB, ...@@ -590,13 +640,24 @@ uint32_t nr_ulsch_decoding(PHY_VARS_gNB *phy_vars_gNB,
rdata->ulsch = ulsch; rdata->ulsch = ulsch;
rdata->ulsch_id = ULSCH_id; rdata->ulsch_id = ULSCH_id;
rdata->tbslbrm = pusch_pdu->maintenance_parms_v3.tbSizeLbrmBytes; rdata->tbslbrm = pusch_pdu->maintenance_parms_v3.tbSizeLbrmBytes;
#ifdef TASK_MANAGER
task_t t = { .args = rdata, .func = &nr_processULSegment };
async_task_manager(&phy_vars_gNB->man, t);
#else
pushTpool(&phy_vars_gNB->threadPool, req); pushTpool(&phy_vars_gNB->threadPool, req);
#endif
phy_vars_gNB->nbDecode++; phy_vars_gNB->nbDecode++;
LOG_D(PHY, "Added a block to decode, in pipe: %d\n", phy_vars_gNB->nbDecode); LOG_D(PHY, "Added a block to decode, in pipe: %d\n", phy_vars_gNB->nbDecode);
r_offset += E; r_offset += E;
offset += (Kr_bytes - (harq_process->F >> 3) - ((harq_process->C > 1) ? 3 : 0)); offset += (Kr_bytes - (harq_process->F >> 3) - ((harq_process->C > 1) ? 3 : 0));
////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////
} }
#ifdef TASK_MANAGER
trigger_and_wait_all_task_manager(&phy_vars_gNB->man);
free(arr);
#endif
} }
return 1; return 1;
} }
...@@ -44,6 +44,9 @@ ...@@ -44,6 +44,9 @@
#include "executables/rt_profiling.h" #include "executables/rt_profiling.h"
#include "nfapi_nr_interface_scf.h" #include "nfapi_nr_interface_scf.h"
#include "common/utils/thread_pool/task_manager.h"
#define MAX_NUM_RU_PER_gNB 8 #define MAX_NUM_RU_PER_gNB 8
#define MAX_PUCCH0_NID 8 #define MAX_PUCCH0_NID 8
...@@ -778,6 +781,11 @@ typedef struct PHY_VARS_gNB_s { ...@@ -778,6 +781,11 @@ typedef struct PHY_VARS_gNB_s {
void *scopeData; void *scopeData;
/// structure for analyzing high-level RT measurements /// structure for analyzing high-level RT measurements
rt_L1_profiling_t rt_L1_profiling; rt_L1_profiling_t rt_L1_profiling;
#ifdef TASK_MANAGER
task_manager_t man;
#endif
} PHY_VARS_gNB; } PHY_VARS_gNB;
typedef struct LDPCDecode_s { typedef struct LDPCDecode_s {
......
...@@ -39,6 +39,8 @@ ...@@ -39,6 +39,8 @@
#include "executables/softmodem-common.h" #include "executables/softmodem-common.h"
#include "nfapi/oai_integration/vendor_ext.h" #include "nfapi/oai_integration/vendor_ext.h"
#include "NR_SRS-ResourceSet.h" #include "NR_SRS-ResourceSet.h"
#include "common/utils/thread_pool/task_manager.h"
#include "assertions.h" #include "assertions.h"
...@@ -232,9 +234,15 @@ void phy_procedures_gNB_TX(processingData_L1tx_t *msgTx, ...@@ -232,9 +234,15 @@ void phy_procedures_gNB_TX(processingData_L1tx_t *msgTx,
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_PHY_PROCEDURES_gNB_TX+offset,0); VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_PHY_PROCEDURES_gNB_TX+offset,0);
} }
#ifdef TASK_MANAGER
void nr_postDecode(PHY_VARS_gNB *gNB, ldpcDecode_t *rdata)
{
#else
void nr_postDecode(PHY_VARS_gNB *gNB, notifiedFIFO_elt_t *req) void nr_postDecode(PHY_VARS_gNB *gNB, notifiedFIFO_elt_t *req)
{ {
ldpcDecode_t *rdata = (ldpcDecode_t*) NotifiedFifoData(req); ldpcDecode_t *rdata = (ldpcDecode_t*) NotifiedFifoData(req);
#endif
NR_UL_gNB_HARQ_t *ulsch_harq = rdata->ulsch_harq; NR_UL_gNB_HARQ_t *ulsch_harq = rdata->ulsch_harq;
NR_gNB_ULSCH_t *ulsch = rdata->ulsch; NR_gNB_ULSCH_t *ulsch = rdata->ulsch;
int r = rdata->segment_r; int r = rdata->segment_r;
...@@ -252,6 +260,8 @@ void nr_postDecode(PHY_VARS_gNB *gNB, notifiedFIFO_elt_t *req) ...@@ -252,6 +260,8 @@ void nr_postDecode(PHY_VARS_gNB *gNB, notifiedFIFO_elt_t *req)
} else { } else {
if ( rdata->nbSegments != ulsch_harq->processedSegments ) { if ( rdata->nbSegments != ulsch_harq->processedSegments ) {
// Let's forget about this optimization for now
#ifndef TASK_MANAGER
int nb = abortTpoolJob(&gNB->threadPool, req->key); int nb = abortTpoolJob(&gNB->threadPool, req->key);
nb += abortNotifiedFIFOJob(&gNB->respDecode, req->key); nb += abortNotifiedFIFOJob(&gNB->respDecode, req->key);
gNB->nbDecode-=nb; gNB->nbDecode-=nb;
...@@ -260,6 +270,7 @@ void nr_postDecode(PHY_VARS_gNB *gNB, notifiedFIFO_elt_t *req) ...@@ -260,6 +270,7 @@ void nr_postDecode(PHY_VARS_gNB *gNB, notifiedFIFO_elt_t *req)
AssertFatal(ulsch_harq->processedSegments+nb == rdata->nbSegments,"processed: %d, aborted: %d, total %d\n", AssertFatal(ulsch_harq->processedSegments+nb == rdata->nbSegments,"processed: %d, aborted: %d, total %d\n",
ulsch_harq->processedSegments, nb, rdata->nbSegments); ulsch_harq->processedSegments, nb, rdata->nbSegments);
ulsch_harq->processedSegments=rdata->nbSegments; ulsch_harq->processedSegments=rdata->nbSegments;
#endif
} }
} }
...@@ -350,6 +361,12 @@ void nr_postDecode(PHY_VARS_gNB *gNB, notifiedFIFO_elt_t *req) ...@@ -350,6 +361,12 @@ void nr_postDecode(PHY_VARS_gNB *gNB, notifiedFIFO_elt_t *req)
void nr_ulsch_procedures(PHY_VARS_gNB *gNB, int frame_rx, int slot_rx, int ULSCH_id, uint8_t harq_pid) void nr_ulsch_procedures(PHY_VARS_gNB *gNB, int frame_rx, int slot_rx, int ULSCH_id, uint8_t harq_pid)
{ {
#ifdef TASK_MANAGER
trigger_and_spin(&gNB->man);
#endif
NR_DL_FRAME_PARMS *frame_parms = &gNB->frame_parms; NR_DL_FRAME_PARMS *frame_parms = &gNB->frame_parms;
nfapi_nr_pusch_pdu_t *pusch_pdu = &gNB->ulsch[ULSCH_id].harq_process->ulsch_pdu; nfapi_nr_pusch_pdu_t *pusch_pdu = &gNB->ulsch[ULSCH_id].harq_process->ulsch_pdu;
...@@ -408,15 +425,19 @@ void nr_ulsch_procedures(PHY_VARS_gNB *gNB, int frame_rx, int slot_rx, int ULSCH ...@@ -408,15 +425,19 @@ void nr_ulsch_procedures(PHY_VARS_gNB *gNB, int frame_rx, int slot_rx, int ULSCH
start_meas(&gNB->ulsch_decoding_stats); start_meas(&gNB->ulsch_decoding_stats);
nr_ulsch_decoding(gNB, ULSCH_id, gNB->pusch_vars[ULSCH_id].llr, frame_parms, pusch_pdu, frame_rx, slot_rx, harq_pid, G); nr_ulsch_decoding(gNB, ULSCH_id, gNB->pusch_vars[ULSCH_id].llr, frame_parms, pusch_pdu, frame_rx, slot_rx, harq_pid, G);
#ifndef TASK_MANAGER
if (enable_ldpc_offload == 0) { if (enable_ldpc_offload == 0) {
while (gNB->nbDecode > 0) { while (gNB->nbDecode > 0) {
notifiedFIFO_elt_t *req = pullTpool(&gNB->respDecode, &gNB->threadPool); notifiedFIFO_elt_t *req = pullTpool(&gNB->respDecode, &gNB->threadPool);
if (req == NULL) if (req == NULL)
break; // Tpool has been stopped break; // Tpool has been stopped
nr_postDecode(gNB, req); nr_postDecode(gNB, req);
delNotifiedFIFO_elt(req); delNotifiedFIFO_elt(req);
} }
} }
#endif
stop_meas(&gNB->ulsch_decoding_stats); stop_meas(&gNB->ulsch_decoding_stats);
} }
......
...@@ -67,6 +67,8 @@ ...@@ -67,6 +67,8 @@
#include "PHY/NR_REFSIG/ul_ref_seq_nr.h" #include "PHY/NR_REFSIG/ul_ref_seq_nr.h"
#include <openair3/ocp-gtpu/gtp_itf.h> #include <openair3/ocp-gtpu/gtp_itf.h>
#include "executables/nr-uesoftmodem.h" #include "executables/nr-uesoftmodem.h"
#include "common/utils/thread_pool/task_manager.h"
//#define DEBUG_ULSIM //#define DEBUG_ULSIM
const char *__asan_default_options() const char *__asan_default_options()
...@@ -586,6 +588,10 @@ int main(int argc, char **argv) ...@@ -586,6 +588,10 @@ int main(int argc, char **argv)
gNB->ofdm_offset_divisor = UINT_MAX; gNB->ofdm_offset_divisor = UINT_MAX;
initNotifiedFIFO(&gNB->respDecode); initNotifiedFIFO(&gNB->respDecode);
#ifdef TASK_MANAGER
init_task_manager(&gNB->man, threadCnt);
#endif
initFloatingCoresTpool(threadCnt, &gNB->threadPool, false, "gNB-tpool"); initFloatingCoresTpool(threadCnt, &gNB->threadPool, false, "gNB-tpool");
initNotifiedFIFO(&gNB->respDecode); initNotifiedFIFO(&gNB->respDecode);
initNotifiedFIFO(&gNB->L1_tx_free); initNotifiedFIFO(&gNB->L1_tx_free);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment