Commit aef86359 authored by Robert Schmidt

CI: increase max number of concurrent workers to 64

According to the ThreadPoolExecutor documentation [1], the default
maximum number of workers is min(32, os.cpu_count() + 4). The problem
is that we execute many pipelines from nano, where os.cpu_count()
reports 4, so by default we get at most 8 concurrent workers. As a
result, in pipelines with many UEs (e.g., AW2S-AmariUE), we do not
ping/iperf on all UEs at the same time.
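For illustration, the default cap can be reproduced directly (a minimal
sketch; the value 8 assumes a 4-core machine such as nano):

```python
import os

# Default computed by ThreadPoolExecutor when max_workers is not given [1]:
# min(32, os.cpu_count() + 4). With os.cpu_count() == 4, the pool is
# capped at 8 threads.
default_max_workers = min(32, os.cpu_count() + 4)
print(default_max_workers)  # 8 on a 4-core machine
```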

Fix this by increasing the number of workers to 64. This should be fine,
as almost no real work is done in the workers themselves (they only open
a shell and execute programs such as ping/iperf).

[1] https://docs.python.org/3/library/concurrent.futures.html#threadpoolexecutor
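The resulting pattern, common to all call sites below, looks roughly
like this (a minimal sketch; run_ping and the host list are hypothetical
stand-ins for the Module_UE methods and UE objects the CI actually
submits):

```python
import concurrent.futures
import subprocess

# Hypothetical stand-in for the per-UE work: the thread just spawns a
# shell command and blocks until it finishes, so it is I/O-bound, not
# CPU-bound; 64 such threads are cheap.
def run_ping(host: str) -> bool:
    return subprocess.run(["ping", "-c", "3", host],
                          capture_output=True, text=True).returncode == 0

hosts = [f"192.168.1.{i}" for i in range(1, 65)]  # illustrative addresses
with concurrent.futures.ThreadPoolExecutor(max_workers=64) as executor:
    # submit() returns futures in submission order, so gathering with
    # result() keeps one result per host, as the CI code does with
    # zip(futures, ues)
    futures = [executor.submit(run_ping, h) for h in hosts]
    results = [f.result() for f in futures]
```

With 64 workers, all submitted jobs run concurrently instead of being
processed in batches of 8.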
parent 69a84825
@@ -394,7 +394,7 @@ class OaiCiTest():
 	def InitializeUE(self, HTML):
 		ues = [cls_module.Module_UE(n.strip()) for n in self.ue_ids]
 		messages = []
-		with concurrent.futures.ThreadPoolExecutor() as executor:
+		with concurrent.futures.ThreadPoolExecutor(max_workers=64) as executor:
 			futures = [executor.submit(ue.initialize) for ue in ues]
 			for f, ue in zip(futures, ues):
 				uename = f'UE {ue.getName()}'
@@ -609,7 +609,7 @@ class OaiCiTest():
 	def AttachUE(self, HTML, RAN, EPC, CONTAINERS):
 		ues = [cls_module.Module_UE(ue_id, server_name) for ue_id, server_name in zip(self.ue_ids, self.nodes)]
-		with concurrent.futures.ThreadPoolExecutor() as executor:
+		with concurrent.futures.ThreadPoolExecutor(max_workers=64) as executor:
 			futures = [executor.submit(ue.attach) for ue in ues]
 			attached = [f.result() for f in futures]
 			futures = [executor.submit(ue.checkMTU) for ue in ues]
@@ -624,7 +624,7 @@ class OaiCiTest():
 	def DetachUE(self, HTML):
 		ues = [cls_module.Module_UE(ue_id, server_name) for ue_id, server_name in zip(self.ue_ids, self.nodes)]
-		with concurrent.futures.ThreadPoolExecutor() as executor:
+		with concurrent.futures.ThreadPoolExecutor(max_workers=64) as executor:
 			futures = [executor.submit(ue.detach) for ue in ues]
 			[f.result() for f in futures]
 			messages = [f"UE {ue.getName()}: detached" for ue in ues]
@@ -632,7 +632,7 @@ class OaiCiTest():
 	def DataDisableUE(self, HTML):
 		ues = [cls_module.Module_UE(n.strip()) for n in self.ue_ids]
-		with concurrent.futures.ThreadPoolExecutor() as executor:
+		with concurrent.futures.ThreadPoolExecutor(max_workers=64) as executor:
 			futures = [executor.submit(ue.dataDisable) for ue in ues]
 			status = [f.result() for f in futures]
 			if all(status):
@@ -645,7 +645,7 @@ class OaiCiTest():
 	def DataEnableUE(self, HTML):
 		ues = [cls_module.Module_UE(n.strip()) for n in self.ue_ids]
 		logging.debug(f'disabling data for UEs {ues}')
-		with concurrent.futures.ThreadPoolExecutor() as executor:
+		with concurrent.futures.ThreadPoolExecutor(max_workers=64) as executor:
 			futures = [executor.submit(ue.dataEnable) for ue in ues]
 			status = [f.result() for f in futures]
 			if all(status):
@@ -659,7 +659,7 @@ class OaiCiTest():
 		ues = [cls_module.Module_UE(n.strip()) for n in self.ue_ids]
 		logging.debug(f'checking status of UEs {ues}')
 		messages = []
-		with concurrent.futures.ThreadPoolExecutor() as executor:
+		with concurrent.futures.ThreadPoolExecutor(max_workers=64) as executor:
 			futures = [executor.submit(ue.check) for ue in ues]
 			messages = [f.result() for f in futures]
 		HTML.CreateHtmlTestRowQueue('NA', 'OK', messages)
@@ -766,7 +766,7 @@ class OaiCiTest():
 		ues = [cls_module.Module_UE(ue_id, server_name) for ue_id, server_name in zip(self.ue_ids, self.nodes)]
 		logging.debug(ues)
 		pingLock = Lock()
-		with concurrent.futures.ThreadPoolExecutor() as executor:
+		with concurrent.futures.ThreadPoolExecutor(max_workers=64) as executor:
 			futures = [executor.submit(self.Ping_common, EPC, ue, RAN, CONTAINERS, pingLock) for ue in ues]
 			results = [f.result() for f in futures]
 			# each result in results is a tuple, first member goes to successes, second to messages
@@ -855,7 +855,7 @@ class OaiCiTest():
 		ues = [cls_module.Module_UE(ue_id, server_name) for ue_id, server_name in zip(self.ue_ids, self.nodes)]
 		svr = cls_module.Module_UE(self.svr_id,self.svr_node)
 		logging.debug(ues)
-		with concurrent.futures.ThreadPoolExecutor() as executor:
+		with concurrent.futures.ThreadPoolExecutor(max_workers=64) as executor:
 			futures = [executor.submit(self.Iperf_Module, EPC, ue, svr, RAN, i, len(ues), CONTAINERS) for i, ue in enumerate(ues)]
 			results = [f.result() for f in futures]
 			# each result in results is a tuple, first member goes to successes, second to messages
@@ -1179,7 +1179,7 @@ class OaiCiTest():
 	def TerminateUE(self, HTML):
 		ues = [cls_module.Module_UE(n.strip()) for n in self.ue_ids]
-		with concurrent.futures.ThreadPoolExecutor() as executor:
+		with concurrent.futures.ThreadPoolExecutor(max_workers=64) as executor:
 			futures = [executor.submit(ue.terminate) for ue in ues]
 			archives = [f.result() for f in futures]
 			archive_info = [f'Log at: {a}' if a else 'No log available' for a in archives]