I want to run different executables on different processors at the same time, so I took the example from here: Is there a way to assign different jobs (processes) to specific cores in Linux using Python? I tried running an executable with it, but the for loop waits for the executable to finish before assigning a new process. Is there a way to assign the next process to the next core without waiting?
import os
from itertools import cycle
import multiprocessing as mp
import subprocess

def map_hack(AFF):
    my_pid = os.getpid()
    old_aff = os.sched_getaffinity(0)
    os.sched_setaffinity(0, AFF)
    return (my_pid, old_aff, os.sched_getaffinity(0))

PROCESSES = os.cpu_count()
_mycpus = cycle(os.sched_getaffinity(0))
cpus = [[next(_mycpus)] for x in range(PROCESSES)]

with mp.Pool(processes=PROCESSES) as pool:
    for x in pool.map(map_hack, cpus, chunksize=1):
        print("Try 2: My pid is {} and my old aff was {}, my new aff is {}".format(*x))
        # hack: parse the core id from the affinity set's string repr,
        # e.g. "{0}" -> 0 (works for single-digit core ids only)
        z = str(x[2])
        z = z[:-1]
        z = int(z[-1:])
        print(z)
        if z == 0:
            print("program waits for this executable to finish")
            subprocess.run(["./exevision"])
    pool.close()
    pool.join()
Try this: subprocess.run blocks, and your first version calls it sequentially from the parent's for loop, so only one copy ever runs at a time. Move the subprocess.run call into map_hack so each pool worker launches its own copy on its assigned core, and submit the tasks with apply_async so the loop doesn't wait on any of them:
import os
from itertools import cycle
import multiprocessing as mp
import subprocess
def map_hack(AFF):
    my_pid = os.getpid()
    old_aff = os.sched_getaffinity(0)
    # pin this worker process to its assigned core
    os.sched_setaffinity(0, AFF)
    print("My pid is {} and my old aff was {}, my new aff is {}".format(
        my_pid, old_aff, os.sched_getaffinity(0)))
    # run the executable inside the worker, so every copy runs in parallel
    subprocess.run(["./exevision"])
    return (my_pid, old_aff, os.sched_getaffinity(0))

PROCESSES = os.cpu_count()
_mycpus = cycle(os.sched_getaffinity(0))
cpus = [[next(_mycpus)] for _ in range(PROCESSES)]

with mp.Pool(processes=PROCESSES) as pool:
    # apply_async submits every task up front instead of blocking on each one
    results = [pool.apply_async(map_hack, args=(affinity,)) for affinity in cpus]
    for res in results:
        res.get()
    pool.close()
    pool.join()
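
If you don't actually need the worker pool, a simpler route is to launch the children directly with subprocess.Popen, which returns immediately instead of blocking the way subprocess.run does. A minimal sketch under the same assumptions as above (a Linux host, since sched_setaffinity is Linux-only, and the ./exevision binary in the current directory):

import os
import subprocess

procs = []
for cpu in sorted(os.sched_getaffinity(0)):
    p = subprocess.Popen(["./exevision"])   # returns immediately, no waiting
    os.sched_setaffinity(p.pid, {cpu})      # pin the new child to one core
    procs.append(p)

# every copy is already running in parallel at this point
for p in procs:
    p.wait()

The child may run for an instant before the parent pins it, which is usually harmless; if that matters, set the affinity inside the child instead via Popen's preexec_fn argument.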