def migration():
    """Demonstrate changing the CPU affinity mask of the current process
    while two worker threads are created and run.

    Relies on the external ``affinity`` module and a ``thread`` class
    (defined elsewhere in this file) exposing ``thread_name`` and
    ``thread_ID`` attributes.
    """
    id1 = 1
    id2 = 2
    thread1 = thread("Thread" + str(id1), id1)
    print(str(thread1.thread_name) + " " + str(thread1.thread_ID))
    thread2 = thread("Thread" + str(id2), id2)
    print(str(thread2.thread_name) + " " + str(thread2.thread_ID))

    # pid 0 means "the current process" for the affinity module.
    pid = 0
    aff1 = affinity.get_process_affinity_mask(pid)
    # fixed typos in the original messages ("eligibl", "% s", "th2is")
    print("Thread1 is eligible to run on:", aff1)

    # Restrict the current process to the cores selected by bitmask 100.
    affinity.set_process_affinity_mask(0, 100)
    print("CPU affinity mask is modified for process id %s" % pid)
    aff = affinity.get_process_affinity_mask(pid)
    print("Now, process for th1 is eligible to run on:", aff)

    aff2 = affinity.get_process_affinity_mask(pid)
    print("Thread2 is eligible to run on:", aff2)

    # Change the mask again (bitmask 45) before starting the threads.
    affinity.set_process_affinity_mask(0, 45)
    print("CPU affinity mask is modified for process id %s" % pid)
    aff2 = affinity.get_process_affinity_mask(pid)
    print("Now, process for th2 is eligible to run on:", aff2)

    thread1.start()
    thread2.start()
    thread1.join()
    thread2.join()
def main():
    """Run the vbench benchmark suite: pin the process to one CPU if
    possible, filter benchmarks by regex, then profile them.

    Reads configuration from the module-level ``args`` namespace.
    """
    from suite import benchmarks

    # GitRepo wants exactly 7 character hash?
    if args.base_commit:
        args.base_commit = args.base_commit[:7]
    if args.target_commit:
        args.target_commit = args.target_commit[:7]

    if not args.log_file:
        args.log_file = os.path.abspath(
            os.path.join(REPO_PATH, 'vb_suite.log'))

    saved_dir = os.path.curdir
    if args.outdf:
        # not bullet-proof but enough for us
        args.outdf = os.path.realpath(args.outdf)
    if args.log_file:
        # not bullet-proof but enough for us
        args.log_file = os.path.realpath(args.log_file)

    random.seed(args.seed)
    np.random.seed(args.seed)

    # Best-effort CPU pinning; results are noisier without it.
    try:
        import affinity
        affinity.set_process_affinity_mask(0, args.affinity)
        assert affinity.get_process_affinity_mask(0) == args.affinity
        print("CPU affinity set to %d" % args.affinity)
    except ImportError:
        print("Warning: The 'affinity' module is not available.")
        time.sleep(2)

    print("\n")
    prprint("LOG_FILE = %s" % args.log_file)
    if args.outdf:
        # fixed typo: message previously read "PICKE_FILE"
        prprint("PICKLE_FILE = %s" % args.outdf)
    print("\n")

    # move away from the pandas root dir, to avoid possible import
    # surprises
    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    benchmarks = [x for x in benchmarks if re.search(args.regex, x.name)]

    for b in benchmarks:
        b.repeat = args.repeats
        if args.ncalls:
            b.ncalls = args.ncalls

    if benchmarks:
        if args.head:
            profile_head(benchmarks)
        else:
            profile_comparative(benchmarks)
    else:
        print("No matching benchmarks")

    os.chdir(saved_dir)
def train_individual_cpu(mean_list, sigma_list, pool, env, ARGS, refer_batch, seed):
    """Evaluate every offspring of every population on the CPU pool.

    Returns ``(rewards_list, frame_count, models_list, noops_list,
    detail_rewards_list)`` where the ``*_list`` results are regrouped per
    population (``ARGS.lam`` populations of ``ARGS.population_size``
    offsprings each) and ``frame_count`` is the total frames consumed.
    """
    jobs = []
    for idx, mean in enumerate(mean_list):
        sigma = sigma_list[idx]
        model = build_model(ARGS)
        # Submit one asynchronous evaluation per offspring.  The original
        # code overwrote ``jobs`` with None each iteration and spawned
        # mp.Process workers that were immediately joined with their
        # results discarded, so the ``j.get()`` loop below crashed with a
        # TypeError; submitting to the shared pool (as the commented-out
        # code intended) restores the designed behaviour.
        for k_id in range(ARGS.population_size):
            jobs.append(pool.apply_async(
                get_reward_atari,
                (model, mean, sigma, env, seed[k_id], ARGS, refer_batch,
                 None, False, False,)
            ))

    rewards_list, frame_list, models_list, noops_list, detail_rewards_list = [], [], [], [], []
    rewards, frames, models, noops, detail_rewards = [], [], [], [], []

    # Fetch each job's result once (the original called j.get() five
    # times per job) and unpack its five fields.
    for j in jobs:
        res = j.get()
        rewards.append(res[0])
        frames.append(res[1])
        models.append(res[2])
        noops.append(res[3])
        detail_rewards.append(res[4])

    # Regroup the flat result lists per population.
    for i in range(ARGS.lam):
        mu = ARGS.population_size
        rewards_list.append(rewards[i * mu:(i + 1) * mu])
        frame_list.append(frames[i * mu:(i + 1) * mu])
        models_list.append(models[i * mu:(i + 1) * mu])
        noops_list.append(noops[i * mu:(i + 1) * mu])
        detail_rewards_list.append(detail_rewards[i * mu:(i + 1) * mu])

    frame_count = np.sum(np.array(frame_list))
    return rewards_list, frame_count, models_list, noops_list, detail_rewards_list
def set_processor_affinity(mask, pid=None):
    """Pin a process to the given cores.

    ``mask`` may be an affinity bitmask or a list of core indices;
    ``pid`` defaults to the current process (0).  Returns True when the
    kernel reports the requested mask afterwards, False otherwise.
    """
    wanted = _create_affinity_mask(mask)
    target = pid or 0
    affinity.set_process_affinity_mask(target, wanted)
    actual = affinity.get_process_affinity_mask(target)
    actual_cores = ", ".join(str(c) for c in _affinity_mask_to_list(actual))
    if actual == wanted:
        logger.info("Set process affinity for pid %d to cores %s",
                    target, actual_cores)
        return True
    wanted_cores = ", ".join(str(c) for c in _affinity_mask_to_list(wanted))
    logger.warning(
        "Set process affinity for pid %d to cores %s unsuccessful: actually set to %s",
        target, wanted_cores, actual_cores)
    return False
def train_parallel_cpu(mean_list, sigma_list, pool, env, ARGS, refer_batch, seed):
    """Evaluates all offsprings of all populations in parallel by
    population separately.

    Returns ``(rewards_list, frame_count, models_list, noops_list,
    detail_rewards_list)`` with one inner list per population and the
    total frame count across all evaluations.
    """
    rewards_list, frame_list, models_list, noops_list, detail_rewards_list = [], [], [], [], []
    for idx, mean in enumerate(mean_list):
        sigma = sigma_list[idx]
        jobs = []
        model = build_model(ARGS)
        # Submit one asynchronous evaluation per offspring of this
        # population.  The original loop referenced an undefined
        # ``cpurank`` name (NameError) and never filled ``jobs``, so no
        # results were ever collected; submitting to the pool (as the
        # commented-out code intended) restores the parallel evaluation.
        for k_id in range(ARGS.population_size):
            jobs.append(pool.apply_async(
                get_reward_atari,
                (model, mean, sigma, env, seed[k_id], ARGS, refer_batch,
                 None, False, False,)
            ))

        rewards, frames, models, noops, detail_rewards = [], [], [], [], []
        # Fetch each job's result once and unpack its five fields.
        for j in jobs:
            res = j.get()
            rewards.append(res[0])
            frames.append(res[1])
            models.append(res[2])
            noops.append(res[3])
            detail_rewards.append(res[4])
        rewards_list.append(rewards)
        frame_list.append(frames)
        models_list.append(models)
        noops_list.append(noops)
        detail_rewards_list.append(detail_rewards)

    frame_count = np.sum(np.array(frame_list))
    return rewards_list, frame_count, models_list, noops_list, detail_rewards_list
def main():
    """Run the vbench benchmark suite: pin the process to one CPU
    (preferring psutil over the affinity module), filter benchmarks by
    regex, then profile them.

    Reads configuration from the module-level ``args`` namespace.
    """
    from suite import benchmarks

    # GitRepo wants exactly 7 character hash?
    if args.base_commit:
        args.base_commit = args.base_commit[:7]
    if args.target_commit:
        args.target_commit = args.target_commit[:7]

    if not args.log_file:
        args.log_file = os.path.abspath(
            os.path.join(REPO_PATH, 'vb_suite.log'))

    saved_dir = os.path.curdir
    if args.outdf:
        # not bullet-proof but enough for us
        args.outdf = os.path.realpath(args.outdf)
    if args.log_file:
        # not bullet-proof but enough for us
        args.log_file = os.path.realpath(args.log_file)

    random.seed(args.seed)
    np.random.seed(args.seed)

    affinity_set = False

    # try psutil first since it is more commonly present and better
    # maintained. Some people experienced problems with affinity package
    # (see https://code.google.com/p/psutil/issues/detail?id=238 for more references)
    try:
        import psutil
        if hasattr(psutil.Process, 'set_cpu_affinity'):
            psutil.Process(os.getpid()).set_cpu_affinity([args.affinity])
            affinity_set = True
    except ImportError:
        pass

    if not affinity_set:
        try:
            import affinity
            affinity.set_process_affinity_mask(0, args.affinity)
            assert affinity.get_process_affinity_mask(0) == args.affinity
            affinity_set = True
        except ImportError:
            pass

    if not affinity_set:
        import warnings
        warnings.warn(
            "\n\n"
            "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
            "The 'affinity' or 'psutil' >= 0.5.0 modules are not available, results may be unreliable\n"
            "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n"
        )
        time.sleep(2)
    else:
        print("CPU affinity set to %d" % args.affinity)

    print("\n")
    prprint("LOG_FILE = %s" % args.log_file)
    if args.outdf:
        # fixed typo: message previously read "PICKE_FILE"
        prprint("PICKLE_FILE = %s" % args.outdf)
    print("\n")

    # move away from the pandas root dir, to avoid possible import
    # surprises
    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    benchmarks = [x for x in benchmarks if re.search(args.regex, x.name)]

    for b in benchmarks:
        b.repeat = args.repeats
        if args.ncalls:
            b.ncalls = args.ncalls

    if benchmarks:
        if args.head:
            profile_head(benchmarks)
        else:
            profile_comparative(benchmarks)
    else:
        print("No matching benchmarks")

    os.chdir(saved_dir)
def main():
    """Run the vbench benchmark suite: pin the process to one CPU if the
    'affinity' module is available, filter benchmarks by regex, then
    profile them.

    Reads configuration from the module-level ``args`` namespace.
    """
    from suite import benchmarks

    # GitRepo wants exactly 7 character hash?
    if args.base_commit:
        args.base_commit = args.base_commit[:7]
    if args.target_commit:
        args.target_commit = args.target_commit[:7]

    if not args.log_file:
        args.log_file = os.path.abspath(
            os.path.join(REPO_PATH, 'vb_suite.log'))

    saved_dir = os.path.curdir
    if args.outdf:
        # not bullet-proof but enough for us
        args.outdf = os.path.realpath(args.outdf)
    if args.log_file:
        # not bullet-proof but enough for us
        args.log_file = os.path.realpath(args.log_file)

    random.seed(args.seed)
    np.random.seed(args.seed)

    # Best-effort CPU pinning; benchmarks are noisier without it.
    try:
        import affinity
        affinity.set_process_affinity_mask(0, args.affinity)
        assert affinity.get_process_affinity_mask(0) == args.affinity
        print("CPU affinity set to %d" % args.affinity)
    except ImportError:
        # removed an unused `import warnings` that the original left here
        print("\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" +
              "The 'affinity' module is not available, results may be unreliable\n" +
              "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n"
              )
        time.sleep(2)

    print("\n")
    prprint("LOG_FILE = %s" % args.log_file)
    if args.outdf:
        # fixed typo: message previously read "PICKE_FILE"
        prprint("PICKLE_FILE = %s" % args.outdf)
    print("\n")

    # move away from the pandas root dir, to avoid possible import
    # surprises
    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    benchmarks = [x for x in benchmarks if re.search(args.regex, x.name)]

    for b in benchmarks:
        b.repeat = args.repeats
        if args.ncalls:
            b.ncalls = args.ncalls

    if benchmarks:
        if args.head:
            profile_head(benchmarks)
        else:
            profile_comparative(benchmarks)
    else:
        print("No matching benchmarks")

    os.chdir(saved_dir)
# Parse the repeat counts from argv[4] as "comp" or "comp,decomp";
# a single value is used for both compression and decompression.
# (Python 2 syntax: `print >> sys.stderr`.)
try:
    repeats = [int(t) for t in sys.argv[4].split(',')]
except:
    print >> sys.stderr, 'Error: arguments'
    sys.exit()
if len(repeats) == 1:
    repeats *= 2
elif len(repeats) > 2 or any(t <= 0 for t in repeats):
    print >> sys.stderr, 'Error: arguments'
    sys.exit()
comp_repeats, decomp_repeats = repeats

# Lock to a single core (reduces context switches, picks highest affinity bit)
# Only available if the affinity module has been installed
try:
    import affinity
    mask, i = affinity.get_process_affinity_mask(os.getpid()), -1
    # Shift the mask right until it is empty; i ends up as the index of
    # the highest set bit in the current affinity mask.
    while mask:
        mask >>= 1; i += 1
    # `i - (i & 1)` clears the low bit of the index, so the process is
    # pinned to the even-numbered core of the highest available pair
    # (presumably to avoid hyper-thread siblings — TODO confirm).
    affinity.set_process_affinity_mask(os.getpid(), 1 << (i-(i&1)))
except:
    # NOTE(review): bare except is deliberate best-effort — pinning is
    # optional and must never abort the benchmark.
    pass

# Attempt to increase the priority to very high
try:
    import win32api, win32process
    # Windows path: escalate in two steps; REALTIME may require admin
    # rights, in which case HIGH sticks.
    win32process.SetPriorityClass(-1, win32process.HIGH_PRIORITY_CLASS)
    win32process.SetPriorityClass(-1, win32process.REALTIME_PRIORITY_CLASS)
except:
    # POSIX fallback: raise priority until os.nice refuses (best-effort).
    try:
        while True:
            os.nice(-1)
    except:
        pass

# Counter for the files processed by the benchmark loop that follows.
nfiles = 0
def main():
    """Run the vbench benchmark suite: pin the process to one CPU
    (preferring psutil over the affinity module), filter benchmarks by
    regex, then profile them.

    Reads configuration from the module-level ``args`` namespace.
    """
    from suite import benchmarks

    # GitRepo wants exactly 7 character hash?
    if args.base_commit:
        args.base_commit = args.base_commit[:7]
    if args.target_commit:
        args.target_commit = args.target_commit[:7]

    if not args.log_file:
        args.log_file = os.path.abspath(os.path.join(REPO_PATH,
                                                     'vb_suite.log'))

    saved_dir = os.path.curdir
    if args.outdf:
        # not bullet-proof but enough for us
        args.outdf = os.path.realpath(args.outdf)
    if args.log_file:
        # not bullet-proof but enough for us
        args.log_file = os.path.realpath(args.log_file)

    random.seed(args.seed)
    np.random.seed(args.seed)

    affinity_set = False

    # try psutil first since it is more commonly present and better
    # maintained. Some people experienced problems with affinity package
    # (see https://code.google.com/p/psutil/issues/detail?id=238 for more references)
    try:
        import psutil
        if hasattr(psutil.Process, 'set_cpu_affinity'):
            psutil.Process(os.getpid()).set_cpu_affinity([args.affinity])
            affinity_set = True
    except ImportError:
        pass

    if not affinity_set:
        try:
            import affinity
            affinity.set_process_affinity_mask(0, args.affinity)
            assert affinity.get_process_affinity_mask(0) == args.affinity
            affinity_set = True
        except ImportError:
            pass

    if not affinity_set:
        import warnings
        warnings.warn(
            "\n\n"
            "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
            "The 'affinity' or 'psutil' >= 0.5.0 modules are not available, results may be unreliable\n"
            "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n"
        )
        time.sleep(2)
    else:
        print("CPU affinity set to %d" % args.affinity)

    print("\n")
    prprint("LOG_FILE = %s" % args.log_file)
    if args.outdf:
        # fixed typo: message previously read "PICKE_FILE"
        prprint("PICKLE_FILE = %s" % args.outdf)
    print("\n")

    # move away from the pandas root dir, to avoid possible import
    # surprises
    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    benchmarks = [x for x in benchmarks if re.search(args.regex, x.name)]

    for b in benchmarks:
        b.repeat = args.repeats
        if args.ncalls:
            b.ncalls = args.ncalls

    if benchmarks:
        if args.head:
            profile_head(benchmarks)
        else:
            profile_comparative(benchmarks)
    else:
        print("No matching benchmarks")

    os.chdir(saved_dir)
def get_processor_affinity(pid=None):
    """Return the list of cores the given process may run on.

    ``pid`` defaults to the current process (0).
    """
    raw_mask = affinity.get_process_affinity_mask(pid if pid else 0)
    return _affinity_mask_to_list(raw_mask)
            continue
        # Detect keypoints (kp) and compute their descriptors (kd) for
        # the current grayscale frame.
        kp, kd = det.detectAndCompute(frm, None)
        # Draw the detected points on a colour copy of the frame.
        frmc = cv2.cvtColor(frm.copy(), cv2.COLOR_GRAY2BGR)
        fp.plotFPoints(frmc, kp, rad=3)
        # Persist the annotated image and the keypoint/descriptor data,
        # one pair of files per (frame, detector-name) combination.
        fkp = '%s_%s.kp' % (fname, tt)
        fimgOut = '%s_%s_kp.png' % (fname, tt)
        cv2.imwrite(fimgOut, frmc)
        fp.kp2file(fkp, kp, kd)
        print "--> %s : %s" % (tt, fkp)


#############################
if __name__ == '__main__':
    #
    try:
        # Best-effort: allow this process to run on every core
        # (bitmask with one bit set per logical CPU).
        print affinity.get_process_affinity_mask(0)
        affinity.set_process_affinity_mask(0, 2**mp.cpu_count() - 1)
    except:
        pass
    #
    # NOTE(review): fragment begins mid-loop (`continue` above) and ends
    # mid-script; the enclosing definitions are outside this chunk.
    cap = ph.VideoCSVReader(fidx)
    cap.printInfo()
    taskManager = fp.TaskManager()
    # Run each requested detector (by name) over the video frames.
    for tt in listDet:
        print tt
        det = fp.getDetByName(tt)
        if det == None:
            print 'ERROR: incorrect detector [%s], exit...' % tt
            sys.exit(1)
        ret = True
        numFrames = cap.getNumFrames()
def main():
    """Run the vbench benchmark suite, or — when both base and target
    pickles are given — just produce the comparative report and exit.

    Reads configuration from the module-level ``args`` namespace.
    """
    from suite import benchmarks

    # GitRepo wants exactly 7 character hash?
    if args.base_commit:
        args.base_commit = args.base_commit[:7]
    if args.target_commit:
        args.target_commit = args.target_commit[:7]

    if not args.log_file:
        args.log_file = os.path.abspath(os.path.join(REPO_PATH,
                                                     'vb_suite.log'))

    saved_dir = os.path.curdir
    if args.outdf:
        # not bullet-proof but enough for us
        args.outdf = os.path.realpath(args.outdf)
    if args.log_file:
        # not bullet-proof but enough for us
        args.log_file = os.path.realpath(args.log_file)

    random.seed(args.seed)
    np.random.seed(args.seed)

    # Shortcut: compare two previously pickled result sets and exit.
    if args.base_pickle and args.target_pickle:
        baseline_res = prep_pickle_for_total(pd.load(args.base_pickle))
        target_res = prep_pickle_for_total(pd.load(args.target_pickle))
        report_comparative(target_res, baseline_res)
        sys.exit(0)

    if args.affinity is not None:
        try:
            import affinity
            affinity.set_process_affinity_mask(0, args.affinity)
            assert affinity.get_process_affinity_mask(0) == args.affinity
            print("CPU affinity set to %d" % args.affinity)
        except ImportError:
            # fixed typo: message previously read "--afinity"
            print(
                "-a/--affinity specified, but the 'affinity' module is not available, aborting.\n"
            )
            sys.exit(1)

    print("\n")
    prprint("LOG_FILE = %s" % args.log_file)
    if args.outdf:
        # fixed typo: message previously read "PICKE_FILE"
        prprint("PICKLE_FILE = %s" % args.outdf)
    print("\n")

    # move away from the pandas root dir, to avoid possible import
    # surprises
    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    benchmarks = [x for x in benchmarks if re.search(args.regex, x.name)]

    for b in benchmarks:
        b.repeat = args.repeats
        if args.ncalls:
            b.ncalls = args.ncalls

    if benchmarks:
        if args.head:
            profile_head(benchmarks)
        else:
            profile_comparative(benchmarks)
    else:
        print("No matching benchmarks")

    os.chdir(saved_dir)
            continue
        # Detect keypoints (kp) and compute their descriptors (kd) for
        # the current grayscale frame.
        kp,kd=det.detectAndCompute(frm, None)
        # Draw the detected points on a colour copy of the frame.
        frmc=cv2.cvtColor(frm.copy(), cv2.COLOR_GRAY2BGR)
        fp.plotFPoints(frmc, kp, rad=3)
        # Persist the annotated image and the keypoint/descriptor data,
        # one pair of files per (frame, detector-name) combination.
        fkp='%s_%s.kp' % (fname, tt)
        fimgOut='%s_%s_kp.png' % (fname, tt)
        cv2.imwrite(fimgOut, frmc)
        fp.kp2file(fkp, kp,kd)
        print "--> %s : %s" % (tt, fkp)


#############################
if __name__=='__main__':
    #
    try:
        # Best-effort: allow this process to run on every core
        # (bitmask with one bit set per logical CPU).
        print affinity.get_process_affinity_mask(0)
        affinity.set_process_affinity_mask(0,2**mp.cpu_count()-1)
    except:
        pass
    #
    # NOTE(review): fragment begins mid-loop (`continue` above) and ends
    # mid-script; the enclosing definitions are outside this chunk.
    cap=ph.VideoCSVReader(fidx)
    cap.printInfo()
    taskManager=fp.TaskManager()
    # Run each requested detector (by name) over the video frames.
    for tt in listDet:
        print tt
        det=fp.getDetByName(tt)
        if det==None:
            print 'ERROR: incorrect detector [%s], exit...' % tt
            sys.exit(1)
        ret=True
        numFrames=cap.getNumFrames()
def current_affinity(PID):
    """Print the current CPU affinity mask of the process with id ``PID``."""
    ca = affinity.get_process_affinity_mask(PID)
    # The original formatted an undefined name ``pName`` here, which
    # raises NameError unless a same-named global happens to exist;
    # report the PID that was actually queried instead.
    print("Current affinity settings for {} = {}".format(PID, ca))
        self.name = _name        # human-readable thread label
        self.lock = _writeLock   # shared lock serialising the work loop

    def run(self):
        # Forever: grab the shared lock, report ownership, busy-wait
        # roughly 20 seconds (100 x 0.2s sleeps), then release so the
        # other thread can take a turn.  (Python 2 print syntax.)
        while 1:
            self.lock.acquire()
            print str(threading.currentThread().ident) + ' ' + self.name + " Aquired lock"
            i = 0
            while i < 100:
                i += 1
                time.sleep(0.2)
            self.lock.release()


# NOTE(review): this fragment starts mid-class; the class header and
# __init__ signature are outside this chunk.
# Shared lock from the low-level Python 2 `thread` module.
writeLock = thread.allocate_lock()
# Show the current process affinity mask before starting the threads.
print str(affinity.get_process_affinity_mask(0))
t1 = TObj("First Thread", writeLock)
t2 = TObj("Second Thread", writeLock)
t1.start()
t2.start()
# Keep the main thread alive long enough to observe the handoffs.
time.sleep(100)
if __name__ == "__main__": p1 = Process(target=wheel.standby, args=(dd, ), name='wheel') #行走系统 p2 = Process(target=controller.webconsole, args=(dd, ), name='controller') #用户操纵系统 p3 = Process(target=ultrasonic.standby, args=(dd, ), name='ultrasonic') #超声波测距系统 p4 = Process(target=camera.CameraVideo, args=(dd, ), name='Camera') #照相子系统 p1.daemon = True p2.daemon = True p3.daemon = True p4.daemon = True p1.start() p2.start() p3.start() p4.start() #针对四核,将进程分配到指定的CPU上运行 affinity.set_process_affinity_mask(p1.pid, 7L) #共用3CPU affinity.set_process_affinity_mask(p2.pid, 7L) affinity.set_process_affinity_mask(p3.pid, 7L) affinity.set_process_affinity_mask(p4.pid, 8L) #专用一路CPU print(affinity.get_process_affinity_mask(p4.pid)) p1.join() p2.join() p3.join() p4.join() print("系统停机...")