def dispense_job_2_worker(self, job=None):
    """Fan the commands of *job* out to worker processes.

    For each entry in job["cmds"], either reject it with a "busy"
    reply (total process cap reached, or the CPU-bound load limit is
    hit for encoder-type commands) or validate its arguments and hand
    it to a freshly forked worker.

    job -- decoded job dict with a "cmds" list; None/empty is a no-op.
    """
    global tmp_queue
    # Nothing to dispense.
    if not job:
        return
    for i in job["cmds"]:
        if len(processing.activeChildren()) >= setting.MAX_PROCESS:
            # Hard cap on the total number of child processes reached:
            # refuse this command and move on to the next one.
            print "xxxx : process numbers is very big.", processing.activeChildren()
            print "xxxx : numbers cmds.", job["cmds"]
            self.set_busy_job(i, "Client is busy now: jobs(all) numbers >= max_process(%s)."%setting.MAX_PROCESS)
            continue
        elif check_load_limit(i):
            # CPU-heavy (encoder) workers already saturate the CPUs
            # (see check_load_limit); refuse this command as busy.
            print "xxxx : process numbers is very big.", processing.activeChildren()
            print "xxxx : numbers cmds.", job["cmds"]
            self.set_busy_job(i, "Client is busy now: jobs(encoder) numbers >= CPUS(%s)."%cpuCount)
            continue
        # Validate the command; creq is the (possibly normalized) request.
        ccode, creq=self.check_arguments(i)
        # add new job to cmdStatus — queued even when validation failed,
        # presumably so the status table records the rejection; verify.
        _put(tmp_queue, creq)
        if ccode:  # ok: arguments validated
            # start new process to run this command.
            self.new_worker(creq)
def control(): """ """ # init env clear_env() # do # loop check which thrad die, and than to start new thread, OK new_one_process(log, "log_process") new_one_process(monitor, "monitor_process") cmdStatus={} while True: print "#######"*20 print "cmdStatus :", cmdStatus print "#######"*20 Communication(cmdStatus).run() print "activeChildren :", processing.activeChildren() [e.join(0.1) for e in processing.activeChildren()] for p in ("log_process", "monitor_process"): if not G_process[p].isAlive(): GenLog("ERROR", "%s exit."%p) sys.exit()
def test(namespace=processing): global processing processing = namespace for func in [ test_value, test_queue, test_condition, test_semaphore, test_join_timeout, test_event, test_sharedvalues ]: print '\n\t######## %s\n' % func.__name__ func() ignore = processing.activeChildren() # cleanup any old processes if hasattr(processing, '_debugInfo'): info = processing._debugInfo() if info: print info raise ValueError, 'there should be no positive refcounts left'
def report_server(self):
    """Poll the server for new jobs after the regular housekeeping pass.

    Sleeps VISIT_TIME, runs storage/code checks, then POSTs the current
    client status to the server's new-job endpoint and JSON-decodes the
    response into `job`.  Logs and returns early on transport or decode
    failure.  NOTE(review): the code that consumes `job` is outside this
    view — presumably follows the try/except below.
    """
    global log_queue
    # Throttle the polling loop.
    time.sleep(VISIT_TIME)
    # dplayer check storage left.
    no_left_storage()
    # Reload if the source on disk changed.
    code_changed()
    # Build and url-encode the request body describing this client.
    new_job_request_body=self.get_request_body()
    istr=urllib.urlencode(new_job_request_body)
    hostname, port, baseuri, proxy=setting.HOSTNAME, setting.PORT, setting.BASEURI_NEWJOB, setting.PROXY
    server = hostname + ":" + str(port)
    print "======"*20
    print "server :", server
    print "baseuri:", baseuri
    print "new_job_request_body:",new_job_request_body
    print "======"*20
    _put(log_queue, "[send-request-ok] server=%s baseuri=%s body=%s"%(server, baseuri, new_job_request_body))
    # POST the status report; req_code is the success flag, req the raw body.
    req_code, req =netutil.client_post(server, baseuri, istr, proxy)
    _put(log_queue, "[processing.activeChildren()] %s"%(processing.activeChildren()))
    # Commented-out canned responses kept for manual testing of each job type.
    # test ls
    #req_code, req=True, json.write({"global":{"touch_interval":5,"health_report_interval":20},"cmds":[{"cid":N, "tid":0,"type":"ls","param":{"path":"/home", "timeout":10*60}),} ],}
    # test pic
    #req_code, req=True, json.write({"global":{"touch_interval":5,"health_report_interval":20},"cmds":[{"cid":N,"tid":0,"type":"pic","param":{"path":"/home/mps-test/core/v2-mps/encoder/benchmark/src.flv", "desc_dir":"/home/tommy/Client/__TMP/pic_%s"%Tid, "pic_type":"normal", "timeout":10*60},} ],})
    # test analyze_src
    #req_code, req=True, json.write({"global":{"touch_interval":5,"health_report_interval":20},"cmds":[{"cid":N,"tid":0,"type":"analyze_src","param":{"src":"/home/mps-test/core/v2-mps/encoder/benchmark/src.flv", "timeout":600},} ],})
    # test delete_file
    #req_code, req=True, json.write({"global":{"touch_interval":5,"health_report_interval":20},"cmds":[{"cid":N,"tid":0,"type":"delete_file","param":{"path":"/home/mps-test/core/v2-mps/encoder/benchmark/src.flv", "timeout":600},} ],})
    if not req_code:
        # Transport-level failure: log and give up until the next poll.
        print "%s getjobs Error."%time.ctime()
        _put(log_queue, "[get-new-job-error] %s"%req)
        return
    try:
        # Decode the server response (old `json` module API: read/write).
        job=json.read(req)
        _put(log_queue, "[get-new-job-ok] %s"%req)
    except Exception, e:
        _put(log_queue,
             "[get-new-job-error] json.read(%s) Exception, %s"%(req, e))
        return
# NOTE(review): fragment of a per-row loop — `row`, `loge`,
# `has_processing`, `max_threads`, `create_model` and `create_pull`
# are defined outside this view; confirm against the enclosing loop.
if loge < 0:
    # Encode the sign of log(E) into the filename-safe tag: m=minus.
    loge = "m" + str(abs(loge))
else:
    # p=plus.
    loge = "p" + str(abs(loge))
emin = float(row['emin'])
emax = float(row['emax'])
flux = float(row['crab'])
# Generate model
models = create_model(flux)
# Processing support?
if has_processing:
    # Wait until one thread has finished, polling once a minute,
    # so at most max_threads pull processes run concurrently.
    while len(processing.activeChildren()) >= max_threads:
        time.sleep(60)
    # Set arguments
    args = (loge, emin, emax, models)
    kwargs = {}
    # Generate pull distribution in a child process.
    p = processing.Process(target=create_pull, args=args, kwargs=kwargs)
    p.start()
    print("Process emin=%.4f emax=%.4f started." % (emin, emax))
    # Wait some time before launching the next one.
    time.sleep(1)
# NOTE(review): near-duplicate of the preceding per-row fragment —
# candidate for extraction into a shared helper.  `row`, `loge`,
# `has_processing`, `max_threads`, `create_model` and `create_pull`
# are defined outside this view.
if loge < 0:
    # Encode the sign of log(E) into the filename-safe tag: m=minus.
    loge = "m"+str(abs(loge))
else:
    # p=plus.
    loge = "p"+str(abs(loge))
emin = float(row['emin'])
emax = float(row['emax'])
flux = float(row['crab'])
# Generate model
models = create_model(flux)
# Processing support?
if has_processing:
    # Wait until one thread has finished, polling once a minute,
    # so at most max_threads pull processes run concurrently.
    while len(processing.activeChildren()) >= max_threads:
        time.sleep(60)
    # Set arguments
    args = (loge, emin, emax, models)
    kwargs = {}
    # Generate pull distribution in a child process.
    p = processing.Process(target=create_pull, args=args, kwargs=kwargs)
    p.start()
    print("Process emin=%.4f emax=%.4f started." % (emin, emax))
    # Wait some time before launching the next one.
    time.sleep(1)
def check_load_limit(job=None):
    """Return True when no more CPU-bound workers may be forked.

    A worker counts against the limit when the middle part of its
    process name (getName().split("_")[1]) is one of the CPU-bound
    types in setting.LIMIT_CPU.  The limit only applies when *job*
    itself is of a CPU-bound type.

    job -- command dict with a "type" key; None returns False
           (fix: the original dereferenced job["type"] and raised
           TypeError when called with the declared default).
    Returns True  -> load >= limit, we can not fork more workers.
            False -> load <  limit, we can fork more workers.
    """
    if job is None:
        # No job to classify: never blocks dispatch.
        return False
    # NOTE(review): assumes worker names contain at least one "_";
    # getName().split("_")[1] would raise IndexError otherwise.
    cpu_bound_workers = [e for e in processing.activeChildren()
                         if e.getName().split("_")[1] in setting.LIMIT_CPU]
    # Evaluation order kept from the original: activeChildren() is
    # always polled before the job type is inspected.
    return len(cpu_bound_workers) >= cpuCount and job["type"] in setting.LIMIT_CPU