def job1_escape_flux(self, parameterset):
    """Run an escape-flux trajectory between the interfaces A and B.

    Repeatedly launches the harness job_script and collects its output
    until the trajectory either reaches B (reset, failure), crosses A
    coming from A (point saved, success), or exceeds ``max_steps``.

    Args:
        parameterset: dict from the server; must contain 'A', 'B',
            'act_lambda', 'seed' and 'max_steps'; may contain 'rp_id',
            'uuid', 'random_points' and 'equilibrate_point'.

    Returns:
        A hand-built JSON-like result string (``"{...}"``) describing
        success/failure, timing, step counts and the final point.

    Raises:
        SystemExit: when the harness reports 'quit' metadata or any
            exception occurs while driving the subprocess.
    """
    A = parameterset['A']
    B = parameterset['B']
    act_lambda = parameterset['act_lambda']
    seed = parameterset['seed']
    # BUGFIX: was a bare 'except:' which would also swallow
    # KeyboardInterrupt/SystemExit; only a missing key is expected here.
    try:
        parent_id = parameterset['rp_id']
    except KeyError:
        parent_id = 'escape'
    uuid = parameterset.get('uuid', '')

    print('Calculating escape flux: ' + str(A) + ', ' + str(B))

    all_meta = {}
    success = False
    rcvals = []
    points = []
    h = harness(self.cli.exec_name, self.cli.harness_path + "/job_script", self)

    # Wrap the code that uses threading/subprocesses
    # in a try-catch to clean up on interrupts, ctrl-C etc.
    try:
        # start loading the input pipes for the MD process
        use_existing_point = False
        # Checking for previous points
        if 'random_points' in parameterset:
            print("Random points key in paramset")
            if not 'None' in str(parameterset['random_points']):
                # Use previous point
                use_existing_point = True

        if use_existing_point:
            # we are at least on A...
            comefromok = 'equilibrate_point' in parameterset
            h.send(True, True, True, parameterset['random_points'])
            optionlist = "-tmpdir " + h.tmpdir + \
                         " -initial_config None" + \
                         " -in_fifoname " + h.crds_in_fifoname + \
                         " -back_fifoname " + h.crds_back_fifoname + \
                         " -metadata_fifoname " + h.metadata_fifoname + \
                         " -halt_steps 0 " + \
                         " -check_rc_every 1" + \
                         self.build_options(parameterset)
            h.subthread_run_script(optionlist)
        else:
            # we assume that the simulation is set up in A if no
            # last successful point is received
            comefromok = True
            h.send(False, True, True, ['None'])
            optionlist = "-tmpdir " + h.tmpdir + \
                         " -initial_config " + self.cli.harness_path + "/initial_config.dat" + \
                         " -back_fifoname " + h.crds_back_fifoname + \
                         " -metadata_fifoname " + h.metadata_fifoname + \
                         " -halt_steps 0" + \
                         " -check_rc_every 1" + \
                         self.build_options(parameterset)
            h.subthread_run_script(optionlist)

        calcsteps = 0
        ctime = 0
        while True:
            # read output from the MD subthread
            # (local renamed from 'time' to avoid shadowing the stdlib module)
            steps, md_time, rc, all_meta = h.collect(points, rcvals)
            if 'quit' in all_meta:
                raise SystemExit(all_meta['quit'])
            calcsteps += steps
            ctime += md_time
            print("Client: collected MD output" + str((steps, md_time, rc)))
            flRc = float(rc)

            if 'step_abort' in all_meta:
                if all_meta['step_abort']:
                    print("Client: job was aborted because of maximum steps.")
                    success = False
            elif 'reached_B_escape' in all_meta:
                if all_meta['reached_B_escape']:
                    print("Escape job reached B, asking server for new config")
                    success = False
                else:
                    success = True
            else:
                success = True

            if self.cli.checking_script > 0:
                break
            else:
                # Verify that escape conditions have been met.
                # This is necessary for simulation tools
                # which do not do this logic themselves
                # ...but can cause problems if they *do*
                # perform the logic themselves
                if flRc >= float(B):
                    print("Client: reached B, resetting")
                    success = False
                    break
                elif flRc >= float(A) and comefromok:
                    print("Client: reached interface coming from A, saving point.")
                    comefromok = False
                    success = True
                    break
                elif flRc < float(A) and not comefromok:
                    print("Client: has fallen back to A")
                    comefromok = True

            if parameterset['max_steps'] > 0:
                if calcsteps >= parameterset['max_steps']:
                    success = False
                    break

            # Start a new sender to write out the data that we just recieved.
            # Assuming that it is safe to both read and write from points,
            # because all simulation programs will complete reading their
            # input before they write their output.
            h.send(True, True, True, points[-1])
            optionlist = "-tmpdir " + h.tmpdir + \
                         " -initial_config None" + \
                         " -in_fifoname " + h.crds_in_fifoname + \
                         " -back_fifoname " + h.crds_back_fifoname + \
                         " -metadata_fifoname " + h.metadata_fifoname + \
                         " -halt_steps 0" + \
                         " -check_rc_every 1" + \
                         self.build_options(parameterset)
            # fork a subthread to run the MD, starting from the crds_in fifo.
            h.subthread_run_script(optionlist)
    except Exception as e:
        print("Client: exception while running harness, %s" % e)
        h.clean()
        raise SystemExit(e)

    h.clean()
    print("Client: Constructing result string")
    if success:
        results_base = "\"jobtype\": 1, \"success\": True, \"points\": " + str(points[-1])
    else:
        results_base = "\"jobtype\": 1, \"success\": False"
    results_base += ", \"ctime\": " + str(ctime) + \
                    ", \"seed\": " + str(seed) + \
                    ", \"act_lambda\": " + str(act_lambda) + \
                    ", \"calcsteps\": " + str(calcsteps) + \
                    ", \"origin_points\": \"" + str(parent_id) + "\"" + \
                    ", \"rcval\": " + str(flRc) + \
                    ", \"uuid\": \"" + uuid + "\""
    results = self.build_custominfo(results_base, all_meta)
    return "{" + results + "}"
def job3_fixed_tau(self, parameterset):
    """Run a trajectory of fixed length ``tau`` steps, possibly in chunks.

    Each chunk is executed by the harness job_script; output points are
    recycled as the input of the next chunk until ``tau`` steps have been
    accumulated (or, when ``check_rc_every`` is active, the reaction
    coordinate reaches B early).

    Args:
        parameterset: dict from the server; keys read here include
            'halt_steps' (tau), 'seed', 'rp_id', 'currentlambda',
            'parentlambda', 'uuid', 'check_rc_every', 'save_configs'
            and 'random_points'.

    Returns:
        A hand-built JSON-like result string (``"{...}"``).

    Raises:
        SystemExit: if the harness raises while running a chunk.
    """
    if self.prinlevel > 0:
        print("parameters:")
        sys.stdout.flush()
        print(str(parameterset)[0:128] + "...")
        sys.stdout.flush()

    ## expected parameters for this job type
    tau = self.safeAssign(parameterset, 'halt_steps')
    # BUGFIX: 'seed' was assigned twice, and an unused 'currentLambda'
    # (note the casing) was also set from the 'seed' key by mistake.
    seed = self.safeAssign(parameterset, 'seed')
    parent_id = self.safeAssign(parameterset, 'rp_id')
    currentlambda = self.safeAssign(parameterset, 'currentlambda')
    parentlambda = self.safeAssign(parameterset, 'parentlambda')
    uuid = self.safeAssign(parameterset, 'uuid')
    check_rc_every = self.safeAssign(parameterset, 'check_rc_every')
    if check_rc_every is None:
        check_rc_every = "0"
    save_configs = self.safeAssign(parameterset, 'save_configs')
    if save_configs is None:
        save_configs = "0"
    # NOTE(review): the original compared 'rc >= B' below with 'B' never
    # defined (NameError whenever check_rc_every != "0").  Assuming B is
    # supplied in the parameterset as in job1 -- TODO confirm with server.
    B = self.safeAssign(parameterset, 'B')

    rcvals = []
    all_meta = {}
    num_steps = 0
    ctime_tot = 0.0

    ## create a harness object to manage comms with the subtask.
    h = harness(self.cli.exec_name, self.cli.harness_path + "/job_script", self)
    tmp_seed = int(seed)
    points = parameterset['random_points']

    while num_steps < tau:
        print("Using temp dir: " + str(h.tmpdir))
        if parent_id == "0" and num_steps == 0:
            start_config = self.cli.initial_config_path
            in_fifo = "None"
            get_coords = True
            send_coords = False
            if os.path.isfile(start_config):
                print("client: Found file at: " + start_config)
                print("client: Treating input coords as NULL, loading from file.")
            else:
                print("client: Treating input coords as NULL:\n" +
                      "client: could not find input file: " + str(start_config) + "\n"
                      "client: hoping that the harness will generate one!")
                start_config = "None"
        else:
            ## assume that saving to file means reading from a file
            if save_configs != "0":
                start_config = points[0][0]
                get_coords = True
                ## only open a read fifo, to get the filename of the crds
                send_coords = False
                in_fifo = "None"
            else:
                start_config = "None"
                get_coords = True
                ## open two fifos, send and receive
                send_coords = True
                in_fifo = h.crds_in_fifoname

        optionlist = " -tmpdir " + h.tmpdir + \
                     " -in_fifoname " + in_fifo + \
                     " -initial_config " + start_config + \
                     " -back_fifoname " + h.crds_back_fifoname + \
                     " -metadata_fifoname " + h.metadata_fifoname + \
                     " -seed " + str(tmp_seed)
        if check_rc_every == "0":
            optionlist += " -check_rc_every 0 "
            ## pass the remaining options from the parameterset straight
            ## through ....but with some exclusions.
            optionlist += self.build_options(parameterset,
                                             ["jobtype", "halt_rc_upper",
                                              "halt_rc_lower", "save_configs"])
        else:
            ## pass the remaining options from the parameterset straight
            ## through ....but with some exclusions.
            optionlist += self.build_options(parameterset, ["jobtype"])

        ## are we just saving state locally, sending a path back to the server?
        if save_configs != "0":
            optionlist += " -coords_to_file " + save_configs + "/" + uuid
        print("client: optionlist: " + str(optionlist)[0:128] + "...")

        ## Wrap the code that uses threading/subprocesses
        ## in a try-catch to clean up on interrupts, ctrl-C etc.
        try:
            h.send(send_coords, get_coords, True, points)
            h.subthread_run_script(optionlist)
            pp = []
            calcsteps, ctime, rc, all_meta = h.collect(pp, rcvals)
        except Exception as e:  # BUGFIX: was 'except e:' -> NameError
            print("Client: exception while runnning harness, %s" % e)
            h.clean()
            raise SystemExit(e)

        num_steps = num_steps + calcsteps
        ctime_tot += ctime
        if str(check_rc_every) != "0":
            if B is not None and rc >= B:
                print("Client: ending run, rc: " + str(rc) + " reached B: " + str(B))
                rcvals = [rc]
                break
            elif num_steps >= tau:
                print("Client: ending run, steps " + str(num_steps) + " reached tau: " + str(tau))
                rcvals = [rc]
                break
        else:
            if send_coords == False:
                # subsequent chunks feed coordinates through the fifo
                # (a dead 'start = h.crds_in_fifoname' store was removed)
                send_coords = True
        ## recycle the output points as the next chunk's input
        points = pp[0]

    ## clean up the fifos.
    h.clean()

    ## build the return packet
    results = "\"jobtype\": 3, \"success\": True" + \
              ", \"seed\": " + str(seed) + \
              ", \"parentlambda\": " + str(currentlambda) + \
              ", \"newlambda\": " + str(rcvals) + \
              ", \"origin_points\": \"" + str(parent_id) + "\"" + \
              ", \"calcsteps\": " + str(num_steps) + \
              ", \"ctime\": " + str(ctime_tot) + \
              ", \"points\": " + str(pp) + \
              ", \"uuid\": \"" + uuid + "\""
    results = self.build_custominfo(results, all_meta)
    print("client returning results packet: " + str(results)[0:128] + "..." +
          str(results[len(results) - 64:len(results)]))
    return "{" + results + "}"
def job2_probabilities(self, parameterset):
    """Shoot a trajectory from an interface until it either falls back
    below A or reaches the next interface.

    Args:
        parameterset: dict from the server; must contain 'A',
            'next_interface', 'act_lambda', 'seed', 'rp_id' and
            'random_points'; may contain 'uuid'.

    Returns:
        A hand-built JSON-like result string (``"{...}"``); success means
        the next interface was reached.

    Raises:
        SystemExit: if the harness raises while running.
    """
    A = parameterset['A']
    next_interface = parameterset['next_interface']
    act_lambda = parameterset['act_lambda']
    seed = parameterset['seed']
    parent_id = parameterset['rp_id']
    points = []
    rcvals = []
    ctime = 0
    calcsteps = 0
    uuid = parameterset.get('uuid', '')
    all_meta = {}

    h = harness(self.cli.exec_name, self.cli.harness_path + "/job_script", self)

    # Wrap the code that uses threading/subprocesses
    # in a try-catch to clean up on interrupts, ctrl-C etc.
    try:
        print("sending: " + str(parameterset['random_points'])[0:64])
        # start loading the input pipes for the MD process
        h.send(True, True, True, parameterset['random_points'])
        while True:
            optionlist = "-tmpdir " + h.tmpdir + \
                         " -initial_config None" + \
                         " -in_fifoname " + h.crds_in_fifoname + \
                         " -back_fifoname " + h.crds_back_fifoname + \
                         " -metadata_fifoname " + h.metadata_fifoname + \
                         " -halt_steps 0" + \
                         " -check_rc_every 1" + \
                         self.build_options(parameterset)
            # fork a subthread to run the MD
            h.subthread_run_script(optionlist)
            # read output from the MD subthread
            # (local renamed from 'time' to avoid shadowing the stdlib module)
            steps, md_time, rc, all_meta = h.collect(points, rcvals)
            calcsteps += steps
            ctime += md_time
            flRc = float(rc)
            if self.cli.checking_script > 0:
                break
            else:
                # Verify that the conditions have been met.
                # This is necessary for simulation tools
                # which do not do this logic themselves.
                # Consistency fix: compare as floats like job1 does --
                # A / next_interface may arrive as strings.
                if flRc <= float(A):
                    break
                elif flRc >= float(next_interface):
                    break
            # Start a new sender to write out the data that we just recieved.
            # Assuming that it is safe to both read and write from points,
            # because all simulation programs will complete reading their
            # input before they write their output.
            h.send(True, True, True, points[-1])
    except Exception as e:  # BUGFIX: was 'except e:' -> NameError
        # BUGFIX: log message typo "Cient" corrected
        print("Client: exception while running harness, %s" % e)
        h.clean()
        raise SystemExit(e)

    h.clean()
    # only build a full results packet if we have a success
    if flRc >= float(next_interface):
        results_base = "\"jobtype\": 2, \"success\": True, \"points\": " + str(points[-1])
    else:
        results_base = "\"jobtype\": 2, \"success\": False"
    results_base += ", \"act_lambda\": " + str(act_lambda) + \
                    ", \"seed\": " + str(seed) + \
                    ", \"origin_points\": \"" + str(parent_id) + "\"" + \
                    ", \"calcsteps\": " + str(calcsteps) + \
                    ", \"ctime\": " + str(ctime) + \
                    ", \"rcval\": " + str(flRc) + \
                    ", \"uuid\": \"" + uuid + "\""
    results = self.build_custominfo(results_base, all_meta)
    return "{" + results + "}"