def runcode(self, code): """Execute a code object. When an exception occurs, self.showtraceback() is called to display a traceback. All exceptions are caught except SystemExit, which is reraised. A note about KeyboardInterrupt: this exception may occur elsewhere in this code, and may not always be caught. The caller should be prepared to deal with it. """ try: job = cloud.call(cloud_run, code, self.locals) cloud.join(job) result = cloud.result(job) self.locals.update(result) info = cloud.info(job, ['stderr', 'stdout'])[job] sys.stdout.write(info['stdout']) sys.stderr.write(info['stderr']) except SystemExit: raise except KeyboardInterrupt: raise OperationAborted('Interrupted') except cloud.CloudException, e: self.showcloudtraceback(e)
def review_jobs(label, jobs): info = cloud.info(jobs, ['runtime']) runtime = 0 count = len(info) tMin = 50000 tMax = 0 for value in info.values(): time = value['runtime'] if (time < tMin): tMin = time if (time > tMax): tMax = time runtime += time mean = runtime / count print "%s : %s : %s : %s : %s" % (label, runtime, tMin, tMax, mean)
def test_exception3():
    '''cloud.info called with two invalid positional arguments should raise TypeError.

    NOTE(review): relies on the harness treating the raised TypeError as the
    expected outcome (e.g. an expected-failure marker) -- confirm.
    '''
    cloud.info("asdf","sadf")
def test_exception2():
    '''cloud.info called with one invalid positional argument should raise TypeError.

    NOTE(review): relies on the harness treating the raised TypeError as the
    expected outcome (e.g. an expected-failure marker) -- confirm.
    '''
    cloud.info("asdf")
def test_exception1():
    '''cloud.info called with no arguments should raise TypeError.

    NOTE(review): relies on the harness treating the raised TypeError as the
    expected outcome (e.g. an expected-failure marker) -- confirm.
    '''
    cloud.info()
def test_info_all():
    """Requesting every documented info field for a finished job succeeds
    and the job reports status 'done'."""
    jid = cloud.call(lambda: 3*3)
    cloud.join(jid)
    requested = ['status','stdout','stderr','logging','pilog','exception',
                 'runtime','created','finished','env','vol','function',
                 'label','args','kwargs','func_body','attributes','profile',
                 'memory','cputime','cpu_percentage','ports']
    job_info = cloud.info(jid, requested)[jid]
    assert job_info['status'] == 'done'
def cold_start_fold(data_file, data_dir, model_class, exp_params, model_params): """Performs a timing run and then sends random restarts to picloud""" # Load data data = utils.data.load_cold_start_data(data_file) truth = data["truth"] # Perform a timing run job_id = cloud.call( cold_start_timing_run, data, model_class, exp_params, model_params, _max_runtime=3 * exp_params["max_initial_run_time"] / 60, _env=cloud_environment, _type=exp_params["core_type"], _cores=exp_params["cores_per_job"], ) result = cloud.result(job_id) # if not exp_params['local_computation']: # job_id = cloud.call(cold_start_timing_run, data, model_class, exp_params, model_params, \ # _max_runtime=3*exp_params['max_initial_run_time']/60, _env=cloud_environment, _type=exp_params['core_type'], _cores=exp_params['cores_per_job']) # result = cloud.result(job_id) # else: # result = cold_start_timing_run(data, model_class, exp_params, model_params) runtime = result["runtime"] if not exp_params["local_computation"]: max_memory = cloud.info(job_id, ["memory"])[job_id]["memory.max_usage"] else: max_memory = result["max_memory"] # Map random restarts to picloud exp_params["intermediate_iter"] = max( 1, int(round(0.9 * exp_params["max_sample_time"] / (exp_params["n_samples"] * result["time_per_mh_iter"]))) ) job_ids = cloud.map( cold_start_single_run, itertools.repeat(data, exp_params["n_restarts"]), itertools.repeat(model_class, exp_params["n_restarts"]), itertools.repeat(exp_params, exp_params["n_restarts"]), itertools.repeat(model_params, exp_params["n_restarts"]), _max_runtime=2 * (exp_params["max_burn_time"] + exp_params["max_sample_time"]) / 60, _env=cloud_environment, _type=exp_params["core_type"], _cores=exp_params["cores_per_job"], ) # Collate results results = cloud.result(job_ids, ignore_errors=True) # if not exp_params['local_computation']: # job_ids = cloud.map(cold_start_single_run, itertools.repeat(data, exp_params['n_restarts']), \ # itertools.repeat(model_class, exp_params['n_restarts']), 
\ # itertools.repeat(exp_params, exp_params['n_restarts']), \ # itertools.repeat(model_params, exp_params['n_restarts']), \ # _max_runtime=2*(exp_params['max_burn_time']+exp_params['max_sample_time'])/60, _env=cloud_environment, \ # _type=exp_params['core_type'], _cores=exp_params['cores_per_job']) # # Collate results # results = cloud.result(job_ids, ignore_errors=True) # else: # print 'Performing true runs' # results = [cold_start_single_run(data, model_class, exp_params, model_params) for dummy in range(exp_params['n_restarts'])] ess_sum = 0 first_result = True for i, result in enumerate(results): if not isinstance(result, Exception): if first_result: overall_prediction = result["predictions"] first_result = False else: overall_prediction = np.column_stack([overall_prediction, result["predictions"]]) runtime += result["runtime"] if not exp_params["local_computation"]: max_memory = max(max_memory, cloud.info(job_ids[i], ["memory"])[job_ids[i]]["memory.max_usage"]) else: max_memory = max(max_memory, result["max_memory"]) ess_sum += result["ess"] else: print "Warning, job %d failed" % (i + 1) if not isinstance(overall_prediction, list): # This will occur when only one successful restart overall_prediction = list(overall_prediction.mean(axis=1)) # Score results roc_data = [] for (true_link, prediction) in zip(truth, overall_prediction): roc_data.append((true_link, prediction)) AUC = ROCData(roc_data).auc() # Pickle results overall_results = { "runtime": runtime, "max_memory": max_memory, "AUC": AUC, "ess": ess_sum, "runtime": runtime, "raw_results": results, "data_file": data_file, "model_class": model_class, "exp_params": exp_params, "model_params": model_params, } save_file_name = os.path.join( exp_params["results_dir"], model_class(**model_params).description(), os.path.splitext(os.path.split(data_file)[-1])[0] + ".pickle", ) save_file_dir = os.path.split(save_file_name)[0] if not os.path.isdir(save_file_dir): os.makedirs(save_file_dir) with open(save_file_name, 
"wb") as save_file: pickle.dump(overall_results, save_file, -1) return overall_results
def test_info_finished():
    """A completed job must carry a non-None 'finished' timestamp."""
    jid = cloud.call(lambda: 3*3)
    cloud.join(jid)
    finished = cloud.info(jid, ['finished'])[jid]['finished']
    assert finished is not None
def test_info_exception():
    """A job that finished cleanly reports no 'exception'.

    Fix: identity comparison 'is None' instead of '== None' (PEP 8).
    """
    jid = cloud.call(lambda: 3*3)
    cloud.join(jid)
    result = cloud.info(jid, ['exception'])
    obj = result[jid]
    assert obj['exception'] is None
def test_info_args():
    """The 'args' info field is populated for a finished job."""
    jid = cloud.call(lambda: 3*3)
    cloud.join(jid)
    args_field = cloud.info(jid, ['args'])[jid]['args']
    assert args_field is not None
def test_info_function():
    """The 'function' info field is populated for a finished job."""
    jid = cloud.call(lambda: 3*3)
    cloud.join(jid)
    function_field = cloud.info(jid, ['function'])[jid]['function']
    assert function_field is not None
# <codecell> # this will block until job is done or errors out cloud.join(jid) # <codecell> # get your result cloud.result(jid) # <codecell> # get some basic info cloud.info(jid) # <codecell> # get some specific info cloud.info(jid, info_requested=['created', 'finished', 'runtime', 'cputime']) # <headingcell level=1> # What I got the first time # <markdowncell> # I had to retry 2 jobs # # * https://www.picloud.com/accounts/jobs/#/?ujid=344 -> read timed out
# Command-line dispatch for the dna-db PiCloud workflow.
# NOTE(review): this fragment was recovered without indentation; the
# args.gk/args.gd/args.ed branches are nested under 'if args.pi:' because
# they use path_, which is only assigned there -- confirm against the
# original layout.
if args.sync==0:
    # Pull the cloud volume down to the local working copy, then quit.
    print "synchronize cloud\'s folder to local folder and exit;"
    cloud.volume.sync('dna-db:', './dna-db')
    exit()
elif args.sync==1:
    # Push the local working copy up to the cloud volume, then quit.
    print "synchronize local folder to cloud\'s folder and exit;"
    cloud.volume.sync('./dna-db', 'dna-db:')
    exit()
if args.pi:
    print "running on cloud"
    # Path of the mounted 'dna-db' volume as seen by the PiCloud worker.
    path_="/home/picloud/dna-db/"
    if args.gk:
        # Generate keys remotely; args.gk carries the key bit length.
        bitlength=args.gk
        jid=cloud.call(generate_keys, path_, bitlength, _vol="dna-db");
        cloud.join(jid)
        print 'job function took %0.3f ms' % (cloud.info(jid).get(jid).get('runtime')*1000)
        print cloud.info(jid).get(jid).get('stdout').strip()
        print cloud.result(jid)
    if args.gd:
        # Generate a database remotely; args.gd carries (n, m) dimensions.
        n=args.gd[0]
        m=args.gd[1]
        jid=cloud.call(generate_database, path_, file_, n, m, _vol="dna-db");
        cloud.join(jid)
        print 'job took %0.3f s' % (cloud.info(jid).get(jid).get('runtime'))
        print cloud.info(jid).get(jid).get('stdout').strip()
        print cloud.result(jid)
    if args.ed==0:
        # Encrypt the database remotely using binary mode (mode 0).
        print "encrypt database using binary mode"
        jid=cloud.call(encrypt_database_0, path_, file_, file_e_0, _vol="dna-db")
        cloud.join(jid)
        print 'job took %0.3f s' % (cloud.info(jid).get(jid).get('runtime'))
# Demo: PiCloud captures a job's stdout/stderr, retrievable via cloud.info.
import cloud
import sys

def foo():
    # Emit one line on each stream and flush so the capture is complete
    # before the job exits.
    print "Output"
    print >> sys.stderr, "An Error"
    sys.stdout.flush()
    sys.stderr.flush()

jid = cloud.call(foo)
cloud.join(jid)
cloud.info(jid, ['stderr', 'stdout'] )
#returns {jid: {'stderr': 'An Error\n', 'stdout': 'Output\n'}}
# Mixed request: captured stdout plus dotted resource-usage counters.
result = cloud.info(jid, ['stdout', 'memory.failcnt', 'cputime.user'])
print result
# <codecell> # this will block until job is done or errors out cloud.join(jid) # <codecell> # get your result cloud.result(jid) # <codecell> # get some basic info cloud.info(jid) # <codecell> # get some specific info cloud.info(jid, info_requested=['created', 'finished', 'runtime', 'cputime']) # <headingcell level=1> # What I got the first time # <markdowncell> # I had to retry 2 jobs # # * https://www.picloud.com/accounts/jobs/#/?ujid=344 -> read timed out
def test_info_status():
    """After join() returns, the job's status must read 'done'."""
    jid = cloud.call(lambda: 3*3)
    cloud.join(jid)
    status = cloud.info(jid, ['status'])[jid]['status']
    assert status == 'done'
def test_info_stderr():
    """A job that wrote nothing to stderr reports None for 'stderr'.

    Fix: identity comparison 'is None' instead of '== None' (PEP 8).
    """
    jid = cloud.call(lambda: 3*3)
    cloud.join(jid)
    result = cloud.info(jid, ['stderr'])
    obj = result[jid]
    assert obj['stderr'] is None
def test_info_memory():
    """Peak memory usage of a finished job is reported as a positive value."""
    jid = cloud.call(lambda: 3*3)
    cloud.join(jid)
    peak = cloud.info(jid, ['memory.max_usage'])[jid]['memory.max_usage']
    assert peak > 0
def test_infoi_runtime():
    # NOTE(review): 'infoi' looks like a typo for 'info', but renaming would
    # change the test's discovered name, so it is kept.
    """Runtime of a finished job is reported as a positive value."""
    jid = cloud.call(lambda: 3*3)
    cloud.join(jid)
    elapsed = cloud.info(jid, ['runtime'])[jid]['runtime']
    assert elapsed > 0
def test_info_cputime():
    """User CPU time of a finished job is reported as a positive value."""
    jid = cloud.call(lambda: 3*3)
    cloud.join(jid)
    user_time = cloud.info(jid, ['cputime.user'])[jid]['cputime.user']
    assert user_time > 0
def test_info_env():
    """A job launched with no custom environment reports an empty 'env'."""
    jid = cloud.call(lambda: 3*3)
    cloud.join(jid)
    env_field = cloud.info(jid, ['env'])[jid]['env']
    assert env_field == ''
def test_info_cpu_percentage():
    """The 'cpu_percentage.system' sub-field is absent (None) for this job.

    Fix: identity comparison 'is None' instead of '== None' (PEP 8).
    NOTE(review): the request asks for 'cpu_percentage' but the assertion
    reads 'cpu_percentage.system' -- confirm the dotted sub-key is what the
    API returns under a bare 'cpu_percentage' request.
    """
    jid = cloud.call(lambda: 3*3)
    cloud.join(jid)
    result = cloud.info(jid, ['cpu_percentage'])
    obj = result[jid]
    assert obj['cpu_percentage.system'] is None