def __init__(self, url, user, token):
    self.port = 8000  ## change
    if url.find(':') == -1:  # Port not included
        self.url = url + ':{}'.format(self.port)
    else:
        self.url = url
    self.user = user
    self.token = token
    self.headers = {'Authorization': '{}'.format(self.token),
                    'Content-type': 'application/json',
                    'Accept': 'text/plain'}
    shockres = requests.get('http://{}/shock'.format(self.url),
                            headers=self.headers).text
    self.shockurl = 'http://{}/'.format(json.loads(shockres)['shockurl'])
    self.shock = Shock(self.shockurl, self.user, self.token)
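# Minimal, runnable sketch of the URL normalization performed above
# (hypothetical helper; 8000 mirrors the hard-coded default port).
def normalize_url(url, default_port=8000):
    """Append the default port when the url carries no explicit port."""
    return url if ':' in url else '{}:{}'.format(url, default_port)

assert normalize_url('server.example.org') == 'server.example.org:8000'
assert normalize_url('server.example.org:9000') == 'server.example.org:9000'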
class Runner(object):
    # from environment import Environment

    #
    # VARIABLES
    #

    #
    # METHODS
    #
    #-------------------------------------------------------------------------
    # __init__
    #-------------------------------------------------------------------------
    def __init__(self):
        pass
    #-------------------------------------------------------------------------

    #-------------------------------------------------------------------------
    # initialize()
    #-------------------------------------------------------------------------
    def initialize(self, environment):
        self.environment = environment
        self.updater = Updater(self.environment)
        self.shocker = Shock()
    #-------------------------------------------------------------------------

    #-------------------------------------------------------------------------
    # do_run
    #-------------------------------------------------------------------------
    def do_run(self, measurement, debug):
        # loop over all time steps and do the updating
        for i in range(self.environment.parameters.numSweeps):
            # the update step
            self.updater.do_update(self.environment, i, debug)
            # check if there is a shock at the current time step
            if int(self.environment.get_state(i).shockType) != 0:
                self.shocker.do_shock(self.environment, int(i))
                self.environment.get_state(i).shockType = 0
            # do the measurement
            measurement.do_measurement(self.environment.banks)
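# Hedged usage sketch: the real Environment/Updater/Shock come from the
# surrounding simulation framework. The stubs below are hypothetical
# stand-ins that only satisfy the calls Runner makes, so the sequence runs.
class Updater(object):
    def __init__(self, environment): pass
    def do_update(self, environment, i, debug): pass

class Shock(object):  # shadows the storage client of the same name; stub only
    def do_shock(self, environment, i): pass

class StubState(object):
    shockType = 1  # trigger one shock; Runner resets it to 0 afterwards

class StubEnvironment(object):
    class parameters(object):
        numSweeps = 3
    banks = []
    def __init__(self):
        self._state = StubState()
    def get_state(self, i):
        return self._state

class StubMeasurement(object):
    def do_measurement(self, banks): pass

runner = Runner()
runner.initialize(StubEnvironment())
runner.do_run(StubMeasurement(), debug=False)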
def initialize(self, environment):
    self.environment = environment
    self.updater = Updater(self.environment)
    self.shocker = Shock()
def elast_gen(bs, shocks):
    """Simulate extensive/intensive labor-supply elasticities for the
    parameter vector bs. Relies on module-level data (mu_c, marriagep_betas,
    kidsp_betas, nperiods, N, M, the x_* matrices, etc.) being defined."""
    eta = bs[0]
    alphap = bs[1]
    alphaf = bs[2]

    # wage process
    wagep_betas = np.array([bs[3], bs[4], bs[5], bs[6], bs[7]]).reshape((5, 1))
    income_male_betas = np.array([bs[8], bs[9], bs[10]]).reshape((3, 1))
    c_emp_spouse = bs[11]

    # Production function [young[cc0,cc1],old]
    gamma1 = bs[12]
    gamma2 = bs[13]
    gamma3 = bs[14]
    tfp = bs[15]
    sigma2theta = 1
    kappas = [bs[16], bs[17]]
    sigma_z = [1, bs[18]]
    rho_theta_epsilon = bs[19]
    lambdas = [1, 1]

    # Re-defines the instance with parameters
    param = util.Parameters(alphap, alphaf, mu_c, eta, gamma1, gamma2, gamma3,
                            tfp, sigma2theta, rho_theta_epsilon, wagep_betas,
                            income_male_betas, c_emp_spouse, marriagep_betas,
                            kidsp_betas, eitc_list, afdc_list, snap_list, cpi,
                            fpl_list, lambdas, kappas, pafdc, psnap, mup,
                            sigma_z)

    # The estimate class
    output_ins = estimate.Estimate(nperiods, param, x_w, x_m, x_k, x_wmk,
                                   passign, agech0, nkids0, married0, D,
                                   dict_grid, M, N, moments_vector, var_cov,
                                   hours_p, hours_f, wr, cs, ws)

    hours = np.zeros(N)
    childcare = np.zeros(N)

    model_orig = util.Utility(param, N, x_w, x_m, x_k, passign, nkids0,
                              married0, hours, childcare, agech0, hours_p,
                              hours_f, wr, cs, ws)

    # Obtaining emax instance: this is fixed throughout the exercise
    emax_instance = output_ins.emax(param, model_orig)

    choices_c = {}
    models = []
    for j in range(2):
        np.save('/home/jrodriguez/NH_HC/results/model_v2/experiments/NH/shock.npy',
                shocks[j])
        models.append(Shock(param, N, x_w, x_m, x_k, passign, nkids0, married0,
                            hours, childcare, agech0, hours_p, hours_f, wr,
                            cs, ws))
        choices_c['Choice_' + str(j)] = output_ins.samples(param, emax_instance,
                                                           models[j])

    # Computing changes in % employment for control group
    h_sim_matrix = []
    employment = []
    wages = []
    full = []
    for j in range(2):
        h_sim_matrix.append(choices_c['Choice_' + str(j)]['hours_matrix'])
        employment.append(choices_c['Choice_' + str(j)]['hours_matrix'] > 0)
        full.append(choices_c['Choice_' + str(j)]['hours_matrix'] == hours_f)
        wages.append(choices_c['Choice_' + str(j)]['wage_matrix'])

    # Extensive margin
    elast_extensive = np.zeros(M)
    for j in range(M):
        elast_periods = np.zeros(nperiods)
        for t in range(nperiods):
            elast_periods[t] = (np.mean(employment[1][:, t, j], axis=0)
                                - np.mean(employment[0][:, t, j], axis=0)) / (
                shocks[1] * np.mean(employment[0][:, t, j], axis=0))
        elast_extensive[j] = np.mean(elast_periods)

    # Intensive margin
    elast_intensive = np.zeros(M)
    for j in range(M):
        elast_periods = np.zeros(nperiods)
        for t in range(nperiods):
            sample = (employment[0][:, t, j] == 1)
            elast_periods[t] = np.mean(h_sim_matrix[1][sample, t, j]
                                       - h_sim_matrix[0][sample, t, j],
                                       axis=0) / (
                shocks[1] * np.mean(h_sim_matrix[0][sample, t, j], axis=0))
        elast_intensive[j] = np.mean(elast_periods)

    return {'Extensive': elast_extensive, 'Intensive': elast_intensive}
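# Self-contained sketch of the two elasticity formulas used above, on
# fabricated data. Shapes follow how elast_gen indexes its arrays:
# (N individuals, T periods, M simulation draws). All values are fake.
import numpy as np

rng = np.random.default_rng(0)
N, T, M = 100, 3, 2
shock = 0.05  # proportional wage shock, playing the role of shocks[1]
hours0 = rng.choice([0.0, 20.0, 40.0], size=(N, T, M))  # baseline hours
hours1 = rng.choice([0.0, 20.0, 40.0], size=(N, T, M))  # shocked hours

emp0, emp1 = hours0 > 0, hours1 > 0
# Extensive margin: change in employment rate, scaled by the shock size
ext = (emp1.mean(axis=0) - emp0.mean(axis=0)) / (shock * emp0.mean(axis=0))

# Intensive margin: change in hours among those employed at baseline
int_elast = np.zeros((T, M))
for t in range(T):
    for m in range(M):
        s = emp0[:, t, m]
        int_elast[t, m] = (hours1[s, t, m] - hours0[s, t, m]).mean() / (
            shock * hours0[s, t, m].mean())

# Average over periods per draw, as elast_gen does
print(ext.mean(axis=0), int_elast.mean(axis=0))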
def init_shock(self):
    if self.shock is None:
        shockres = self.req_get('{}/shock'.format(self.url))
        self.shockurl = utils.verify_url(json.loads(shockres)['shockurl'])
        self.shock = Shock(self.shockurl, self.user, self.token)
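# The None-guard above is lazy initialization: the Shock client is built on
# first use and then reused. A generic, runnable sketch (names hypothetical):
class LazyResource(object):
    def __init__(self):
        self._conn = None
    def conn(self):
        if self._conn is None:
            self._conn = object()  # stands in for an expensive handshake
        return self._conn

res = LazyResource()
assert res.conn() is res.conn()  # the same instance is reused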
class Client:

    def __init__(self, url, user, token):
        self.url = utils.verify_url(url)
        self.user = user
        self.token = token
        self.headers = {'Authorization': '{}'.format(self.token),
                        'Content-type': 'application/json',
                        'Accept': 'text/plain'}
        self.shock = None

    def init_shock(self):
        if self.shock is None:
            shockres = self.req_get('{}/shock'.format(self.url))
            self.shockurl = utils.verify_url(json.loads(shockres)['shockurl'])
            self.shock = Shock(self.shockurl, self.user, self.token)

    def upload_data_shock(self, filename, curl=False):
        self.init_shock()
        res = self.shock.upload_reads(filename, curl=curl)
        shock_info = {'filename': os.path.basename(filename),
                      'filesize': os.path.getsize(filename),
                      'shock_url': self.shockurl,
                      'shock_id': res['data']['id'],
                      'upload_time': str(datetime.datetime.utcnow())}
        return res, shock_info

    def upload_data_file_info(self, filename, curl=False):
        """Returns FileInfo Object"""
        self.init_shock()
        res = self.shock.upload_reads(filename, curl=curl)
        return asmtypes.FileInfo(filename, shock_url=self.shockurl,
                                 shock_id=res['data']['id'],
                                 create_time=str(datetime.datetime.utcnow()))

    def submit_job(self, data):
        url = '{}/user/{}/job/new'.format(self.url, self.user)
        return self.req_post(url, data=data)

    def submit_data(self, data):
        url = '{}/user/{}/data/new'.format(self.url, self.user)
        return self.req_post(url, data=data)

    def get_job_status(self, stat_n, job_id=None, detail=False):
        if job_id:
            url = '{}/user/{}/job/{}/status'.format(self.url, self.user, job_id)
        elif detail:
            url = '{}/user/{}/job/status?records={}&detail=True'.format(
                self.url, self.user, stat_n)
        else:
            url = '{}/user/{}/job/status?records={}'.format(
                self.url, self.user, stat_n)
        return self.req_get(url)

    def get_data_list(self):
        url = '{}/user/{}/data'.format(self.url, self.user)
        li = json.loads(self.req_get(url))
        li.sort(key=lambda e: e["data_id"])
        return li

    def get_data_list_table(self, stat_n=10):
        li = self.get_data_list()
        li = li[-stat_n:]
        rows = []
        for data in li:
            data_id = data.get("data_id", "")
            message = data.get("message")
            if 'kbase_assembly_input' in data:
                message = data['kbase_assembly_input'].get('dataset_description')
            data_rows = assembly_data_to_rows(data)
            data_rows = [[''] * 2 + r for r in data_rows]
            rows += [[data_id, message] + [''] * 2]
            rows += data_rows
        pt = PrettyTable(["Data ID", "Description", "Type", "Files"])
        for r in rows:
            pt.add_row(r)
        return pt.get_string()

    def get_data_json(self, data_id):
        url = '{}/user/{}/data/{}'.format(self.url, self.user, data_id)
        return self.req_get(url)

    def is_job_valid(self, job_id):
        stat = self.get_job_status(1, job_id)
        return not stat.startswith("Could not get job status")

    def is_job_done(self, job_id):
        stat = self.get_job_status(1, job_id)
        return bool(re.search('(complete|fail|terminated)', stat, re.IGNORECASE))

    def validate_job(self, job_id):
        if not self.is_job_valid(job_id):
            sys.exit("Invalid job ID: {}".format(job_id))

    def wait_for_job(self, job_id):
        self.validate_job(job_id)
        while not self.is_job_done(job_id):
            time.sleep(5)
        return self.get_job_status(1, job_id)

    def check_job(self, job_id):
        if not self.is_job_done(job_id):
            sys.stderr.write("Job in progress. Use -w to wait for the job.\n")
            sys.exit()

    def get_job_report(self, job_id):
        """Get the stats section of job report"""
        url = '{}/user/{}/job/{}/report'.format(self.url, self.user, job_id)
        return self.req_get(url)

    def get_job_log(self, job_id):
        """Get the log section of job report"""
        url = '{}/user/{}/job/{}/log'.format(self.url, self.user, job_id)
        return self.req_get(url)

    def get_job_report_full(self, job_id, stdout=False, outdir=None):
        url = '{}/user/{}/job/{}/report_handle'.format(self.url, self.user,
                                                       job_id)
        handle = json.loads(self.req_get(url))
        self.download_shock_handle(handle, stdout=stdout, outdir=outdir)

    def get_assemblies(self, job_id, asm=None, stdout=False, outdir=None):
        """Assembly ID cases: None => all, 'auto' => best,
        numerical/string => label"""
        if not asm:
            asm = ''
        url = '{}/user/{}/job/{}/assemblies/{}'.format(self.url, self.user,
                                                       job_id, asm)
        handles = json.loads(self.req_get(url))
        if asm and not handles:  # result-not-found exception handled by router
            raise Error('Invalid assembly ID: ' + asm)
        for h in handles:
            self.download_shock_handle(h, stdout, outdir, prefix=job_id + '_')

    def get_job_analysis_tarball(self, job_id, outdir=None, remove=True):
        """Download and extract quast tarball"""
        url = '{}/user/{}/job/{}/analysis'.format(self.url, self.user, job_id)
        handle = json.loads(self.req_get(url))
        filename = self.download_shock_handle(handle, outdir=outdir)
        dirname = filename.split('/')[-1].split('.')[0]
        destpath = os.path.join(outdir, dirname) if outdir else dirname
        tar = tarfile.open(filename)
        tar.extractall(path=destpath)
        tar.close()
        sys.stderr.write("HTML extracted: {}/report.html\n".format(destpath))
        if remove:
            os.remove(filename)
        return '{}/report.html\n'.format(destpath)

    def get_job_data(self, job_id, outdir=None):
        self.get_assemblies(job_id, outdir=outdir)
        self.get_job_report_full(job_id, outdir=outdir)
        self.get_job_analysis_tarball(job_id, outdir=outdir)

    def get_available_modules(self):
        url = '{}/module/all/avail/'.format(self.url)
        return self.req_get(url)

    def get_available_recipes(self):
        url = '{}/recipe/all/avail/'.format(self.url)
        return self.req_get(url)

    def kill_jobs(self, job_id=None):
        if job_id:
            url = '{}/user/{}/job/{}/kill'.format(self.url, self.user, job_id)
        else:
            url = '{}/user/{}/job/all/kill'.format(self.url, self.user)
        return self.req_get(url)

    def get_config(self):
        return self.req_get('{}/admin/system/config'.format(self.url))

    def req(self, url, req_type='get', data=None, ret=None):
        """Authenticated request. Parses CherryPy message and raises HTTPError"""
        try:
            if req_type == 'get':
                r = requests.get(url, headers=self.headers)
            elif req_type == 'post':
                r = requests.post(url, data=data, headers=self.headers)
            else:
                raise ValueError('Unsupported request type: {}'.format(req_type))
        except requests.exceptions.ConnectionError as e:
            raise ConnectionError(e)
        if r.status_code != requests.codes.ok:
            cherry = re.compile(r"^HTTPError: \(\d+, '(.*?)'", re.MULTILINE)
            match = cherry.search(r.text)  # r.text, not r.content: str in Python 3
            msg = match.group(1) if match else r.reason
            raise HTTPError("{} (HTTP status code {})".format(msg, r.status_code))
        if ret == 'json':
            return r.json()  # r.json is a method in modern requests
        # 'text' and the default both return the decoded body (the original
        # returned raw r.content here, which is bytes under Python 3)
        return r.text

    def req_get(self, url, ret=None):
        return self.req(url, req_type='get', ret=ret)

    def req_post(self, url, data=None, ret=None):
        return self.req(url, req_type='post', data=data, ret=ret)

    @contextlib.contextmanager
    def smart_open(self, filename=None):
        # binary mode so raw shock chunks can be written under Python 3
        if filename and filename != '-':
            fh = open(filename, 'wb')
        else:
            fh = sys.stdout.buffer
        try:
            yield fh
        finally:
            if fh is not sys.stdout.buffer:
                fh.close()

    def download_shock_handle(self, handle, stdout=False, outdir=None,
                              prefix=''):
        shock_url = handle.get('shock_url') or handle.get('url')
        shock_id = handle.get('shock_id') or handle.get('id')
        if not shock_url or not shock_id:
            raise Error("Invalid shock handle: {}".format(handle))
        url = "{}/node/{}?download".format(shock_url, shock_id)
        if stdout:
            filename = None
        else:
            outdir = utils.verify_dir(outdir) if outdir else None
            filename = (handle.get('filename') or handle.get('local_file')
                        or shock_id)
            filename = prefix + filename.split('/')[-1]
            filename = os.path.join(outdir, filename) if outdir else filename
        headers = {'Authorization': 'OAuth {}'.format(self.token)}
        r = requests.get(url, stream=True, headers=headers)
        with self.smart_open(filename) as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()
        if filename:
            if not os.path.exists(filename):
                raise Error('Data exists but file not properly saved')
            else:
                sys.stderr.write("File downloaded: {}\n".format(filename))
        return filename
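# Hedged end-to-end usage sketch for the client above. The host, user, token,
# file name, and job payload are hypothetical; the exact payload schema
# submit_job expects is not shown in this snippet.
client = Client('http://assembly.example.org', 'alice', 'TOKEN')
res, info = client.upload_data_shock('reads.fastq')
job_id = client.submit_job(json.dumps({'file_infos': [info]}))  # assumed schema
print(client.wait_for_job(job_id))
client.get_job_data(job_id, outdir='results')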
class Client:

    def __init__(self, url, user, token):
        self.url = utils.verify_url(url)
        self.user = user
        self.token = token
        self.headers = {'Authorization': '{}'.format(self.token),
                        'Content-type': 'application/json',
                        'Accept': 'text/plain'}
        self.shock = None

    def init_shock(self):
        if self.shock is None:
            shockres = self.req_get('{}/shock'.format(self.url))
            self.shockurl = utils.verify_url(json.loads(shockres)['shockurl'])
            self.shock = Shock(self.shockurl, self.user, self.token)

    def upload_data_shock(self, filename, curl=False):
        self.init_shock()
        res = self.shock.upload_reads(filename, curl=curl)
        shock_info = {'filename': os.path.basename(filename),
                      'filesize': os.path.getsize(filename),
                      'shock_url': self.shockurl,
                      'shock_id': res['data']['id'],
                      'upload_time': str(datetime.datetime.utcnow())}
        return res, shock_info

    def upload_data_file_info(self, filename, curl=False):
        """Returns FileInfo Object"""
        self.init_shock()
        res = self.shock.upload_reads(filename, curl=curl)
        return asmtypes.FileInfo(filename, shock_url=self.shockurl,
                                 shock_id=res['data']['id'],
                                 create_time=str(datetime.datetime.utcnow()))

    def submit_job(self, data):
        url = '{}/user/{}/job/new'.format(self.url, self.user)
        return self.req_post(url, data=data)

    def submit_data(self, data):
        url = '{}/user/{}/data/new'.format(self.url, self.user)
        return self.req_post(url, data=data)

    def get_job_status(self, stat_n, job_id=None, detail=False):
        if job_id:
            url = '{}/user/{}/job/{}/status'.format(self.url, self.user, job_id)
        elif detail:
            url = '{}/user/{}/job/status?records={}&detail=True'.format(
                self.url, self.user, stat_n)
        else:
            url = '{}/user/{}/job/status?records={}'.format(
                self.url, self.user, stat_n)
        return self.req_get(url)

    def get_data_list(self):
        url = '{}/user/{}/data'.format(self.url, self.user)
        li = json.loads(self.req_get(url))
        li.sort(key=lambda e: e["data_id"])
        return li

    def get_data_list_table(self, stat_n=10):
        li = self.get_data_list()
        li = li[-stat_n:]
        rows = []
        for data in li:
            data_id = data.get("data_id", "")
            message = data.get("message")
            if 'kbase_assembly_input' in data:
                message = data['kbase_assembly_input'].get('dataset_description')
            data_rows = assembly_data_to_rows(data)
            data_rows = [[''] * 2 + r for r in data_rows]
            rows += [[data_id, message] + [''] * 2]
            rows += data_rows
        pt = PrettyTable(["Data ID", "Description", "Type", "Files"])
        for r in rows:
            pt.add_row(r)
        return pt.get_string()

    def get_data_json(self, data_id):
        url = '{}/user/{}/data/{}'.format(self.url, self.user, data_id)
        return self.req_get(url)

    def is_job_valid(self, job_id):
        stat = self.get_job_status(1, job_id)
        return not stat.startswith("Could not get job status")

    def is_job_done(self, job_id):
        stat = self.get_job_status(1, job_id)
        return bool(re.search('(complete|fail|terminated)', stat, re.IGNORECASE))

    def validate_job(self, job_id):
        if not self.is_job_valid(job_id):
            sys.exit("Invalid job ID: {}".format(job_id))

    def wait_for_job(self, job_id, interval=30):
        self.validate_job(job_id)
        try:
            interval = int(interval)
        except ValueError:
            interval = 30
        if interval < 2:
            interval = 2
        while not self.is_job_done(job_id):
            time.sleep(interval)
        return self.get_job_status(1, job_id)

    def check_job(self, job_id):
        if not self.is_job_done(job_id):
            sys.stderr.write("Job in progress. Use -w to wait for the job.\n")
            sys.exit()

    def get_job_report(self, job_id):
        """Get the stats section of job report"""
        url = '{}/user/{}/job/{}/report'.format(self.url, self.user, job_id)
        return self.req_get(url)

    def get_job_log(self, job_id):
        """Get the log section of job report"""
        url = '{}/user/{}/job/{}/log'.format(self.url, self.user, job_id)
        return self.req_get(url)

    def get_job_report_full(self, job_id, stdout=False, outdir=None):
        url = '{}/user/{}/job/{}/report_handle'.format(self.url, self.user,
                                                       job_id)
        handle = json.loads(self.req_get(url))
        self.download_shock_handle(handle, stdout=stdout, outdir=outdir)

    def get_assemblies(self, job_id, asm=None, stdout=False, outdir=None):
        """Assembly ID cases: None => all, 'auto' => best,
        numerical/string => label"""
        if not asm:
            asm = ''
        url = '{}/user/{}/job/{}/assemblies/{}'.format(self.url, self.user,
                                                       job_id, asm)
        handles = json.loads(self.req_get(url))
        if asm and not handles:  # result-not-found exception handled by router
            raise Error('Invalid assembly ID: ' + asm)
        for h in handles:
            self.download_shock_handle(h, stdout, outdir, prefix=job_id + '_')

    def get_job_analysis_tarball(self, job_id, outdir=None, remove=True):
        """Download and extract quast tarball"""
        url = '{}/user/{}/job/{}/analysis'.format(self.url, self.user, job_id)
        handle = json.loads(self.req_get(url))
        filename = self.download_shock_handle(handle, outdir=outdir)
        dirname = filename.split('/')[-1].split('.')[0]
        destpath = os.path.join(outdir, dirname) if outdir else dirname
        tar = tarfile.open(filename)
        tar.extractall(path=destpath)
        tar.close()
        sys.stderr.write("HTML extracted: {}/report.html\n".format(destpath))
        if remove:
            os.remove(filename)
        return '{}/report.html\n'.format(destpath)

    def get_job_data(self, job_id, outdir=None):
        self.get_assemblies(job_id, outdir=outdir)
        self.get_job_report_full(job_id, outdir=outdir)
        self.get_job_analysis_tarball(job_id, outdir=outdir)

    def get_available_modules(self):
        url = '{}/module/all/avail/'.format(self.url)
        return self.req_get(url)

    def get_available_recipes(self):
        url = '{}/recipe/all/avail/'.format(self.url)
        return self.req_get(url)

    def kill_jobs(self, job_id=None):
        if job_id:
            url = '{}/user/{}/job/{}/kill'.format(self.url, self.user, job_id)
        else:
            url = '{}/user/{}/job/all/kill'.format(self.url, self.user)
        return self.req_get(url)

    def get_config(self):
        return self.req_get('{}/admin/system/config'.format(self.url))

    def req(self, url, req_type='get', data=None, ret=None):
        """Authenticated request. Parses CherryPy message and raises HTTPError"""
        try:
            if req_type == 'get':
                r = requests.get(url, headers=self.headers)
            elif req_type == 'post':
                r = requests.post(url, data=data, headers=self.headers)
            else:
                raise ValueError('Unsupported request type: {}'.format(req_type))
        except requests.exceptions.ConnectionError as e:
            raise ConnectionError(e)
        if r.status_code != requests.codes.ok:
            cherry = re.compile(r"^HTTPError: \(\d+, '(.*?)'", re.MULTILINE)
            match = cherry.search(r.text)  # r.text, not r.content: str in Python 3
            msg = match.group(1) if match else r.reason
            raise HTTPError("{} (HTTP status code {})".format(msg, r.status_code))
        if ret == 'json':
            return r.json()  # r.json is a method in modern requests
        # 'text' and the default both return the decoded body (the original
        # returned raw r.content here, which is bytes under Python 3)
        return r.text

    def req_get(self, url, ret=None):
        return self.req(url, req_type='get', ret=ret)

    def req_post(self, url, data=None, ret=None):
        return self.req(url, req_type='post', data=data, ret=ret)

    @contextlib.contextmanager
    def smart_open(self, filename=None):
        # binary mode so raw shock chunks can be written under Python 3
        if filename and filename != '-':
            fh = open(filename, 'wb')
        else:
            fh = sys.stdout.buffer
        try:
            yield fh
        finally:
            if fh is not sys.stdout.buffer:
                fh.close()

    def download_shock_handle(self, handle, stdout=False, outdir=None,
                              prefix=''):
        shock_url = handle.get('shock_url') or handle.get('url')
        shock_id = handle.get('shock_id') or handle.get('id')
        if not shock_url or not shock_id:
            raise Error("Invalid shock handle: {}".format(handle))
        url = "{}/node/{}?download".format(shock_url, shock_id)
        if stdout:
            filename = None
        else:
            outdir = utils.verify_dir(outdir) if outdir else None
            filename = (handle.get('filename') or handle.get('local_file')
                        or shock_id)
            filename = prefix + filename.split('/')[-1]
            filename = os.path.join(outdir, filename) if outdir else filename
        headers = {'Authorization': 'OAuth {}'.format(self.token)}
        r = requests.get(url, stream=True, headers=headers)
        with self.smart_open(filename) as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()
        if filename:
            if not os.path.exists(filename):
                raise Error('Data exists but file not properly saved')
            else:
                sys.stderr.write("File downloaded: {}\n".format(filename))
        return filename
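# This revision parameterizes the polling interval and clamps it to >= 2s.
# A standalone, runnable sketch of that sanitization (hypothetical helper
# name; the original catches only ValueError, TypeError is added here):
def sanitize_interval(value, default=30, floor=2):
    try:
        value = int(value)
    except (TypeError, ValueError):
        value = default
    return max(value, floor)

assert sanitize_interval('10') == 10
assert sanitize_interval('abc') == 30
assert sanitize_interval(0) == 2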
class Client:

    def __init__(self, url, user, token):
        self.port = 8000  ## change
        if url.find(':') == -1:  # Port not included
            self.url = url + ':{}'.format(self.port)
        else:
            self.url = url
        self.user = user
        self.token = token
        self.headers = {'Authorization': '{}'.format(self.token),
                        'Content-type': 'application/json',
                        'Accept': 'text/plain'}
        shockres = requests.get('http://{}/shock'.format(self.url),
                                headers=self.headers).text
        self.shockurl = 'http://{}/'.format(json.loads(shockres)['shockurl'])
        self.shock = Shock(self.shockurl, self.user, self.token)

    def get_job_data(self, job_id=None, outdir=None):
        if not job_id:
            raise NotImplementedError('Job id required')
        # Get node id
        res = requests.get('http://{}/user/{}/job/{}/shock_node'.format(
            self.url, self.user, job_id), headers=self.headers)
        # Download files
        try:
            nodes_map = json.loads(res.text)
            for node_id in nodes_map.values():
                self.shock.download_file(node_id, outdir=outdir)
        except Exception:
            traceback.print_exc()
            raise Exception("Error retrieving results")

    def get_assemblies(self, job_id=None, asm_id=None, stdout=False,
                       outdir=None):
        if not job_id:
            raise NotImplementedError('Job id required')
        # Get node id
        res = requests.get('http://{}/user/{}/job/{}/assembly'.format(
            self.url, self.user, job_id), headers=self.headers)
        # Download files
        try:
            nodes_map = json.loads(res.text)
            if stdout:
                # Get first one and print
                asm_file = self.shock.download_file(
                    list(nodes_map.values())[0], outdir=outdir)
                with open(asm_file) as f:
                    for line in f:
                        print(line, end='')
            elif asm_id:
                ordered = collections.OrderedDict(sorted(nodes_map.items()))
                node_id = list(ordered.values())[int(asm_id) - 1]
                self.shock.download_file(node_id, outdir=outdir)
            else:
                for node_id in nodes_map.values():
                    self.shock.download_file(node_id, outdir=outdir)
        except Exception:
            traceback.print_exc()
            raise Exception("Error retrieving results")

    def upload_data_shock(self, filename, curl=False):
        res = self.shock.upload_reads(filename, curl=curl)
        shock_info = {'filename': os.path.basename(filename),
                      'filesize': os.path.getsize(filename),
                      'shock_url': self.shockurl,
                      'shock_id': res['data']['id'],
                      'upload_time': str(datetime.datetime.utcnow())}
        return res, shock_info

    def upload_data_file_info(self, filename, curl=False):
        """Returns FileInfo Object"""
        res = self.shock.upload_reads(filename, curl=curl)
        return FileInfo(self.shockurl, res['data']['id'],
                        os.path.getsize(filename), os.path.basename(filename),
                        str(datetime.datetime.utcnow()))

    def submit_job(self, data):
        url = 'http://{}/user/{}/job/new'.format(self.url, self.user)
        r = requests.post(url, data=data, headers=self.headers)
        return r.content

    def submit_data(self, data):
        url = 'http://{}/user/{}/data/new'.format(self.url, self.user)
        r = requests.post(url, data=data, headers=self.headers)
        return r.content

    def get_job_status(self, stat_n, job_id=None):
        if job_id:
            url = 'http://{}/user/{}/job/{}/status'.format(self.url, self.user,
                                                           job_id)
        else:
            url = 'http://{}/user/{}/job/status?records={}'.format(
                self.url, self.user, stat_n)
        r = requests.get(url, headers=self.headers)
        return r.content

    def get_available_modules(self):
        url = 'http://{}/module/all/avail/'.format(self.url)
        r = requests.get(url, headers=self.headers)
        return r.content

    def kill_jobs(self, job_id=None):
        if job_id:
            url = 'http://{}/user/{}/job/{}/kill'.format(self.url, self.user,
                                                         job_id)
        else:
            url = 'http://{}/user/{}/job/all/kill'.format(self.url, self.user)
        r = requests.get(url, headers=self.headers)
        return r.content

    def get_config(self):
        return requests.get(
            'http://{}/admin/system/config'.format(self.url)).content
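# Runnable sketch of the 1-based assembly lookup in get_assemblies above:
# node ids are sorted by key, then indexed with int(asm_id) - 1. The map
# contents are fabricated for illustration.
import collections

nodes_map = {'asm2': 'node-b', 'asm1': 'node-a', 'asm3': 'node-c'}
ordered = collections.OrderedDict(sorted(nodes_map.items()))
assert list(ordered.values())[int('2') - 1] == 'node-b'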
class Client:

    def __init__(self, url, user, token):
        self.port = 8000  ## change
        if url.find(':') == -1:  # Port not included
            self.url = url + ':{}'.format(self.port)
        else:
            self.url = url
        self.user = user
        self.token = token
        self.headers = {'Authorization': '{}'.format(self.token),
                        'Content-type': 'application/json',
                        'Accept': 'text/plain'}
        shockres = requests.get('http://{}/shock'.format(self.url),
                                headers=self.headers).text
        self.shockurl = 'http://{}/'.format(json.loads(shockres)['shockurl'])
        self.shock = Shock(self.shockurl, self.user, self.token)

    def get_job_data(self, job_id=None, outdir=None):
        if not job_id:
            raise NotImplementedError('Job id required')
        # Get node id
        res = requests.get('http://{}/user/{}/job/{}/shock_node'.format(
            self.url, self.user, job_id), headers=self.headers)
        if res.status_code == 403:
            raise ValueError('Invalid Job Id')
        # Download files
        try:
            nodes_map = json.loads(res.text)
            for node_id in nodes_map.values():
                self.shock.download_file(node_id, outdir=outdir)
        except Exception as e:
            print(e)
            raise Exception("Error retrieving results")

    def get_assemblies(self, job_id=None, asm_id=None, stdout=False,
                       outdir=None):
        if not job_id:
            raise NotImplementedError('Job id required')
        # Get node id
        res = requests.get('http://{}/user/{}/job/{}/assembly'.format(
            self.url, self.user, job_id), headers=self.headers)
        # Download files
        try:
            nodes_map = json.loads(res.text)
            if stdout:
                # Get first one and print
                asm_file = self.shock.download_file(
                    list(nodes_map.values())[0], outdir=outdir)
                with open(asm_file) as f:
                    for line in f:
                        print(line, end='')
            elif asm_id:
                ordered = collections.OrderedDict(sorted(nodes_map.items()))
                node_id = list(ordered.values())[int(asm_id) - 1]
                self.shock.download_file(node_id, outdir=outdir)
            else:
                for node_id in nodes_map.values():
                    self.shock.download_file(node_id, outdir=outdir)
        except Exception:
            traceback.print_exc()
            raise Exception("Error retrieving results")

    def upload_data_shock(self, filename, curl=False):
        res = self.shock.upload_reads(filename, curl=curl)
        shock_info = {'filename': os.path.basename(filename),
                      'filesize': os.path.getsize(filename),
                      'shock_url': self.shockurl,
                      'shock_id': res['data']['id'],
                      'upload_time': str(datetime.datetime.utcnow())}
        return res, shock_info

    def upload_data_file_info(self, filename, curl=False):
        """Returns FileInfo Object"""
        res = self.shock.upload_reads(filename, curl=curl)
        return asmtypes.FileInfo(filename, shock_url=self.shockurl,
                                 shock_id=res['data']['id'],
                                 create_time=str(datetime.datetime.utcnow()))

    def submit_job(self, data):
        url = 'http://{}/user/{}/job/new'.format(self.url, self.user)
        r = requests.post(url, data=data, headers=self.headers)
        return r.content

    def submit_data(self, data):
        url = 'http://{}/user/{}/data/new'.format(self.url, self.user)
        r = requests.post(url, data=data, headers=self.headers)
        return r.content

    def get_job_status(self, stat_n, job_id=None):
        if job_id:
            url = 'http://{}/user/{}/job/{}/status'.format(self.url, self.user,
                                                           job_id)
        else:
            url = 'http://{}/user/{}/job/status?records={}'.format(
                self.url, self.user, stat_n)
        r = requests.get(url, headers=self.headers)
        # decoded str (not bytes) so the startswith/regex checks below work
        # under Python 3
        return r.text

    def get_data_list(self):
        url = 'http://{}/user/{}/data'.format(self.url, self.user)
        r = requests.get(url, headers=self.headers)
        li = json.loads(r.content)
        li.sort(key=lambda e: e["data_id"])
        return li

    def get_data_list_table(self, stat_n=10):
        li = self.get_data_list()
        li = li[-stat_n:]
        rows = []
        for data in li:
            data_id = data.get("data_id", "")
            message = data.get("message", "")
            data_rows = assembly_data_to_rows(data)
            data_rows = [[''] * 2 + r for r in data_rows]
            rows += [[data_id, message] + [''] * 2]
            rows += data_rows
        pt = PrettyTable(["Data ID", "Description", "Type", "Files"])
        for r in rows:
            pt.add_row(r)
        return pt.get_string()

    def get_data_json(self, data_id):
        url = 'http://{}/user/{}/data/{}'.format(self.url, self.user, data_id)
        r = requests.get(url, headers=self.headers)
        return r.content

    def is_job_valid(self, job_id):
        stat = self.get_job_status(1, job_id)
        return not stat.startswith("Could not")

    def validate_job(self, job_id):
        if not self.is_job_valid(job_id):
            sys.stderr.write("Invalid job ID: {}\n".format(job_id))
            sys.exit()

    def wait_for_job(self, job_id):
        self.validate_job(job_id)
        stat = self.get_job_status(1, job_id)
        while not re.search('(complete|fail)', stat, re.IGNORECASE):
            time.sleep(5)
            stat = self.get_job_status(1, job_id)
        return stat

    def get_job_report(self, job_id, log=False):
        url = 'http://{}/user/{}/job/{}/report'.format(self.url, self.user,
                                                       job_id)
        r = requests.get(url, headers=self.headers)
        if not r.content:
            sys.stderr.write("Job in progress. Use -w to wait for the job.\n")
            sys.exit()
        try:
            info = json.loads(r.content)[0]['file_infos'][0]
            url = '{}/node/{}?download'.format(info['shock_url'],
                                               info['shock_id'])
            report = shock_get(url).text
            if not log:
                # keep only the first 20 lines (the stats section)
                report = ''.join(report.splitlines(True)[:20])
        except Exception as e:
            print(e)
            raise Exception("Error retrieving job report")
        return report

    def get_available_modules(self):
        url = 'http://{}/module/all/avail/'.format(self.url)
        r = requests.get(url, headers=self.headers)
        return r.content

    def kill_jobs(self, job_id=None):
        if job_id:
            url = 'http://{}/user/{}/job/{}/kill'.format(self.url, self.user,
                                                         job_id)
        else:
            url = 'http://{}/user/{}/job/all/kill'.format(self.url, self.user)
        r = requests.get(url, headers=self.headers)
        return r.content

    def get_config(self):
        return requests.get(
            'http://{}/admin/system/config'.format(self.url)).content
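# Runnable sketch of the table layout get_data_list_table produces; the rows
# are fabricated, and only the PrettyTable calls used above appear.
from prettytable import PrettyTable

pt = PrettyTable(["Data ID", "Description", "Type", "Files"])
pt.add_row(["42", "test reads", "", ""])
pt.add_row(["", "", "paired", "reads_1.fq, reads_2.fq"])
print(pt.get_string())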