def run(white_engine_file_num):
    # for p in sys.path: print p

    # Load the list of the programs to play as white in this run.
    path = ''
    log_file = path + "tournament.log." + str(white_engine_file_num)
    file = open(path + 'white_engines_' + str(white_engine_file_num) + '.txt', 'r')
    white_engines = []
    for engine in file.readlines():
        white_engines.append(engine.rstrip())
    file.close()
    print white_engines

    # Load the list of all programs.
    file = open(path + 'engines.txt', 'r')
    engines = []
    for engine in file.readlines():
        engines.append(engine.rstrip())
    file.close()
    print engines

    # Iterate over all possible games. Check the log: if this game was
    # already played, skip it. If the game was not played, play it!
    for white_engine in white_engines:
        for black_engine in engines:
            # Skip self-games.
            if white_engine == black_engine:
                continue

            # Check the log; was this game recorded yet?
            # Make sure the log exists.
            try:
                log = open(log_file, 'r')
            except IOError:
                pass
            else:
                target_str = 'white=' + white_engine + ',black=' + black_engine
                found = False
                for line in log.readlines():
                    if line.startswith(target_str):
                        found = True
                        break
                log.close()
                if found:
                    continue

            # Run a game and log the results.
            driver.run(white_engine, black_engine, log_file, False)

    print "\nDONE!!!!!!!\n"
def peval(input_file, output_file, llpe_analysis, log=None, trail=None, **opts):
    "intra-module previrtualization"
    if not trail:
        opt = tempfile.NamedTemporaryFile(suffix='.bc', delete=False)
        pre = tempfile.NamedTemporaryFile(suffix='.bc', delete=False)
        done = tempfile.NamedTemporaryFile(suffix='.bc', delete=False)
        opt.close()
        pre.close()
        done.close()

        if llpe_analysis:
            args = ['-load=%s' % config.LLPE_MAIN,
                    '-load=%s' % config.LLPE_DRIVER,
                    '-loop-simplify', '-lcssa',
                    '-llpe', '-llpe-omit-checks', '-llpe-single-threaded',
                    input_file, '-o=%s' % done.name]
            run("opt", args)
            shutil.copy(done.name, input_file)

        # pre_args = [config.LLVM['opt'], '-load=%s' % config.OCCAM_LIB,
        #             opt.name, '-o=%s' % done.name,
        #             '-Ppeval']
        out = ['']
        shutil.copy(input_file, done.name)

        while True:
            retcode = optimize(done.name, opt.name, **opts)
            if retcode != 0:
                # TODO: an error occurred
                shutil.copy(done.name, output_file)
                return retcode
            if previrt_progress(opt.name, done.name, ['-Ppeval'], output=out):
                print "previrt successful"
                if log is not None:
                    log.write(out[0])
            else:
                break

        shutil.move(opt.name, output_file)
        try:
            os.unlink(done.name)
            os.unlink(pre.name)
        except OSError:
            pass
        return retcode
    else:
        assert False
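# A hedged usage sketch for peval above; the file names are assumptions, and the
# config/optimize/previrt_progress helpers must come from the surrounding module.
# peval alternates -O3 optimization runs with -Ppeval previrtualization passes
# until no further progress is made, then leaves the final bitcode in output_file:
#
#     retcode = peval('module.bc', 'module.prev.bc', llpe_analysis=False)
#     if retcode != 0:
#         print "optimization failed; last good module is in module.prev.bc"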
def test(query, expected, name, runopts=None, testopts=None):
    if runopts is None:
        runopts = {}
    else:
        for k, v in runopts.items():
            if isinstance(v, str):
                runopts[k] = eval(v)
    if testopts is None:
        testopts = {}

    if 'max_batch_rows' not in runopts:
        runopts['max_batch_rows'] = 3
    if expected == '':
        expected = None
    driver.run(query, expected, name, runopts, testopts)
def index(pcap):
    global cur_filename
    global ec
    global tw
    global cols

    # Grab the sort value, defaulting to None.
    sort_type = request.args.get('sort_type', None)

    # Halt the background collection process.
    tw.running = False

    # Check that the input pcap file exists.
    if os.path.exists(os.path.join(os.getcwd(), 'pcap_files', pcap)):
        # If working on a new pcap file, update our entropy computer with the new
        # pcap. This prevents needless recalculation of the same pcap over and over.
        if cur_filename != pcap:
            print(f'New File: {pcap}')
            cur_filename = pcap
            ec = Entropy_Computer()
            if driver.run(ec, pcap=pcap) == -1:
                return 'Failure'

        # Get the dataframe and sort it (if needed).
        df = get_dataframe_from_entropy_stats(ec.entropy_stats)
        if sort_type:
            df = df.sort_values(by=sort_type)

        # Return HTML rendered from the template with the passed-in variables.
        return render_template('pcap.html', df=df, cols=cols, pcap=pcap)
    else:
        return 'File not found'
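# The request.args/render_template calls above suggest this is a Flask view; a
# plausible registration (the route path is an assumption, not from the source)
# would look like:
#
#     @app.route('/pcap/<pcap>')
#     def index(pcap):
#         ...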
def strip(input_file, output_file, **opts):
    args = [input_file, '-o', output_file,
            '-strip', '-globaldce', '-globalopt', '-strip-dead-prototypes']
    return run(config.LLVM['opt'], args, **opts)
def after_create(user):
    if user.name is not None:
        name = ' (name: %s)' % user.name
    else:
        name = ''
    logging.info("Created a new user %s%s of type %s, adding to gid %d"
                 % (user.username, name, user.type, user.gid))

    group = models.Group.find_by_gid(user.gid)
    try:
        group.members.append(user.username)
    except AttributeError:
        if group.members:
            group.members = [group.members, user.username]
        else:
            group.members = [user.username]
    group.save()

    logging.info("Creating filesystem %s" % user.home)
    driver.run(['/usr/bin/hcs-quota', 'create', user.username, user.quota])
    if not os.path.exists(user.home):
        logging.error("Homedir doesn't look right here. NFS issue?")
        raise AssertionError
    user.take_file(user.home)

    driver.run(['/usr/bin/hcs-newcert', '-u', user.username])
    user.create_initial_files()
    logging.debug("Created initial files, user.type = %s" % user.type)

    # Let the cache on the mailservers get up to speed.
    time.sleep(1.2)

    if user.type == 'group':
        # Goes to the account + access list.
        mailers.UserMailer.deliver_creation(user, access_list=True)
        # Goes to the account.
        mailers.UserMailer.deliver_www_info(user, access_list=False)
        mailers.UserMailer.deliver_user_info(user, access_list=False)
    elif user.type == 'member':
        if not user.state('no_welcome_email'):
            logging.debug("About to send member welcome")
            mailers.UserMailer.deliver_member_welcome(user, outside=False)
        else:
            logging.debug("Opted out of member welcome")
    elif user.type == 'general':
        # TODO: write a more specific email
        if not user.state('no_welcome_email'):
            logging.debug("About to send general welcome. TODO: make it better")
            mailers.UserMailer.deliver_member_welcome(user, outside=False)
        else:
            logging.debug("Opted out of general welcome")

    mailers.UserMailer.deliver_acctserv_notification(user, 'creation')
def test(query, expected, name, runopts=None, testopts=None):
    if runopts is None:
        runopts = {}
    else:
        for k, v in runopts.items():
            if isinstance(v, str):
                try:
                    runopts[k] = eval(v)
                except NameError:
                    runopts[k] = v
    if testopts is None:
        testopts = {}

    if 'max_batch_rows' not in runopts:
        runopts['max_batch_rows'] = 3
    if expected == '':
        expected = None
    driver.run(query, expected, name, runopts, testopts)
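# A hedged example call for test() above (the query and option values are made
# up): string-valued runopts are eval'd into Python values where possible, and
# max_batch_rows defaults to 3 unless overridden:
#
#     test("r.expr([1, 2, 3])", "[1, 2, 3]", "simple-array",
#          runopts={'array_limit': '4'})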
def strip(input_file, output_file, **opts):
    args = [input_file, '-o', output_file,
            '-strip', '-globaldce', '-globalopt', '-strip-dead-prototypes']
    return run(config.getLLVMTool('opt'), args, **opts)
def main(args=None):
    """Main method that parses command-line arguments and calls the run method."""
    if args is None:
        args = sys.argv[1:]
    parser = ArgumentParser(description="Run a Machine Learning algorithm using command line arguments")
    parser.add_argument('--input-data-file', type=FileType('r'), required=True,
                        dest='input_data_file', help='csv file for input data')
    options = parser.parse_args(args)
    output = run(options.input_data_file)
from driver import run

run(filename='input1', workers_n=2, base_time=0)
run(filename='input', workers_n=5, base_time=60)
parser.add_option("-f", "--file", action="store", type="string", dest="filename") parser.add_option("-o", "--output", action="store", type="string", dest="output") parser.add_option("-s", "--scale", action="store", type="string", dest="scale") parser.add_option("-p", "--play", action="store_true", dest="play") (options, args) = parser.parse_args() if options.filename: input = file2string(options.filename) else: input = args[0] if options.output: output = options.output else: output = "out" if options.scale: SCALE = options.scale else: SCALE = "diatonic" run(input, output, SCALE) if options.play: cmd = "play " + output + ".wav" os.system(cmd)
import sys
import time

import driver


# A rudimentary timer for coarse-grained profiling
class Timer(object):

    def __init__(self, verbose=False):
        self.verbose = verbose

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *args):
        self.end = time.time()
        self.secs = self.end - self.start
        self.msecs = self.secs * 1000  # milliseconds
        if self.verbose:
            print "elapsed time: %f ms" % self.msecs


if __name__ == "__main__":
    # Call the AnnTools pipeline
    if len(sys.argv) > 1:
        input_file_name = sys.argv[1]
        with Timer() as t:
            driver.run(input_file_name, "vcf")
        # Save results file and log file to S3 results bucket
    else:
        print "A valid .vcf file must be provided as input to this program."
#!/usr/bin/env python
import driver

probname = 'p2-half_slab'
argstr = (f'{probname} '
          f'--sigma_t 8 '
          f'--sigma_s0 6.4 '
          f'--sigma_s1 1.6 '
          f'--zstop 1 '
          f'--num_ordinates 4 '
          f'--num_hidden_layer_nodes 5 '
          f'--learning_rate 1e-3 '
          f'--epsilon_sn 1e-6 '
          f'--epsilon_nn 1e-13 '
          f'--num_sn_zones 50 '
          f'--num_mc_zones 50 '
          f'--num_nn_zones 50 '
          f'--num_particles 1000000 '
          f'--num_physical_particles 8 '
          f'--uniform_source_extent 0 0.5 '
          f'--source_magnitude 8')

args = driver.parse_args(argstr.split())
driver.run(args)
except OSError:
    mpi_installed = False
    print "MPI not installed"

for num_nodes in [2, 10, 25, 50, 100, 150, 200]:
    in_path = "graphs/timings/graph_{0}".format(num_nodes)

    print "Running library max flow on {0}...".format(in_path)
    graph = driver.file_to_graph(in_path)
    library_max_flow, _ = driver.find_min_cut_serial(graph)

    print "Running serial max flow on {0}...".format(in_path)
    serial_max_flow = serial.run(in_path)

    print "Running map/reduce max flow on {0}...".format(in_path)
    mr_max_flow, _ = driver.run(in_path)

    if mpi_installed:
        print "Running MPI max flow on {0}...".format(in_path)
        mpi_output = sp.check_output(
            ["mpirun", "-n", "4", "python", "mpi.py", in_path])
        mpi_max_flow = int(mpi_output.split("\n")[-2])

    assert (library_max_flow == serial_max_flow)
    assert (library_max_flow == mr_max_flow)
    if mpi_installed:
        assert (library_max_flow == mpi_max_flow)

    print "Success! All implementations have max_flow={0} for n={1}".format(
        library_max_flow, num_nodes)
                         cwd='/', stderr=PIPE).wait()
    except OSError as e:
        logging.error(e)
    except ValueError as e:
        logging.error(e)


if __name__ == '__main__':
    # Call the AnnTools pipeline
    # Ref: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-uploading-files.html
    #      https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GettingStarted.Python.03.html#GettingStarted.Python.03.03
    #      https://boto3.amazonaws.com/v1/documentation/api/latest/guide/dynamodb.html
    if len(sys.argv) > 1:
        with Timer():
            driver.run(sys.argv[1], 'vcf')

        results_bucket = Config.AWS_S3_RESULTS_BUCKET
        fullFilePath = sys.argv[2]
        basePath, userName, job_id, inputFile, myName, user_email = (
            sys.argv[3], sys.argv[4], sys.argv[5],
            sys.argv[6], sys.argv[7], sys.argv[8])

        resultFile = inputFile.replace('.', '.annot.')
        resultPath = f'{basePath}/jobs/{userName}/{job_id}/{resultFile}'
        resultKey = f'{myName}/{userName}/{job_id}/{resultFile}'
        logFile = f'{inputFile}.count.log'
        logPath = f'{basePath}/jobs/{userName}/{job_id}/{logFile}'
        logKey = f'{myName}/{userName}/{job_id}/{logFile}'

        # Ref: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-uploading-files.html
        s3_client = boto3.client('s3', region_name=Config.AWS_REGION_NAME)
        try:
"job_id": job_id, "user_id": user_id, "input_file_name": input_file_name, "s3_inputs_bucket": s3_inputs_bucket, "s3_key_input_file": s3_key_input_file, "submit_time": submit_time, "job_status": "RUNNING" } # updating dynamodDb instance with same job_id dynamodb = boto3.resource('dynamodb', region_name=REGION) ann_table = dynamodb.Table(DYNAMODB) ann_table.put_item(Item=data) with Timer() as t: driver.run(filepath, 'vcf') print("Total runtime: %s seconds" % t.secs) ##we are looking to upload TWO of the generated files from the program s3 = boto3.resource('s3') ##finding and uploading annot annot_filepath = (filepath + '.annot').replace('.vcf.annot', '.annot.vcf') annot_s3_key = (s3_key_input_file.replace('~', '/') + '.annot').replace('.vcf.annot', '.annot.vcf') s3.meta.client.upload_file(annot_filepath, 'gas-results', annot_s3_key) print("annotations file uploaded to s3") ##finding and uploading log log_filepath = (filepath + '.count.log')
import driver
from importlib import reload

reload(driver)
print('homogeneous saturn')
params = {'mcore': 10, 'z1': 0.02, 'start_t': 1e3}
driver.run('sat', params, homog=True, dump=False,
           custom_evol_params={}, custom_mesh_params={})
print()

reload(driver)
print('adiabatic saturn rainout')
params.pop('rrho_where_have_helium_gradient', None)
params['phase_t_offset'] = 0
driver.run('sat', params, homog=False, dump=False)
print()

reload(driver)
print('superadiabatic saturn rainout')
params['rrho_where_have_helium_gradient'] = 3e-2
driver.run('sat', params, homog=False, dump=False)
parser.add_option("-o", "--output", action="store", type="string", dest="output") parser.add_option("-s", "--scale", action="store", type="string", dest="scale") parser.add_option("-p", "--play", action="store_true", dest="play") (options, args) = parser.parse_args() if options.filename: input = file2string(options.filename) else: input = args[0] if options.output: output = options.output else: output = "out" if options.scale: SCALE = options.scale else: SCALE = "diatonic" run(input, output, SCALE) if options.play: cmd = "play " + output + ".wav" os.system(cmd)
def drive(node_list, robot):
    for node in node_list:
        driver.run(node.x, node.y, robot)
def optimize(input_file, output_file, **opts):
    return run(config.getLLVMTool('opt'),
               ['-disable-simplify-libcalls', input_file, '-o', output_file, '-O3'],
               **opts)
def callgraph(input_file, output_file, **opts):
    args = [input_file, '-o', '/dev/null', '-dot-callgraph']
    x = run(config.getLLVMTool('opt'), args, **opts)
    if x == 0:
        # The -dot-callgraph pass writes callgraph.dot into the current
        # directory, so move it to the requested output path.
        shutil.move('callgraph.dot', output_file)
    return x
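# A hedged sketch of how the strip/optimize/callgraph helpers above might be
# chained on one bitcode module (the file names are assumptions):
#
#     strip('prog.bc', 'prog.stripped.bc')
#     optimize('prog.stripped.bc', 'prog.opt.bc')
#     callgraph('prog.opt.bc', 'prog.callgraph.dot')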
from driver import run

filename = 'input1'
factor = 1  # 100
run(filename, factor=factor)
print("Done.") td = timedelta(seconds=(dt[1] - dt[0]).total_seconds()) / (numframes + 1) curdate = dt[0] + td fn_pattern = driver.config_datasource.get("filenames", "pattern") fn_ext = driver.config_datasource.get("filenames", "ext") for i in xrange(len(R_ip)): outfn = datetime.strftime(curdate, fn_pattern) + '.' + fn_ext print("Saving result to %s...") % outfn, sys.stdout.flush() exporter(R_ip[i], os.path.join(output_path, outfn), geodata) print("Done.") curdate += td if save_original == True: if output_path != os.path.split(fn[0])[0]: shutil.copy(fn[0], output_path) if output_path != os.path.split(fn[1])[0]: shutil.copy(fn[1], output_path) driver = driver.Driver() driver.parse_args() driver.read_configs() driver.run(worker, num_prev_precip_fields=1, exporter_method=exporter_method)
print "usage: python segment.py input_image output_image" sys.exit(-1) input_image = args[1] output_image = args[2] # Read in image original_image = img.imread(input_image) height, width, channels = original_image.shape print "Processing image (%dx%d)..." % (width, height) # Convert image to graph in adjacency list format image_as_graph_path = "tmp/image_graph.txt" ip.convert(input_image, image_as_graph_path) # Find the cut using MapReduce max-flow implmentation max_flow, cut = driver.run(image_as_graph_path) # gererate and output segmented image segmented_image = np.zeros((height, width)) for i in xrange(height): for j in xrange(width): my_id = str(i * width + j) # my_id is reachable from s if my_id in cut: segmented_image[i,j] = 255 # my_id is not reachable from s else: segmented_image[i,j] = 0 img.imsave("output/{0}".format(output_image), segmented_image, cmap="gray")
def callgraph(input_file, output_file, **opts):
    args = [input_file, '-o', '/dev/null', '-dot-callgraph']
    x = run(config.LLVM['opt'], args, **opts)
    if x == 0:
        shutil.move('callgraph.dot', output_file)
    return x
def optimize(input_file, output_file, **opts):
    return run(config.LLVM['opt'],
               ['-disable-simplify-libcalls', input_file, '-o', output_file, '-O3'],
               **opts)
        return self

    def __exit__(self, *args):
        self.end = time.time()
        self.secs = self.end - self.start
        self.msecs = self.secs * 1000  # milliseconds
        if self.verbose:
            print 'elapsed time: %f ms' % self.msecs


if __name__ == '__main__':
    # Call the AnnTools pipeline
    if len(sys.argv) > 1:
        input_file_name = sys.argv[1]
        with Timer() as t:
            driver.run(input_file_name, 'vcf')
        print "input file is " + input_file_name
        print "Total runtime: %s seconds" % t.secs

        # Save the results file and log file to the S3 results bucket.
        conn = boto.connect_s3()
        k = Key(conn.get_bucket('gas-results', validate=False))

        # Copy all three files to the bucket.
        k.key = 'lyc/' + input_file_name
        k.set_contents_from_filename(input_file_name)
        k.key = 'lyc/' + input_file_name.split('.')[0] + '.annot.vcf'
        k.set_contents_from_filename(input_file_name.split('.')[0] + '.annot.vcf')
        k.key = 'lyc/' + input_file_name + '.count.log'
        k.set_contents_from_filename(input_file_name + '.count.log')
        self.secs = self.end - self.start
        self.msecs = self.secs * 1000  # milliseconds
        if self.verbose:
            print("Total runtime: {0:.6f} seconds".format(self.secs))


if __name__ == '__main__':
    # Call the AnnTools pipeline
    if len(sys.argv) > 3:
        input_path = sys.argv[1]
        s3_key = sys.argv[2]
        job_id = sys.argv[3]
        print(input_path)
        print(s3_key)
        print(job_id)

        with Timer() as t:
            driver.run(input_path, 'vcf')
        print("Total runtime: %s seconds" % t.secs)

        result_file_path = (input_path + '.annot').replace('.vcf.annot', '.annot.vcf')
        result_key = (s3_key + '.annot').replace('.vcf.annot', '.annot.vcf')
        log_file_path = input_path + '.count.log'
        log_key = s3_key + '.count.log'

        s3 = boto3.resource('s3', region_name='us-east-1')

        # Upload the results file.
        try:
            s3.meta.client.upload_file(result_file_path, 'gas-results', result_key)
            print("result file uploaded")
        except Exception as e:
            print(e.__doc__)
def find(x, robot):
    # parse args
    # parser = argparse.ArgumentParser(description='Navigate the robot to a given location')
    # parser.add_argument('path', metavar='p', type=str, help='path to DOT file')
    # parser.add_argument('coord', metavar='c', type=str, help='goal position')
    # parser.add_argument('start', metavar='s', nargs="?", type=str, help='start position')
    # args = parser.parse_args()

    r = robot

    # Read in all vertices.
    file = open('graph.dot', 'r')
    goal_coords = x
    print('goal coords ' + str(goal_coords))

    # Dict to hold the vertices.
    adj_matrix = {}
    next(file)
    for line in file:
        # A blank line ends the vertex section.
        if line == '\n':
            break
        # Read in the vertex.
        line_parts = line.split('[')
        label = line_parts[0].strip(' ')
        coords = line_parts[1].split('"')[1].split(",")
        x = coords[0].strip('(')
        y = coords[1].strip(')')
        # Create the vertex and add it if not already present.
        v = Vertex(label, x, y)
        adj_matrix[label] = v

    # Read in all edges.
    for line in file:
        # Skip anything that is not an edge line.
        if not '--' in line:
            continue
        # Split on the edge marker.
        edge = line.split('--')
        first_node = edge[0].strip(' ')
        second_node = edge[1].strip().replace(';', '')
        # Get both nodes from the dictionary and add each to the other's adjacency dict.
        v = adj_matrix[first_node]
        s = adj_matrix[second_node]
        v.adj_nodes[second_node] = v.distance(s.x, s.y)
        s.adj_nodes[first_node] = s.distance(v.x, v.y)

    # Create the goal node.
    coords = goal_coords.split(',')
    x_goal = coords[0].strip('[')
    y_goal = coords[1].strip(']')

    # Add all distances for the goal.
    goal_vertex = Vertex("Goal", x_goal, y_goal)
    find_closest(goal_vertex, adj_matrix)

    # Get the robot's MCL pose.
    xy = r.getMCLPose()
    x_goal = xy[0]  # robot x coord
    y_goal = xy[1]  # robot y coord

    # Make a start vertex and connect it to the closest node.
    start = Vertex("Start", x_goal, y_goal)
    find_closest(start, adj_matrix)

    # print(adj_matrix['1a'].adj_nodes['2a'])
    # print(adj_matrix["1b"].adj_nodes["Goal"])
    path = dij.dijkstra(adj_matrix, start.label, goal_vertex.label)
    path = path.split(' ')
    for name in path:
        node = adj_matrix[name]
        driver.run(node.x, node.y, r)
    r.drive(0, 0)
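# A hypothetical graph.dot fragment in the shape the parser above expects (the
# attribute name and node labels are assumptions, not taken from the source):
# one header line that gets skipped, vertex lines carrying quoted "(x,y)"
# coordinates, a blank line, then `--` edge lines:
#
#     graph map {
#         1a [pos="(0.0,1.5)"]
#         2a [pos="(2.0,1.5)"]
#
#         1a -- 2a;
#     }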
    return (t2 - t1), (t3 - t2), (t4 - t3), (t5 - t4)


psets = [[100, 5, 40], [1000, 5, 40], [2000, 5, 40], [10000, 5, 40],
         [100, 11, 40], [1000, 11, 40], [2000, 11, 40], [10000, 11, 40],
         [100, 11, 20], [1000, 11, 20], [2000, 11, 20], [10000, 11, 20],
         [100, 11, 10], [1000, 11, 10], [2000, 11, 10], [10000, 11, 10]]

reps = 10
ntests = 4
clrs = ["black", "red", "blue", "green"]
fname = "clstQFrm"
pname = ["N", "k", "M"]
_drvr.run(psets, reps, ntests, pname, clrs, fname, doset)

"""
L = len(psets)
reps = 10
scrs = _N.empty((L, reps, 4))
clrs = ["black", "red", "blue", "green"]
ls = ["-", "-", "--", "--"]

p = -1
for pset in psets:
    p += 1
    for r in xrange(reps):
        scrs[p, r] = doset(pset)
        self.start = time.time()
        return self

    def __exit__(self, *args):
        self.end = time.time()
        self.secs = self.end - self.start
        self.msecs = self.secs * 1000  # milliseconds
        if self.verbose:
            print "Elapsed time: %f ms" % self.msecs


if __name__ == '__main__':
    # Call the AnnTools pipeline
    if len(sys.argv) > 1:
        input_file_name = sys.argv[1]
        with Timer() as t:
            driver.run(input_file_name, 'vcf')
        print "Total runtime: %s seconds" % t.secs

        log_path = input_file_name + ".count.log"
        log_name = input_file_name.split('/')[1] + ".count.log"
        res_path = input_file_name.split('.')[0] + ".annot.vcf"
        res_name = input_file_name.split('.')[0].split('/')[1] + ".annot.vcf"

        # http://boto3.readthedocs.org/en/latest/reference/services/s3.html#S3.Client.upload_file
        s3.meta.client.upload_file(log_path, 'gas-results', "songty/" + log_name)
        s3.meta.client.upload_file(res_path, 'gas-results', "songty/" + res_name)

        # https://docs.python.org/2/library/os.html#os.remove
        os.remove(input_file_name)
        os.remove(log_path)
        os.remove(res_path)
print "usage: python segment.py input_image output_image" sys.exit(-1) input_image = args[1] output_image = args[2] # Read in image original_image = img.imread(input_image) height, width, channels = original_image.shape print "Processing image (%dx%d)..." % (width, height) # Convert image to graph in adjacency list format image_as_graph_path = "tmp/image_graph.txt" ip.convert(input_image, image_as_graph_path) # Find the cut using MapReduce max-flow implmentation max_flow, cut = driver.run(image_as_graph_path) # gererate and output segmented image segmented_image = np.zeros((height, width)) for i in xrange(height): for j in xrange(width): my_id = str(i * width + j) # my_id is reachable from s if my_id in cut: segmented_image[i, j] = 255 # my_id is not reachable from s else: segmented_image[i, j] = 0 img.imsave("output/{0}".format(output_image), segmented_image, cmap="gray")
def main(argv=sys.argv):
    try:
        args = _parse_args(argv)
        run(args)
    except SystemExit as e:
        return e
def call_driver():
    sel = []
    selection = lstbox.curselection()
    for i in selection:
        sel.append(lstbox.get(i))
    driver.run(sel, file, pref2.get(), gis.get(), file2, engine.get(),
               ladder.get(), rescue.get(), other.get(), overtime.get(),
               overcount.get(), erf.get(), chief.get(), enforce.get(),
               erf_out.get())