def rank_with_paired_nn(dicta, queries, model_path, model_opt_path):
    """
    Rank clean dictionary chunks for each noisy query chunk using the
    paired (clean, noisy) input neural network.
    """
    with open(model_path, 'rb') as fmodel:
        ml = load(fmodel)
    model_func = ml.model.get_theano_function()
    npz = np.load(model_opt_path)
    queries = queries.astype(np.float32)
    dicta = dicta.astype(np.float32)
    T = queries.shape[0]
    D = dicta.shape[0]  # number of dictionary chunks, not the number of queries
    sim = np.zeros((D, T))
    for t in xrange(T):
        sys.stdout.write('.')
        if t % 70 == 0 and t:
            sys.stdout.write('\n')
        qrep = repmat(queries[t, :], D, 1)
        test_x = np.hstack([dicta, qrep])
        test_x = mean_var_normalize_test(test_x, npz['train_mean'],
                                         npz['train_std'])
        sim[:, t] = model_func(test_x)
    best_chunks = np.argmax(sim, axis=0)
    return best_chunks
def handle_data(self, message):
    payload = msgpack.unpackb(message)
    if 'connect' in payload:
        host, port_str = payload['connect'].split('::')
        port = int(port_str)
        self._sock = socket.socket()
        self._sock.connect((host, port))
        response = msgpack.packb({'type': 'STATUS', 'value': 'OPEN'})
        self._send(response)
        eventlet.spawn(self._reader)
    elif 'close' in payload:
        self._sock.close()
        status = msgpack.packb({'type': 'STATUS', 'value': 'CLOSED'})
        self._send(status)
    elif 'content' in payload:
        try:
            self._sock.send(payload['content'])
        except Exception as exc:
            print exc
            self._sock.close()
    else:
        # msgpack.unpackb returns a dict, so report its keys
        message = 'Unknown payload type: {}\n'.format(payload.keys())
        sys.stderr.write(message)
def main():
    global possible_subnet_list
    global targets
    global port
    global args

    # Check if they want to use a file of addresses
    if args.addfile is None:
        # Calculate non-file address
        if args.address is None:
            with print_lock:
                sys.stderr.write(bad_n + "need an address to work with (-a)\nExiting...\n")
            exit(403)
        else:
            # Calculate addresses
            populate_address(args.address)
    # Use address file instead
    else:
        try:
            with open(args.addfile, "r") as targs:
                with print_lock:
                    sys.stdout.write(notify + "running with address file!\n")
                for line in targs:
                    populate_address(line)
        except IOError:
            with print_lock:
                sys.stderr.write(bad + "could not open that file.\n")
            exit()

    # Remove blank list entries. <3 nano users
    possible_subnet_list = list(filter(None, possible_subnet_list))

    # EXIT FOR TESTING
    if args.test:
        print(possible_subnet_list)
        exit()

    # Feed the thread monsters
    for address_index in range(0, len(possible_subnet_list)):
        q.put(address_index)
    q.join()

    # Report if no hosts were up
    if None in targets:
        with print_lock:
            sys.stderr.write(bad_n + "No SSH found!\nExiting...\n")
        exit(404)

    if args.o is not None:
        with print_lock:
            sys.stdout.write(notify + "Writing found targets to {}\n".format(args.o))
        f = open(args.o, "w+")
        for t in targets:
            f.write(t + "\n")
        f.close()

    with print_lock:
        sys.stdout.write("\nThat's all I can do!\n")
    print("Attack lasted {:.2f} seconds!".format(time.time() - start))
def check_size(self, out, image):
    value = self.config_entry_image.get('check-size')
    if not value:
        return 0

    dtb_size = 0
    if self.config_entry_image.get('check-size-with-dtb'):
        for dtb in glob.glob(
                os.path.join(self.dir, 'arch',
                             self.config_entry_base['kernel-arch'],
                             'boot/dts/*.dtb')):
            dtb_size = max(dtb_size, os.stat(dtb).st_size)

    size = os.stat(image).st_size + dtb_size
    if size > value:
        out.write('Image too large (%d > %d)! Refusing to continue.\n'
                  % (size, value))
        return 1

    # 1% overhead is desirable in order to cope with growth
    # through the lifetime of a stable release. Warn if this is
    # not the case.
    usage = (float(size) / value) * 100.0
    out.write('Image size %d/%d, using %.2f%%. ' % (size, value, usage))
    # the too-large case already returned above, so only the usage
    # warning remains here
    if usage >= 99.0:
        out.write('Under 1%% space in %s. ' % self.changelog.distribution)
    else:
        out.write('Image fits. ')
    out.write('Continuing.\n')

    return 0
def main():
    usage = ("usage: %prog [options] bamfile\n"
             "generate a shell script that generates a locus-specific BAM file "
             "of a certain window size around bed coordinates for a given "
             "original BAM file\n\n")
    parser = OptionParser(usage)
    parser.add_option("--bedfile", type="string", dest="bedfile", help="bedfile")
    parser.add_option("--upstreampad", type="int", default=100, dest="upstream",
                      help="upstream")
    parser.add_option("--downstreampad", type="int", default=100, dest="downstream",
                      help="downstream")
    parser.add_option("--ref", type="string", default="human_reference_v37.fa",
                      dest="ref", help="name of reference assembly (fasta) file (.fa)")
    parser.add_option("--bamprefix", type="string", default=None, dest="bamprefix",
                      help="output prefix of bam file")
    (options, args) = parser.parse_args()

    if options.bamprefix is None:
        sys.stderr.write("please provide a value to the --bamprefix option!\n")
        sys.exit(1)
    bamfile = args[0]
    if not os.path.isfile(bamfile):
        sys.stderr.write("bam file doesn't exist! " + bamfile + "\n")
        sys.exit(1)
    bamindexfile = bamfile + ".bai"
    if not os.path.isfile(bamindexfile):
        sys.stderr.write("bam index file doesn't exist! did you run samtools/bamtools index?\n")
        sys.exit(1)
    if not os.path.isfile(options.bedfile):
        sys.stderr.write("bed file doesn't exist! " + options.bedfile + "\n")
        exit(1)

    bam = pysam.Samfile(bamfile, "rb")
    bedfh = open(options.bedfile, 'r')
    for coord_tuple in yield_bedcoordinate(bedfh):
        (chr, start, end) = coord_tuple
        regionstring = ":".join([chr, str(int(start) - options.upstream),
                                 str(int(end) + options.downstream)])
        bamfilename = ".".join([options.bamprefix, regionstring, 'bam'])
        # now create a bamfile for the region defined by the bed coordinate,
        # plus/minus the pad;
        # the bamfile is named bamfileprefix.regionstring.bam
        outbam = pysam.Samfile(bamfilename, "wb", template=bam)
        print coord_tuple
        print chr, str(int(start) - options.upstream), str(int(end) + options.downstream)
        # now for each aligned read in the region, write it to the bam file
        for alignedread in bam.fetch(chr, int(start) - options.upstream,
                                     int(end) + options.downstream):
            if alignedread.is_paired:
                outbam.write(alignedread)
        outbam.close()
    bam.close()
def get(self):
    prefix = self.get_argument('q')
    if prefix is None:
        sys.stderr.write('[ERROR] no name\n')
        # report the error in the HTTP response, not via the sys module
        self.write('error')
        return
    sugg_info = _sugg_server.get_sugg(prefix.encode('utf-8'))
    self.render("sugg.html", info=sugg_info)
def main():
    parser = argparse.ArgumentParser(description="")
    parser.add_argument('-compile_commands', default='compile_commands.json',
                        help='location of the compile_commands.json database')
    parser.add_argument('-binary',
                        help='location of binary to use for clang-format')
    parser.add_argument('-output', default='clang-format-result.diff',
                        help='output file')
    args = parser.parse_args()

    if not args.binary:
        for ext in ["", "-6.0", "-7.0", "-8.0", "-9.0"]:
            if test_cmd(["clang-format" + ext, "--version"]):
                args.binary = "clang-format" + ext
                break
        else:
            sys.stderr.write(
                "Could not find clang-format; please use the '-binary' option\n")
            sys.exit(1)

    with open(args.compile_commands) as f:
        data = json.load(f)

    with open(args.output, 'w') as out:
        for item in data:
            filename = item['file']
            command = [args.binary, filename]
            p = subprocess.Popen(command,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 universal_newlines=True)
            formatted_code, err = p.communicate()
            if p.returncode != 0:
                sys.stderr.write(err)
                sys.exit(p.returncode)
            with open(filename) as f:
                code = f.read().split('\n')
            diff = difflib.unified_diff(code, formatted_code.split('\n'),
                                        filename + " (original)",
                                        filename + " (formatted)",
                                        '', '', 3, "")
            diff_string = '\n'.join(diff)
            if len(diff_string) > 0:
                sys.stdout.write("\n\nWarning: Inconsistent format " + filename + "\n")
                sys.stdout.write(diff_string)
                out.write("\n\nWarning: Inconsistent format " + filename + "\n")
                out.write(diff_string)
def parse_mod_data(args):
    if VERBOSE:
        sys.stderr.write('Reading megalodon data\n')
    try:
        mod_dat = pd.read_csv(
            mh.get_megalodon_fn(args.megalodon_results_dir, mh.PR_MOD_TXT_NAME),
            sep='\t')
    except FileNotFoundError:
        sys.stderr.write('ERROR: Must provide a valid Megalodon result directory.\n')
        sys.exit(1)
    return mod_dat
import sys

def printMap(nMap):
    # ASCII rendering of the map: 0 -> '*', 1 -> 'c', anything else -> '.'
    for i in range(nMap.shape[0]):
        for j in range(nMap.shape[1]):
            if nMap[i, j] == 0:
                sys.stdout.write('*')
            elif nMap[i, j] == 1:
                sys.stdout.write('c')
            else:
                sys.stdout.write('.')
        sys.stdout.write('\n')
    sys.stdout.write('\n')
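# Illustrative check of printMap (not from the original source): a 2x3 numpy
# array rendered with the mapping above (0 -> '*', 1 -> 'c', other -> '.').
import numpy as np
printMap(np.array([[0, 1, 2],
                   [2, 2, 0]]))
# expected output:
# *c.
# ..*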
def pcToSystem(file):  # receives the file, already opened and ready to be read from our PC
    fs = open("fiunamfs.img", "r+b")  # image handle, named fs so it doesn't shadow the sys module
    fs.seek(1024)
    for i in range(64):
        if fs.read(15) == 'AQUI_NO_VA_NADA':  # found an empty directory entry
            # read the start offset of the slot
            fs.seek(fs.tell() + 10)
            ini = int(fs.read(5))
            fs.write(file.read())  # copy the data
            fin = int(fs.tell())
            fs.seek(ini)
            fs.seek(fs.tell() - 14)
            fs.write(str(fin - ini))  # store the file size (end minus start, as a string)
        else:
            fs.seek(fs.tell() + 49)
    fs.close()
    file.close()
def systemToPC(nombre, ruta):  # receives the file name and the destination path
    fs = open("fiunamfs.img", "r+b")  # image handle, named fs so it doesn't shadow the sys module
    fs.seek(1024)
    for i in range(64):
        if fs.read(15) == nombre:  # found the directory entry for this name
            # read the start offset of the entry
            fs.seek(fs.tell() + 10)
            ini = int(fs.read(5))
            # create a new file object at the given path and copy the data into it
            fin = int(fs.tell())
            fs.seek(ini)
            fs.seek(fs.tell() - 14)
            fs.write(str(fin - ini))  # store the file size (end minus start, as a string)
        else:
            fs.seek(fs.tell() + 49)
    fs.close()
def parse_jobs(self, jobs_str):
    job_info = {}
    job_strs = jobs_str.split(' ')
    for job_str in job_strs:
        if '(' in job_str:
            job_id, info_str = job_str.split('(')
            job_info[job_id] = {}
            info_strs = info_str[:-2]
            for info_str in info_strs.split(','):
                try:
                    key, value = info_str.split('=')
                    job_info[job_id][key] = value
                except ValueError:
                    if self._is_verbose:
                        # report the item itself: 'key' may be unbound
                        # when the split fails
                        msg = '### warning: no value for {0}\n'
                        sys.stderr.write(msg.format(info_str))
                    job_info[job_id][info_str] = None
    return job_info
def paste(
    self,
    fname,
    req_title=None,
    req_language=None,
    req_password=None,
    req_private=None,
    req_expire=30,
    req_project=None,
):
    fargs = locals()
    endpoint = "/create"
    params = {"language": "text", "title": os.path.basename(fname)}
    try:
        fp = open(fname, "r")
        params["data"] = fp.read()  # ugh, how about some cap hu?
    except Exception as e:
        sys.stderr.write("Failed to open paste file {}: {}\n".format(fname, e))
        return
def doc_changed(self, *args):
    if self.live_coding_enabled and self.bot:
        doc = self.window.get_active_document()
        source = self.get_source(doc)
        try:
            self.bot.live_source_load(source)
        except IOError as e:
            # IOError must come before the generic handler, or this
            # branch can never run
            self.bot = None
            self.disconnect_change_handler(doc)
            if e.errno == errno.EPIPE:
                # EPIPE error
                sys.stderr.write('FIXME: %s\n' % str(e))
            else:
                # Something else bad happened
                raise
        except Exception:
            self.bot = None
            self.disconnect_change_handler(doc)
            raise
def run_jobs(self, stream=sys.stdout, resubmit_failed=False):
    """Function to dispatch jobs and collect results

    Parameters
    ----------
    stream : `file`
        Stream that this function will print to,
        must have a 'write' function.

    resubmit_failed : bool
        Resubmit failed jobs.

    Returns
    -------
    status_vect : `JobStatusVector`
        Vector that summarizes the number of jobs in various states.
    """
    self._build_job_dict()

    self._interface._dry_run = self.args['dry_run']
    scatter_status = self._interface.submit_jobs(
        self.scatter_link, job_archive=self._job_archive, stream=stream)
    if scatter_status == JobStatus.failed:
        return JobStatus.failed

    status_vect = self.check_status(stream, write_status=True)
    status = status_vect.get_status()
    if status == JobStatus.partial_failed:
        if resubmit_failed:
            sys.stdout.write("Resubmitting partially failed link %s\n" %
                             self.full_linkname)
            status_vect = self.resubmit(stream=stream,
                                        fail_running=False,
                                        resubmit_failed=True)
        else:
            sys.stdout.write("NOT resubmitting partially failed link %s\n" %
                             self.full_linkname)

    return status_vect
""" brace_close : RBRACE """ self._pop_scope() p[0] = p[1] def p_empty(self, p): 'empty : ' p[0] = None def p_error(self, p): if p: self._parse_error('before: %s' % p.value, self._coord(p.lineno)) else: self._parse_error('At end of input', '') if __name__ == "__main__": import pprint import time, sys t1 = time.time() parser = CParser(lex_optimize=True, yacc_debug=True, yacc_optimize=False) sys.write(time.time() - t1) buf = ''' int (*k)(int); ''' # set debuglevel to 2 for debugging t = parser.parse(buf, 'x.c', debuglevel=0) t.show(showcoord=True)
def errfoo(msg, a, b):
    sys.stderr.write(msg + "\n")
    sys.exit()
def shell(parenthed):
    global namespace
    command = parenthed[0]
    argv = parenthed[1:]
    if command == "+" and len(argv) == 1:
        return argv[0]
    elif command == "+" and len(argv) == 2:
        return str(int(argv[0]) + int(argv[1]))
    elif command == "-" and len(argv) == 1:
        return str(-int(argv[0]))
    elif command == "-" and len(argv) == 2:
        return str(int(argv[0]) - int(argv[1]))
    elif command == "*" and len(argv) == 1:
        return namespace[argv[0]]
    elif command == "*" and len(argv) == 2:
        return str(int(argv[0]) * int(argv[1]))
    elif command == "/" and len(argv) == 2:
        return str(int(int(argv[0]) / int(argv[1])))
    elif command == "%" and len(argv) == 1:
        return str(abs(int(argv[0])))
    elif command == "%" and len(argv) == 2:
        return str(int(argv[0]) % int(argv[1]))
    elif command == "=" and len(argv) == 2:
        return str(int(argv[0] == argv[1]))
    elif command == "!=" and len(argv) == 2:
        return str(int(argv[0] != argv[1]))
    elif command == ">" and len(argv) == 2:
        return str(int(int(argv[0]) > int(argv[1])))
    elif command == ">=" and len(argv) == 2:
        return str(int(int(argv[0]) >= int(argv[1])))
    elif command == "<" and len(argv) == 2:
        return str(int(int(argv[0]) < int(argv[1])))
    elif command == "<=" and len(argv) == 2:
        return str(int(int(argv[0]) <= int(argv[1])))
    elif command == "&" and len(argv) == 2:
        return str(int(argv[0]) & int(argv[1]))
    elif command == "|" and len(argv) == 2:
        return str(int(argv[0]) | int(argv[1]))
    elif command == "^" and len(argv) == 2:
        return str(int(argv[0]) ^ int(argv[1]))
    elif command == "!" and len(argv) == 1:
        return str(int(not bool(int(argv[0]))))
    elif command == "~" and len(argv) == 1:
        return str(~int(argv[0]))
    elif command == "~" and len(argv) == 2:
        return argv[0] + argv[1]
    elif command == ":=" and len(argv) == 2:
        s = argv[1]
        namespace[argv[0]] = argv[1]
        return s
    elif command == "'" and len(argv) == 1:
        return chr(int(argv[0]))
    elif command == "@" and len(argv) == 1:
        return str(ord(argv[0]))
    elif command == "@" and len(argv) == 2:
        return argv[0].split("`")[int(argv[1])]
    elif command == "?" and len(argv) == 3:
        if bool(int(argv[0])):
            return argv[1]
        else:
            return argv[2]
    elif command == "if" and len(argv) == 2:
        if bool(int(argv[0])):
            return run(argv[1])
    elif command == "ifelse" and len(argv) == 3:
        if bool(int(argv[0])):
            return run(argv[1])
        else:
            return run(argv[2])
    elif command == "while" and len(argv) == 2:
        while True:
            s = run(argv[0])
            t = run(argv[1])
            if bool(int(s)):
                return t
    elif command == "do" and len(argv) == 1:
        return run(argv[0])
    elif command == "write" and len(argv) == 1:
        import sys
        sys.stdout.write(argv[0])
        return ""
    elif command == "writeln" and len(argv) == 1:
        print(argv[0])
        return ""
    elif command == " ":
        return "`".join(argv)
    elif command == "@:=" and len(argv) == 3:
        s = argv[0].split("`")
        s[int(argv[1])] = argv[2]
        return "`".join(s)
    elif command == "~~" and len(argv) == 2:
        return argv[0] + "`" + argv[1]
    elif command == "exit" and len(argv) == 0:
        import sys
        sys.exit()
    elif command == "#" and len(argv) == 1:
        return str(len(argv[0]))  # keep the string-in, string-out convention
    elif command == "##" and len(argv) == 1:
        return str(len(argv[0].split("`")))
    elif command == "]" and len(argv) == 2:
        s = open(argv[1], "w")
        s.write(argv[0] + "\n")
        s.close()
        return argv[0] + "\n"
    elif command == "]." and len(argv) == 2:
        s = open(argv[1], "w")
        s.write(argv[0])
        s.close()
        return argv[0]
    elif command == "[" and len(argv) == 1:
        s = open(argv[0], "r")
        txt = s.read()
        s.close()
        return txt
    elif command == "]]" and len(argv) == 2:
        s = open(argv[1], "a")
        s.write(argv[0] + "\n")
        s.close()
        return argv[0] + "\n"
    elif command == "]]." and len(argv) == 2:
        s = open(argv[1], "a")
        s.write(argv[0])
        s.close()
        return argv[0]
    elif command == "cd" and len(argv) == 0:
        import os
        return os.getcwd()
    elif command == "cd" and len(argv) == 1:
        import os
        os.chdir(argv[0])
        return os.getcwd()
    elif command == "ls":
        import os
        return '`'.join(os.listdir(os.getcwd()))
    elif command == "readln":
        try:
            return raw_input()
        except NameError:
            return input()
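# A few illustrative evaluations of shell() under the fixes above; the
# string-in/string-out convention and the backtick list separator are
# taken from the snippet itself.
print(shell(["+", "4", "5"]))          # "9"
print(shell(["?", "1", "yes", "no"]))  # "yes"
print(shell(["~", "foo", "bar"]))      # "foobar"
print(shell(["@", "a`b`c", "1"]))      # "b"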
def my_error_func(msg, a, b):
    sys.stderr.write(msg + "\n")
    sys.exit()
args.add_argument('builddir', nargs='?', default='.')
args.add_argument('--cxxflags', default='')
args.add_argument('--debug', action='store_true')
args.add_argument('--testpath', help='PATH containing llvm-lit and FileCheck')
args.add_argument('--withtests', action='store_true')
args = args.parse_args()

bootstrap = os.path.realpath(sys.argv[0])
builddir = os.path.realpath(args.builddir)

if not os.path.exists(builddir):
    os.makedirs(builddir)

if not os.path.isdir(builddir):
    sys.stderr.write("Build directory '%s' is not a directory\n" % args.builddir)
    sys.exit(1)

# First, let's declare what we actually want to build.
cxx_srcs = {
    'bin/': (
        'CLIArguments',
        'fab',
    ),
    'lib/': (
        'AssertionFailure',
        'Bytestream',
        'ErrorReport',
        'Fabrique',
        'FabBuilder',
        'Printable',
        'SemanticException',
        'SourceCodeException',
        'SourceLocation',
        'SourceRange',
        'UserError',
        'builtins',
        'names',
        'strings',
    ),
""" Wrapper that execute a program and its arguments but reports standard error messages only if the program exit status was not 0 Example: ./stderr_wrapper.py myprog arg1 -f arg2 """ import sys, subprocess, os assert sys.version_info[:2] >= ( 2, 4 ) TRINITY_BASE_DIR = "" if os.environ.has_key('TRINITY_HOME'): TRINITY_BASE_DIR = os.environ['TRINITY_HOME']; else: sys.write("You must set the environmental variable TRINITY_BASE_DIR to the base installation directory of Trinity before running this"); sys.stderr.write("You must set the environmental variable TRINITY_BASE_DIR to the base installation directory of Trinity before running this"); sys.exit(1) # get bindir bindir = sys.argv[0] bindir = bindir.split("/") if len(bindir) > 1: bindir.pop() bindir = "/".join(bindir) else: bindir = "."
def classify(train_list, test_list, parent_dir):
    index_list = {}
    # initialize matrix
    matrix = output.Matrix(map_list.index.keys())
    for i, c in enumerate(map_list.index.keys()):
        subdir = str(i + 1) + '-vs-all/'
        dir = parent_dir + subdir
        # check if sub directory already exists
        if not os.path.exists(dir):
            os.makedirs(dir)
        train = open(dir + 'train', 'w')
        index_list[i + 1] = c
        mapf.write("%s %d\n" % (c, i + 1))
        for t in train_list:
            if re.search(c, t.cls):
                gold_class = 1
            else:
                gold_class = -1
            train.write("%s %d %s\n" % (t.instance, gold_class, t.features))
        train.close()
        test = open(dir + 'test', 'w')
        for t in test_list:
            if re.search(c, t.cls):
                gold_class = 1
            else:
                gold_class = -1
            test.write("%s %d %s\n" % (t.instance, gold_class, t.features))
        test.close()
        # run train.txt
        call_mallet(dir)
        # create sys_output from test.stdout; the output handle is named
        # sys_out so it doesn't shadow the sys module
        temp = open(dir + 'stdout', 'r')
        sys_out = open(dir + 'sys_output', 'w')
        for l in temp.readlines():
            m = re.match(r'^(\S+) (\-?\d) (\-?\d):(\S+) (\-?\d):(\S+)', l)
            if m:
                sys_out.write("%s %s %s %s %s %s\n" %
                              (m.group(1), m.group(2), m.group(3),
                               m.group(4), m.group(5), m.group(6)))
                # store test probability into each table
                for t in test_list:
                    if t.instance == m.group(1):
                        if m.group(3) != '-1':
                            prob = m.group(4)
                        else:
                            prob = m.group(6)
                        t.probs[i + 1] = prob
        temp.close()
        sys_out.close()
    # assign class with highest probability
    for t in test_list:
        final = sorted(t.probs.items(), key=lambda x: float(x[1]), reverse=True)
        index = final[0][0]
        sys_class = index_list[index]
        matrix.set_value(t.cls, sys_class, 1)
        # output
        final_sys.write("%s %s" % (t.instance, t.cls))
        for cls, prob in final:
            final_sys.write(" %s %s" % (index_list[cls], prob))
        final_sys.write('\n')
    return matrix
import sys

filename = input("enter filename:")
fp = open(filename, "w")
ch = input("enter string:")
while ch != '#':
    fp.write(ch)
    sys.stdout.write(ch)
    ch = input()
fp.close()
# finally check if dispersion is sane before proceeding
currentDispersion = wallClockClient.algorithm.getCurrentDispersion()
if currentDispersion > 1000000000*1.0:
    sys.stderr.write("\n\nWall clock client synced with dispersion +/- %.3f milliseconds." % (currentDispersion / 1000000.0))
    sys.stderr.write("\nWhich is greater than +/- 1 second. Aborting.\n\n")
    sys.exit(1)

print
print "Beginning to measure"
measurer.capture()

# sanity check we are still connected to the CSS-TS server and the
# timeline is still available; abort if either has gone away
if not (syncTimelineClockController.connected and syncTimelineClockController.timelineAvailable):
    sys.stderr.write("\n\nLost connection to CSS-TS or timeline became unavailable. Aborting.\n\n")
    sys.exit(1)

measurer.detectBeepsAndFlashes(dispersionFunc = dispRecorder.dispersionAt)

for channel in measurer.getComparisonChannels():
    try:
        index, expected, timeDifferencesAndErrors = measurer.doComparison(channel)

        print
        print "Results for channel: %s" % channel["pinName"]
        print "----------------------------"
        stats.calcAndPrintStats(index, expected, timeDifferencesAndErrors, cmdParser.args.toleranceSecs[0])

    except DubiousInput:
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
###############################################################################
#
# $Id: regress.py.in,v 1.1 2004/07/31 00:58:05 rivetwa Exp $
#
# regress/regress.py. Generated from regress.py.in by configure.

import os, os.path, sys, fnmatch, glob

# Check the python version
_v = sys.version_info
_ver = (int(_v[0]) << 16) + (int(_v[1]) << 8) + int(_v[2])
if _ver < 0x020101:
    sys.stderr.write('You need python >= 2.1.1 to run this program.\n')
    sys.stderr.write('Your python version is:\n%s\n' % (sys.version))
    sys.exit(1)

# Add modules dir to module search path
sys.path.append('modules')

import base_test

"""Main regression test driver program.

Test cases are organised into three levels: directories -> modules -> classes.
Directories are the most generic and classes are the most specific.

This program will search the current directory for subdirectories with
names matching 'test_*'. Each matching subdirectory will be searched for
python
def lexerErrorFunc(self, msg, a, b):
    sys.stderr.write(msg + '\n')
    sys.exit()
import csv
import sys
import math

if len(sys.argv) < 2:
    sys.stderr.write("Usage: " + sys.argv[0] + " #column\n")
    sys.exit(-1)

column = int(sys.argv[1])
fd = sys.stdin
csv_reader = csv.reader(fd, delimiter=',')

previous_value = 0
previous_time = 0
for row in csv_reader:
    start_sec = float(row[0])
    start_nsec = float(row[1])
    time = start_sec + (start_nsec * math.pow(10, -9))
    value = float(row[column])
    # discrete derivative of the chosen column with respect to time
    diff = (value - previous_value) / (time - previous_time)
    diff_str = str(diff)
    row.append(diff_str)
    output_str = ",".join(row)
    sys.stdout.write(output_str + "\n")
    # Update history
    previous_time = time
    previous_value = value
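# Example (illustrative, not from the original source): with column 2,
# input lines
#   1,0,10
#   2,0,30
# produce
#   1,0,10,10.0
#   2,0,30,20.0
# i.e. each row gains the rate of change of the chosen column since the
# previous row (the first row is measured against the initial 0/0 history).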
import sys

# One Collatz step: halve even numbers, apply 3n+1 to odd numbers.
collatz_step = lambda n: n // 2 if n % 2 == 0 else 3 * n + 1

def docollatz(n):
    # Generate the Collatz sequence starting at n, ending at 1.
    yield n
    while n != 1:
        n = collatz_step(n)
        yield n

# Print a sequence, e.g. for n = 27.
sys.stdout.write(' '.join(str(i) for i in docollatz(27)) + '\n')
        for j in xrange(len(img[i])):
            if img[i][j] < th:
                img2[i][j] = 255
            else:
                img2[i][j] = img[i][j]
    return img2

tjreq = requests.Session()
tjresp1 = tjreq.get(tjurl + search_path)
tjresp2 = tjreq.get(tjurl + processo_path)
#print tjresp.text

html_page1 = bs.BeautifulSoup(tjresp1.text.encode('latin1'))
html_page2 = bs.BeautifulSoup(tjresp2.text.encode('latin1'))

counter = 1
while not html_page1.find(text=n_processo) or not html_page2.find(text=n_processo):
    sys.stdout.write('attempts: {0}\r'.format(counter))
    sys.stdout.flush()  # keep the \r progress line visible
    counter += 1
    tjresp1 = tjreq.get(tjurl + search_path)
    tjresp2 = tjreq.get(tjurl + processo_path)
    html_page1 = bs.BeautifulSoup(tjresp1.text.encode('latin1'))
    html_page2 = bs.BeautifulSoup(tjresp2.text.encode('latin1'))

#process = html_page.find(text=n_processo)
#with open(process + '.html','w') as fp: fp.write( tjresp.text.encode('latin1'))
print 'found after {0} attempts!'.format(counter)

process = requests.get(tjurl + processo_path)
outfile = 'movimentacao-' + now() + n_processo + '.html'
with open(outfile, 'w') as fp:
    fp.write(process.text.encode('latin1'))
print 'printed in {0} after {1} attempts'.format(outfile, counter)
def main():
    # Parse CL
    elements_needed = 5
    args_array = np.array(sys.argv)
    N_args = len(args_array)
    assert N_args == elements_needed
    N_jackknife = int(args_array[1])
    todo_dir = args_array[2]
    file_dir = args_array[3]
    out_dir = args_array[4]

    if not os.path.isdir(out_dir):
        sys.stderr.write('{} does not exist. Making...\n'.format(out_dir))
        cmd = 'mkdir ' + out_dir
        os.system(cmd)

    # load the todo pointing list
    input_filename = todo_dir + 'todo_list.dat'
    if not os.path.isfile(input_filename):
        sys.stderr.write('Error: {} does not exist. Exiting...\n'.format(input_filename))
        sys.exit()
    sys.stderr.write('Loading pointing info from file {} ...\n'.format(input_filename))
    input_file = open(input_filename, 'rb')
    todo_list = pickle.load(input_file)
    input_file.close()

    sys.stderr.write('Jackknifing uniform from {}\n'.format(file_dir))

    for p in todo_list:
        # Uniform files are random upon creation so no need to shuffle
        filename = file_dir + 'uniform_' + p.ID + '.xyzw.dat'
        if not os.path.isfile(filename):
            sys.stderr.write('Error: {} does not exist. Exiting...\n'.format(filename))
            sys.exit()
        xyzw = np.genfromtxt(filename, skip_header=1)

        # jackknife samples
        N_uni = len(xyzw)
        remain = N_uni % N_jackknife
        for i in range(N_jackknife):
            # Make every sub-sample the same size
            slice_length = int(N_uni / N_jackknife)
            lower_ind = i * slice_length
            upper_ind = lower_ind + slice_length
            remove_me = np.arange(lower_ind, upper_ind, 1)

            # Remove slice
            xyzw_temp = np.delete(xyzw, remove_me, 0)
            N_temp = len(xyzw_temp)

            # Output jackknife'd file
            out_file = out_dir + 'uniform_' + p.ID + '_jk_' + str(i) + '.dat'
            np.savetxt(out_file, xyzw_temp, fmt='%1.6f')

            # Add number of elements as first line in file
            line_prepender(out_file, str(N_temp))

    sys.stderr.write('Jackknife sample output to {}.\n\n'.format(out_dir))
"""Serializers""" import sys try: import json except ImportError: try: import simplejson as json except ImportError: sys.write('ERROR: Please install the `json` or `simplejson` module') sys.exit(-1) class JsonSerializer(object): """Simple JSON serializer""" def encode(self, data): return json.dumps(data) def decode(self, data): return json.loads(data)
parser.add_argument('-o', '--outfile', type=argparse.FileType('w'),
                    help="MSA output file", required=True)
parser.add_argument('-v', '--outformat', default="clustal",
                    help="MSA output format")
parser.add_argument('-l', '--list', type=argparse.FileType('r'), required=True)
args = parser.parse_args()

sel_seqs = args.list.read().split()
msa_data = AlignIO.read(args.msa_file, args.informat)

sel_align = []
found_align = []
for align in msa_data:
    if align.id in sel_seqs:
        sel_align.append(align)
        found_align.append(align.id)

AlignIO.write(AlignIO.MultipleSeqAlignment(sel_align), args.outfile, args.outformat)

# report requested sequences that were not found in the alignment
for seq_id in sel_seqs:
    if seq_id not in found_align:
        sys.stderr.write("Not found:\t{}\n".format(seq_id))
def main():
    # Parse CL
    elements_needed = 6
    args_array = np.array(sys.argv)
    N_args = len(args_array)
    assert N_args == elements_needed
    N_jackknife = int(args_array[1])
    todo_dir = args_array[2]
    bins_dir = args_array[3]
    jk_dir = args_array[4]
    out_dir = args_array[5]

    if not os.path.isdir(out_dir):
        sys.stderr.write('{} does not exist. Making...\n'.format(out_dir))
        cmd = 'mkdir ' + out_dir
        os.system(cmd)

    # load the todo pointing list
    input_filename = todo_dir + 'todo_list.dat'
    if not os.path.isfile(input_filename):
        sys.stderr.write('Error: {} does not exist. Exiting...\n'.format(input_filename))
        sys.exit()
    sys.stderr.write('Loading pointing info from file {} ...\n'.format(input_filename))
    input_file = open(input_filename, 'rb')
    todo_list = pickle.load(input_file)
    input_file.close()

    # load bin settings
    bins_filename = bins_dir + 'rbins.ascii.dat'
    if not os.path.isfile(bins_filename):
        sys.stderr.write('Error: {} does not exist. Exiting...\n'.format(bins_filename))
        sys.exit()
    bins = np.genfromtxt(bins_filename, skip_header=1)
    N_rbins = len(bins)

    cleaned_dir = mwu.get_path.get_cleandata_path()
    scripts_dir = mwu.get_path.get_scripts_path()
    pairs_dir = scripts_dir + 'pair_count/'
    exe_dir = './bin/'

    # Check for dir/file existence
    if not os.path.isdir(exe_dir):
        sys.stderr.write('{} does not exist. Making directory...\n'.format(exe_dir))
        cmd = 'mkdir ' + exe_dir
        os.system(cmd)

    pairs_file = exe_dir + 'pair_count'
    if not os.path.isfile(pairs_file):
        sys.stderr.write('{} does not exist. Compiling...\n'.format(pairs_file))
        # find system and use either icc or gcc
        current_sys = mwu.get_path.get_system()
        if current_sys == 'bender':
            cmd = 'bash ' + pairs_dir + 'icc_compile_pair_count.sh ' + pairs_dir
        elif current_sys == 'Adams-MacBook-Pro-2':
            cmd = 'bash ' + pairs_dir + 'gcc_compile_pair_count.sh ' + pairs_dir
        else:
            raise ValueError('Unrecognized system...\n')
        os.system(cmd)
    else:
        sys.stderr.write('Using already compiled file {}\n'.format(pairs_file))

    # Main loop
    for p in todo_list:
        # Make array to store raw dd counts for different jackknife samples
        counts_jk = np.zeros((N_jackknife, N_rbins))
        # Each file will have the exact same norm b/c same N
        norm = np.zeros(N_rbins)

        # counting pairs for each jackknife sample and load pairs into array
        for i in range(N_jackknife):
            jackknife_filename = jk_dir + 'mock_' + p.ID + '_jk_' + str(i) + '.dat'
            counts_filename = jk_dir + 'mock_' + p.ID + '_jk_' + str(i) + '.ddcounts.dat'
            cmd = (pairs_file + ' ' + jackknife_filename + ' ' + bins_filename
                   + ' > ' + counts_filename)
            os.system(cmd)
            counts_jk[i, :], norm = np.genfromtxt(counts_filename, unpack=True,
                                                  usecols=[5, 6])

        jk_mean = np.mean(counts_jk, axis=0)
        jk_std = np.std(counts_jk, axis=0) * np.sqrt(N_jackknife - 1)

        # Normalize counts
        jk_mean /= norm
        jk_std /= norm
        jk_data = np.column_stack((jk_mean, jk_std))

        tol = 1e-8
        jk_frac = np.zeros(len(jk_mean))
        for i in range(len(jk_frac)):
            if jk_mean[i] > tol:
                jk_frac[i] = jk_std[i] / jk_mean[i]

        output_filename = jk_dir + 'mean_std_' + p.ID + '.dat'
        np.savetxt(output_filename, jk_data, fmt='%1.6e')
        frac_filename = jk_dir + 'frac_err_jk_DD_' + p.ID + '.dat'
        np.savetxt(frac_filename, jk_frac, fmt='%1.6e')
def generate_bed_line(sam):
    if sam[:1] == "@":
        return

    read_weight = 1.0  # default weight when no NH tag is present
    for i in range(len(sam)):
        if sam[i].startswith("NM:"):
            # NM stores the edit distance between read and ref
            sam_index_space_pos = i
        elif sam[i].startswith("MD:"):
            # MD stores the string for mismatching positions
            sam_mismatches_pos = i
        elif sam[i].startswith("NH:"):
            # store number of mappings for that read
            read_weight = 1.0 / float(sam[i].replace("NH:i:", ""))

    # CONSTANTS
    BED_CHR = 0
    BED_START = 1
    BED_STOP = 2
    BED_READ = 3
    BED_SCORE = 4
    BED_STRAND = 5
    BED_MID = 6

    SAM_READ_ID = 0
    SAM_FLAG = 1
    SAM_CHR = 2
    SAM_START = 3
    SAM_SCORE = 4
    SAM_MID = 5  # aka cigar string
    SAM_STAR = 6  # next entry on same chromosome?
    SAM__ = 7  # mate info
    SAM__ = 8  # mate info
    SAM_READ = 9
    SAM_STAR = 10
    SAM__ = 42
    SAM_INDEX_SPACE = sam_index_space_pos
    SAM_MISMATCHES = sam_mismatches_pos

    # 5th digit in SAM_FLAG in binary format tells the strand:
    # 0 -> plus, 1 -> minus

    # Now reading in the sam_tuples sequentially.
    # Need to convert this in some kind of bed format
    MID_length = get_match_length(sam[SAM_MID])
    strand = "?"
    SAM_FLAG_BINARY = '{0:012b}'.format(int(sam[SAM_FLAG]))
    SAM_STRAND = int(SAM_FLAG_BINARY[::-1][4])
    if SAM_STRAND == 0:
        strand = "+"
    elif SAM_STRAND == 1:
        strand = "-"
    else:
        # report the flag field of this record
        sys.stderr.write("[ERROR] Could not infer strand from " + sam[SAM_FLAG] + "\n")
        sys.exit(2)

    bed_line = ""
    bed_line += sam[SAM_CHR] + "\t"
    bed_line += str(int(sam[SAM_START]) - 1) + "\t"
    bed_line += str(int(sam[SAM_START]) - 1 + MID_length) + "\t"
    bed_line += sam[SAM_READ_ID] + "\t"
    # the read weight is later on overwritten by the edit distance
    bed_line += str(read_weight) + "\t"
    bed_line += strand

    # At this point, the bed file is almost complete, just needs the MMID
    # Make a tuple with it!
    bed = bed_line.split("\t")

    g_length = MID_length
    # genome is set as only "?" because we don't know the sequence
    g = "".ljust(g_length, "?")
    g = list(g)

    # get the read that was aligned
    soft_clipped_start = 0
    soft_clipped_end = 0
    match_splitted = re.split(r'(\d+)', sam[SAM_MID])
    match_splitted.pop(0)  # re produces a weird '' as elem 0 ...
    if match_splitted[1] == "S":
        soft_clipped_start = match_splitted[0]
    if match_splitted[-1] == "S":
        soft_clipped_end = match_splitted[-2]

    # take only the portion of the read that was aligned to the reference
    # (i.e. omit soft clippings at the start and at the end)
    last_pos = len(sam[SAM_READ]) - int(soft_clipped_end)
    r = sam[SAM_READ][int(soft_clipped_start):last_pos]
    r = list(r)

    # save the final reference and read sequences
    G = []
    R = []

    current_MID = sam[SAM_MID]
    exploded_MID = re.split(r'(\d+|[A-Za-z])', current_MID)
    for item in exploded_MID:
        if item == '':
            exploded_MID.remove(item)

    ### Do alignment read vs genome
    for i in range(int(len(exploded_MID) / 2)):
        number = int(exploded_MID[i * 2])
        op = exploded_MID[i * 2 + 1]  # CIGAR operation ('op' avoids shadowing the builtin 'type')
        # no need to process op "N" at this point
        # because currently the genomic seq anyways
        # only consists of "?"
        if op == "M":
            # read and reference were aligned
            for _ in itertools.repeat(None, number):
                G.append(g.pop(0))
                R.append(r.pop(0))
        elif op == "I":
            # nucleotides in read that are not there in the reference
            for _ in itertools.repeat(None, number):
                G.append("-")
                R.append(r.pop(0))
        elif op == "D":
            # nucleotides in reference that are not there in the read
            for _ in itertools.repeat(None, number):
                G.append(g.pop(0))
                R.append("-")

    mismatch = sam[SAM_MISMATCHES]
    mismatch = mismatch.split(":")[-1]
    mismatch = mismatch.replace("^", "")
    mismatch_splitted = re.split(r'(\d+|[A-Za-z])', mismatch)
    for item in mismatch_splitted:
        if item == '':
            mismatch_splitted.remove(item)

    pointer = 0
    for c in mismatch_splitted:
        if c == '':
            continue  # Skip c and this iteration when this happens
        # Skip the next G[pointer] that is not a dash
        if c.isdigit():
            # perfect match here
            value = int(c)
            while value > 0:
                # skip gaps in the reference because insertions are
                # not present in the mismatch string
                if G[pointer] == '-':
                    pointer += 1
                else:
                    # go over a region of perfect matches
                    pointer += 1
                    value -= 1
        else:
            # again: skip gaps in the reference because insertions
            # are not present in the mismatch string
            while G[pointer] == '-':
                pointer += 1
            # exchange question mark in the reference
            # two possible reasons: 1. mismatch; 2. deletion
            G.pop(pointer)
            G.insert(pointer, c)
            pointer += 1

    # for the MMID, track the positions of the mismatches and deletions
    # final MMID looks like: 15MATIGDCDT12
    # (15 matches -> 1 mismatch (A ref, T in read) -> 1 insertion (G in read)
    #  -> 1 deletion (C in reference) -> 1 deletion (T in reference) -> 12 matches)
    match_count = 0
    MMID = ""
    g_index = 0
    r_index = 0
    for index in range(len(G)):
        cg = G[index]
        cr = R[index]
        if cg == "?":
            match_count += 1
        else:
            if match_count > 0:
                MMID += str(match_count)
                match_count = 0
            if cg == "-":
                MMID += "I" + cr
            elif cr == "-":
                MMID += "D" + cg
            else:
                MMID += "M" + cg + cr
        # note by RS 2016-06-01: this increment statement
        # should not effect the loop as far as I understood python
        # (was introduced not by me but by Manuel)
        index += 1
    if match_count > 0:
        MMID += str(match_count)
        match_count = 0

    index_space = sam[SAM_INDEX_SPACE]
    index_space = index_space.split(":")[-1]

    new_bed = list(bed)
    # exchange read weight by edit distance and
    # define the read weight later on
    new_bed.pop(4)
    new_bed.insert(4, index_space)
    new_bed.append(MMID)
    new_file_line = "\t".join(new_bed)

    return new_file_line
""" Wrapper that execute a program and its arguments but reports standard error messages only if the program exit status was not 0 Example: ./stderr_wrapper.py myprog arg1 -f arg2 """ import sys, subprocess, os assert sys.version_info[:2] >= (2, 4) TRINITY_BASE_DIR = "" if os.environ.has_key('TRINITY_HOME'): TRINITY_BASE_DIR = os.environ['TRINITY_HOME'] else: sys.write( "You must set the environmental variable TRINITY_BASE_DIR to the base installation directory of Trinity before running this" ) sys.stderr.write( "You must set the environmental variable TRINITY_BASE_DIR to the base installation directory of Trinity before running this" ) sys.exit(1) # get bindir bindir = sys.argv[0] bindir = bindir.split("/") if len(bindir) > 1: bindir.pop() bindir = "/".join(bindir) else: bindir = "."
#
#
# This little script offers decryption and verification of the existing
# Ethereum wallets, as well as generation of a new wallet. You can use any
# utf-8 string as the password, which could provide better security
# against a brute-force attack.
# Use at your own risk.
#
# Example:
# python3 ./keytree.py

import os
import sys

if sys.version_info < (3, 7):
    sys.stderr.write("Python should be >= 3.7\n")
    sys.exit(1)

basedir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, basedir + "/frozen_deps")

import re
import argparse
import hashlib
import hmac
import unicodedata
import json
from getpass import getpass as _getpass

import bech32
import mnemonic
from ecdsa import SigningKey, VerifyingKey, SECP256k1
def run(self):
    self.out = sys.stdout
    self.data = []

    re_linemarker = re.compile(r"^# (\d+) \"(.+)\"$")
    re_mnemonic = re.compile(r"^//\s+MNEMONIC:\s+")
    re_start = re.compile(r"^\s*start\s+")

    try:
        # read from stdin when no filename is given
        if self.args.filename is None:
            f = sys.stdin
            self.fname = '<stdin>'
        else:
            f = open(self.args.filename)
            self.fname = self.args.filename

        # comment_queue = []
        # uprogram_queue = []
        # obj = OrderedDict()
        block = []
        self.linenum = 0
        state = 0

        for line in f:
            line = line.strip()
            self.linenum += 1
            if self.args.verbose:
                print(state, line)

            # Is this a preprocessor linemarker?
            m = re_linemarker.match(line)
            if m:
                linenum, fname = m.groups()
                linenum = int(linenum)

            if state == 0 and re_mnemonic.match(line):
                state = 1
                if block:
                    self.process(block)
                block = [line]
                continue

            if state == 1:
                block.append(line)
                if re_start.match(line):
                    state = 2
                continue

            if state == 2:
                if re_mnemonic.match(line):
                    state = 1
                    if block:
                        self.process(block)
                    block = [line]
                    continue
                else:
                    block.append(line)
                    continue

        # At the end, process any remaining blocks, provided we've seen
        # documentation comments and microprograms (state 2).
        if state == 2 and block:
            self.process(block)

    except BrokenPipeError:
        sys.stderr.write("Broken pipe\n")
        sys.exit(1)

    self.report()
from xlrd import open_workbook
import argparse
import sys

parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", dest="inputFile", required=True)
args = parser.parse_args()

wb = open_workbook(args.inputFile)
s = wb.sheets()[0]
for row in range(s.nrows):
    values = []
    for col in range(s.ncols):
        # cell values may be numbers, so stringify before joining
        values.append(str(s.cell(row, col).value))
    sys.stdout.write("\t".join(values) + "\n")
#!/usr/bin/python3.4
import time
import sys

for i in range(5):
    # print('\r', i)
    sys.stdout.write('\r{}'.format(i))
    sys.stdout.flush()
    time.sleep(1)