def fastq2sspace(out, fasta, lib, libnames, libFs, libRs, orientations,
                 libIS, libISStDev, libreadlen, cores, mapq, upto, minlinks,
                 linkratio, sspacebin, verbose, usebwa=False, log=sys.stderr):
    """Map reads onto contigs, prepare library file and execute SSPACE2"""
    # get dir variables
    curdir = os.path.abspath(os.path.curdir)
    outfn = os.path.basename(out)
    outdir = os.path.dirname(out)
    # generate outdir if out contains a dir and that dir does not exist
    if outdir and not os.path.isdir(outdir):
        os.makedirs(outdir)
    # get tab files
    if verbose:
        log.write("[%s] Generating TAB file(s) for %s library/ies...\n"
                  % (datetime.ctime(datetime.now()), len(libnames)))
    tabFnames = get_tab_files(out, fasta, libnames, libFs, libRs, libIS, libISStDev,
                              libreadlen, cores, mapq, upto, verbose, usebwa, log)
    # generate lib file
    if verbose:
        log.write("[%s] Generating library file(s)...\n" % datetime.ctime(datetime.now()))
    libFn = get_libs(out, lib, libnames, tabFnames, libIS, libISStDev, orientations,
                     libreadlen, verbose, log)
    # run sspace
    ## change dir to outdir - sspace always writes intermediate files to the current dir
    os.chdir(outdir)
    CMD = "perl %s -l %s -a %s -k %s -s %s -b %s > %s.sspace.log"
    # SSPACE takes the link ratio via -a and the min links via -k,
    # so linkratio has to precede minlinks here
    cmd = CMD % (sspacebin, os.path.basename(libFn), linkratio, minlinks,
                 os.path.basename(fasta.name), outfn, outfn)
    if verbose:
        log.write(" %s\n" % cmd)
    os.system(cmd)
    ## change back to the starting dir
    os.chdir(curdir)
def dataToWiki(jsonstr, deltas, subreddit):
    '''Take a JSON string, parse it, and return a list of strings for writing
    to a wiki page. Also return a truncated top 10 list.'''
    # data = {
    #     'game_ratings': top,  # [ [name, num raters, avg rating], [...], ... ]
    #     'rating_threshold': min_rating,
    #     'num_raters': int(len(collections)),
    #     'total_ratings': sum([len(x) for x in item_ratings.values()]),
    #     'game_ids': game_ids  # {name: bggid, name: bggid ...}
    # }
    data = json.loads(jsonstr)

    # sample line:
    # |180|[Clue](http://boardgamegeek.com/boardgame/1294)|5.42|**▲1**|205
    ret = ['|Rank|Game|Rating|+/-|Raters']
    ret.append('|--:|:-----------|---------------:|--:|-----:|')
    for rank, g in enumerate(data['game_ratings'], 1):
        ret.append('|{}|[{}](http://boardgamegeek.com/boardgame/{})|{:.2f}|{}|{}'.format(
            rank, g[0], data['game_ids'][g[0]]['bggid'], g[2], deltas[g[0]], g[1]))

    ret.append('\n')
    ret.append('\nA few stats on ratings for this month:')
    ret.append('\n * Total Raters (guild members with collections): {}'.format(data['num_raters']))
    ret.append('\n * Rating Threshold (5% of raters): {}'.format(data['rating_threshold']))
    ret.append('\n * Total Ratings: {}'.format(data['total_ratings']))
    ret.append('\n[Previous Month\'s '
               'Rankings](http://reddit.com/r/{}/w/top_10/full_list_prev).'.format(subreddit))
    ret.append('\n[Archived Rankings](http://reddit.com/r/{}/w/top_10/archive).'.format(subreddit))
    ret.append('\nPosted at {}'.format(datetime.ctime(datetime.now())))

    top10 = ret[:12]
    top10.append('| | |\n| | [more...](/r/{}/w/top_10/full_list) |\n'.format(subreddit))
    top10.append('Posted at: {}'.format(datetime.ctime(datetime.now())))

    return top10, ret
def send_mail(output, since=None):
    """Send the log text via mail to the user.

    Args:
        output: A list of log lines.
        since: A datetime object when the collection started.
    """
    if since is None:
        start = 'beginning of time'
    else:
        start = datetime.ctime(since)
    text = '\n'.join(output)
    mail = MIMEText(text)
    mail['Subject'] = config.mail_subject.format(
        hostname=socket.gethostname(),
        count=len(output),
        start=start,
        end=datetime.ctime(datetime.now()))
    mail['From'] = config.mail_from
    try:
        mail['To'] = config.mail_to
    except AttributeError:
        raise JournalWatchError("Can't send mail without mail_to set. "
                                "Please set it either as argument or in "
                                "{}.".format(CONFIG_FILE))
    argv = [config.mail_binary]
    argv += shlex.split(config.mail_args)
    logging.debug("Sending mail from {} to {}.".format(
        config.mail_from, config.mail_to))
    logging.debug("Subject: {}".format(mail['Subject']))
    logging.debug("Calling command {}".format(argv))
    p = subprocess.Popen(argv, stdin=subprocess.PIPE)
    p.communicate(mail_to_bytes(mail))
def main():
    usage = "wget -O- ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.dat.gz | zcat | %(prog)s [options]"
    parser = argparse.ArgumentParser(usage=usage, description=desc, epilog=epilog)
    parser.add_argument("-v", "--verbose", default=False, action="store_true")
    parser.add_argument("--version", action='version', version='%(prog)s 1.0')
    parser.add_argument("-i", dest="input", nargs="+", type=file,
                        help="input fasta(s)")
    parser.add_argument("-o", dest="outdir", default="uniprot",
                        help="output directory [%(default)s]")
    parser.add_argument("-d", dest="db", default='sprot', type=str,
                        help="database [%(default)s]")
    parser.add_argument("-t", "--tposition", default=1, type=int,
                        help="position of taxid in file name ie fasta/10.4932.faa.txt [%(default)s]")
    parser.add_argument("-u", "--tdelimiter", default=".",
                        help="delimiter to get taxa [%(default)s]")
    parser.add_argument("--accessions", default=0, type=str,
                        help="map only accessions")
    o = parser.parse_args()
    if o.verbose:
        sys.stdout.write("Options: %s\n" % str(o))

    print "[%s] Loading protein information..." % datetime.ctime(datetime.now())
    hash2protid = load_hash2protid(o.input, o.tposition, o.tdelimiter, o.verbose)

    print "[%s] Processing uniprot..." % datetime.ctime(datetime.now())
    outdir = os.path.join(o.outdir, o.db)
    uniprot2metaphors(outdir, hash2protid, o.accessions, o.verbose)
def main():
    usage = "%(prog)s [options]"
    parser = argparse.ArgumentParser(usage=usage, description=desc, epilog=epilog)
    parser.add_argument("-v", dest="verbose", default=False, action="store_true")
    parser.add_argument("-f", dest="fasta", required=True, type=file,
                        help="genome fasta [mandatory]")
    parser.add_argument("-k", dest="minlinks", default=5, type=int,
                        help="min number of links [%(default)s]")
    parser.add_argument("-l", dest="lib", default="",
                        help="lib file [No libs]")
    parser.add_argument("-o", dest="out", default="sspace_out",
                        help="output basename [%(default)s]")
    parser.add_argument("-n", dest="libnames", nargs="+",
                        help="libraries names [%(default)s]")
    parser.add_argument("-1", dest="libFs", nargs="+", type=file,
                        help="libs forward reads [%(default)s]")
    parser.add_argument("-2", dest="libRs", nargs="+", type=file,
                        help="libs reverse reads [%(default)s]")
    parser.add_argument("-i", dest="libIS", nargs="+", type=int,
                        help="libs insert sizes [%(default)s]")
    parser.add_argument("-s", dest="libISStDev", nargs="+", type=float,
                        help="libs IS StDev [%(default)s]")
    parser.add_argument("-t", dest="orientations", nargs="+",  # type=float,
                        help="libs orientations [%(default)s]")
    parser.add_argument("-c", dest="cores", default=2, type=int,
                        help="no. of cpus [%(default)s]")
    parser.add_argument("-q", dest="mapq", default=10, type=int,
                        help="min map quality [%(default)s]")
    parser.add_argument("-u", dest="upto", default=0, type=int,
                        help="process up to pairs [all]")
    o = parser.parse_args()
    if o.verbose:
        sys.stderr.write("Options: %s\n" % str(o))
    if len(o.libnames)*6 != len(o.libnames)+len(o.libFs)+len(o.libRs)+len(o.libIS)+len(o.libISStDev)+len(o.orientations):
        parser.error("Wrong number of arguments!")

    # generate outdir if out contains a dir and that dir does not exist
    if os.path.dirname(o.out) and not os.path.isdir(os.path.dirname(o.out)):
        os.makedirs(os.path.dirname(o.out))

    # get tab files
    if o.verbose:
        sys.stderr.write("[%s] Generating TAB file(s) for %s library/ies...\n"
                         % (datetime.ctime(datetime.now()), len(o.libnames)))
    tabFnames = get_tab_files(o.out, o.fasta, o.libnames, o.libFs, o.libRs,
                              o.libIS, o.libISStDev, o.cores, o.mapq, o.upto, o.verbose)

    # generate lib file
    if o.verbose:
        sys.stderr.write("[%s] Generating libraries file...\n" % datetime.ctime(datetime.now()))
    libFn = get_libs(o.out, o.lib, o.libnames, tabFnames, o.libIS, o.libISStDev,
                     o.orientations, o.verbose)

    # print and run sspace cmd
    cmd = "perl /users/tg/lpryszcz/src/SSPACE-BASIC-2.0_linux-x86_64/SSPACE_Basic_v2.0.pl -l %s -a 0.7 -k %s -s %s -b %s > %s.sspace.log" \
        % (libFn, o.minlinks, o.fasta.name, o.out, o.out)
    print cmd
    os.system(cmd)
def write_log(email_txt):
    if os.path.exists(os.path.join(FILE_LOCATIONS, 'log.txt')):
        mode = 'a'
    else:
        mode = 'w'
    with open(os.path.join(FILE_LOCATIONS, 'log.txt'), mode) as ofile:
        today = datetime.today()
        if len(email_txt) > 1:
            ofile.write('{} email sent \n'.format(datetime.ctime(today)))
        else:
            ofile.write('{} Nothing to report \n'.format(datetime.ctime(today)))
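# Note: opening with mode 'a' already creates the file when it is missing, so
# the exists-check above can be dropped. A minimal sketch of the same logger
# (the FILE_LOCATIONS constant and the os/datetime imports are assumed to match
# the snippet above; the helper name is illustrative):
def write_log_simple(email_txt):
    with open(os.path.join(FILE_LOCATIONS, 'log.txt'), 'a') as ofile:
        stamp = datetime.ctime(datetime.today())
        if len(email_txt) > 1:
            ofile.write('{} email sent \n'.format(stamp))
        else:
            ofile.write('{} Nothing to report \n'.format(stamp))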
def get_tab_files(outdir, reffile, libNames, fReadsFnames, rReadsFnames, inserts,
                  iBounds, libreadlen, cores, mapqTh, upto, verbose, usebwa=False,
                  log=sys.stderr):
    """Prepare genome index, align all libs and save TAB file"""
    ref = reffile.name
    tabFnames = []
    #if max(libreadlen)<=500 and min(libreadlen)>40:
    if usebwa:
        _get_aligner_proc = _get_bwamem_proc
        ref2 = ''  # disable lastal
    else:
        _get_aligner_proc = _get_snap_proc
        ref2 = ref
    # process all libs
    for libName, f1, f2, iSize, iFrac in zip(libNames, fReadsFnames, rReadsFnames, inserts, iBounds):
        if verbose:
            log.write("[%s] [lib] %s\n" % (datetime.ctime(datetime.now()), libName))
        # define tab output
        outfn = "%s.%s.tab" % (outdir, libName)
        # skip if file exists
        if os.path.isfile(outfn):
            log.write(" File exists: %s\n" % outfn)
            tabFnames.append(outfn)
            continue
        # run alignment for all libs
        out = open(outfn, "w")
        bwalog = open(outfn + ".log", "w")
        proc = _get_aligner_proc(f1.name, f2.name, ref, cores, verbose, bwalog)
        # parse aligner output
        sam2sspace_tab(proc.stdout, out, mapqTh, upto, verbose, log, ref2, cores)
        # close file & terminate subprocess
        out.close()
        tabFnames.append(outfn)
        proc.kill()
    return tabFnames
def logger(self, mssg=""): """Logging function.""" head = "\n%s"%("#"*50,) timestamp = "\n[%s]"% datetime.ctime(datetime.now()) memusage = "[%5i Mb] "%(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024, ) if self.log: self.log.write("".join((head, timestamp, memusage, mssg)))
def format_entry(entry):
    """Format a systemd log entry to a string.

    Args:
        entry: A systemd.journal.Reader entry.
    """
    words = []
    if '_SYSTEMD_UNIT' in entry:
        words.append('U')
    else:
        words.append('S')
    if '__REALTIME_TIMESTAMP' in entry:
        words.append(datetime.ctime(entry['__REALTIME_TIMESTAMP']))
    if 'PRIORITY' in entry:
        words.append(entry['PRIORITY'])
    if '_SYSTEMD_UNIT' in entry:
        words.append(entry['_SYSTEMD_UNIT'])
    name = ''
    if 'SYSLOG_IDENTIFIER' in entry:
        name += entry['SYSLOG_IDENTIFIER']
    if '_PID' in entry:
        name += '[{}]'.format(entry['_PID'])
    name += ':'
    words.append(name)
    words.append(entry.get('MESSAGE', 'EMPTY!'))
    return ' '.join(map(str, words))
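# A quick illustration of format_entry with a plain dict standing in for a real
# systemd.journal.Reader entry (field names as used above; all values made up):
from datetime import datetime

entry = {
    '__REALTIME_TIMESTAMP': datetime(2024, 1, 2, 3, 4, 5),
    'PRIORITY': 6,
    '_SYSTEMD_UNIT': 'sshd.service',
    'SYSLOG_IDENTIFIER': 'sshd',
    '_PID': 1234,
    'MESSAGE': 'Accepted publickey for root',
}
print(format_entry(entry))
# -> U Tue Jan  2 03:04:05 2024 6 sshd.service sshd[1234]: Accepted publickey for root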
def execute_from_command_line(argv=None):
    if argv is None:
        argv = sys.argv
    # Parse the command-line arguments.
    parser = OptionParser(usage="""my_spider.py [options] init_url times""")
    parser.add_option('--version', help='Show Version Information')
    parser.add_option('-u', '--url',
                      help='Give one URL, Default: http://lizziesky.blogspot.com/',
                      default='http://lizziesky.blogspot.com/')
    #parser.add_option('-t', '--time', help='Give Time, Default: 10', default='10')
    #........
    options, args = parser.parse_args(argv[1:])

    slash_index = options.url[7:].find('/')
    web_site = options.url[7:7 + slash_index]

    global DIR
    DIR = DIR + web_site + os.sep
    print DIR
    if not os.path.isdir(DIR):
        os.mkdir(DIR)

    global LOG
    LOG = open(DIR + 'log.txt', 'w')
    LOG.write(datetime.ctime(datetime.now()) + '\n')

    spider = Spider(options.url)
    try:
        spider.analyze()
    except Exception, e:
        LOG.write('=' * 20)
        LOG.write('\nSpider Exit by Exception %s\n\n' % e)
def add_feed(self, URIType, URL):
    """Add a feed"""
    self.__sql_add__ = "INSERT INTO feeds (URIType, URL, Active, Added) \
        VALUES (\"{0}\", \"{1}\", \"{2}\", \"{3}\")".format(
        URIType, URL, 1, datetime.ctime(datetime.now()))
    self.cursor.execute(self.__sql_add__)
    self.db.commit()
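# Building SQL by string formatting leaves the INSERT above open to quoting
# bugs and SQL injection. A safer sketch of the same method using parameter
# binding (shown with sqlite3-style '?' placeholders; the cursor/db attributes
# are assumed to match the class above, and the method name is illustrative):
def add_feed_param(self, URIType, URL):
    """Add a feed using a parameterized query."""
    sql = "INSERT INTO feeds (URIType, URL, Active, Added) VALUES (?, ?, ?, ?)"
    self.cursor.execute(sql, (URIType, URL, 1, datetime.ctime(datetime.now())))
    self.db.commit()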
def default(self, obj):
    if isinstance(obj, Promise):
        return unicode(obj)
    if isinstance(obj, datetime):
        return datetime.ctime(obj) + " " + (datetime.tzname(obj) or 'GMT')
    return json.JSONEncoder.default(self, obj)
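# Minimal, self-contained sketch of how such an encoder is wired into
# json.dumps (Python 3; the Promise branch from the snippet above is omitted
# and the class name is illustrative):
import json
from datetime import datetime, timezone

class CTimeEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, datetime):
            return datetime.ctime(obj) + " " + (obj.tzname() or 'GMT')
        return super().default(obj)

print(json.dumps({'when': datetime(2024, 1, 2, tzinfo=timezone.utc)},
                 cls=CTimeEncoder))
# -> {"when": "Tue Jan  2 00:00:00 2024 UTC"}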
def bam2bigwig(bam, genome, output, strand=None, scaled=True, verbose=1):
    """Convert BAM to BigWig scaled in reads per million mapped reads."""
    # skip if outfile exists
    if os.path.isfile(output):
        sys.exit("File exists: %s" % output)
    # generate faidx if absent
    faidx = genome + ".fai"
    if not os.path.isfile(faidx):
        pysam.FastaFile(genome)
    # altered source from https://pythonhosted.org/pybedtools/_modules/pybedtools/contrib/bigwig.html#bam_to_bigwig
    #bam_to_bigwig(bam='path/to/bam', genome='hg19', output='path/to/bigwig')
    if verbose:
        sys.stderr.write("[%s] Converting BAM to BED...\n" % (datetime.ctime(datetime.now()),))
    kwargs = dict(bg=True, split=True, g=faidx)
    # store strand info
    if strand in ("+", "-", "pos", "neg"):
        if strand == "pos":
            strand = "+"
        elif strand == "neg":
            strand = "-"
        kwargs['strand'] = strand
    # store scaling info
    if scaled:
        # speed-up using samtools idxstats
        #readcount = mapped_read_count(bam)
        readcount = get_mapped(bam, verbose)
        _scale = 1 / (readcount / 1e6)
        kwargs['scale'] = _scale
    # get genome coverage
    if verbose:
        sys.stderr.write("[%s] Generating genome coverage\n" % (datetime.ctime(datetime.now()),))
    x = pybedtools.BedTool(bam).genome_coverage(**kwargs)
    cmds = ['bedGraphToBigWig', x.fn, faidx, output]
    # convert to bigWig
    if verbose:
        sys.stderr.write("[%s] Converting BED to bigWig: %s\n"
                         % (datetime.ctime(datetime.now()), " ".join(cmds)))
    os.system(" ".join(cmds))
    # clean-up
    os.unlink(x.fn)
def WriteCopyrightGeneratedTime(self, out):
    now = datetime.now()
    c = """/* Copyright (c) %s The Chromium Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

/* Last generated from IDL: %s. */
""" % (now.year, datetime.ctime(now))
    out.Write(c)
def ncbi_fetch(queries, taxids, ignored_taxids, db, rettype, batchSize, retmax,
               queryAdd, verbose):
    """Fetch from genbank."""
    query = ''
    if queries:
        query = "(" + " OR ".join(str(q) for q in queries) + ") AND"
    # add required taxids
    # (txid33208[Organism] OR txid4932[Organism]) NOT (txid7742[Organism] OR txid9606[Organism])
    if taxids:
        query += ' (' + " OR ".join("txid%s[organism]" % t for t in taxids) + ')'
    # add ignored taxids
    if ignored_taxids:
        query += ' NOT (' + " OR ".join("txid%s[organism]" % t for t in ignored_taxids) + ')'
    # add last bit of query
    if queryAdd:
        query += queryAdd
    # print query
    sys.stderr.write("Query: %s\n" % query)

    # get list of entries for given query
    handle = Entrez.esearch(db=db, term=query, retmax=retmax)
    giList = Entrez.read(handle)['IdList']

    # print info about number of entries
    info = "Downloading %s entries from NCBI %s database in batches of %s entries...\n"
    sys.stderr.write(info % (len(giList), db, batchSize))

    # post NCBI query
    search_handle = Entrez.epost(db, id=",".join(giList))
    search_results = Entrez.read(search_handle)
    webenv, query_key = search_results["WebEnv"], search_results["QueryKey"]

    # fetch all results in batches of batchSize entries at once
    for start in range(0, len(giList), batchSize):
        sys.stderr.write("[%s] %s / %s \r" % (datetime.ctime(datetime.now()),
                                              start + 1, len(giList)))
        # fetch entries; note: this connection error handling could be improved
        error = 1
        while error:
            try:
                handle = Entrez.efetch(db=db, rettype=rettype, retmode="text",
                                       retstart=start, retmax=batchSize,
                                       webenv=webenv, query_key=query_key)
                fasta = handle.read()
                error = 0
            except:
                error += 1
                sys.stderr.write(" error %s" % error)
        # print output to stdout
        sys.stdout.write(fasta)
def start(self, client, addr):
    rdata = client.recv(512)
    if rdata.startswith('FILENAME:'):
        findex = rdata.rfind('FLAG:')
        eindex = rdata.rfind('END')
        filename = rdata[9:findex]
        flag = rdata[findex+5:eindex]
        destabspath = self.destDir + os.path.dirname(filename)
        if not os.path.isdir(destabspath):
            os.makedirs(destabspath)
        absfn = os.path.abspath(destabspath + os.sep + os.path.basename(filename))
        # If the flag is C and the file exists with non-zero size, skip the
        # transfer (used for "complete")
        if flag == 'C' and os.path.isfile(absfn) and os.stat(absfn).st_size:
            client.send('PASS')
            print 'ignored: ', absfn
            return
        # Otherwise, if the file is missing or empty, or carries an update
        # flag, receive the data
        elif flag == 'C' or flag == 'U':
            client.send('STAR')
            f = open(absfn, 'wb')
            while 1:
                rdata = client.recv(1024)
                if not rdata:
                    break
                f.write(rdata)
            self.logFile.write('%s\nSaved a file: %s\n\n' % (datetime.ctime(datetime.now()), absfn))
            print 'saved: %s\n' % absfn
            f.close()
        # Delete: not a real removal, just rename with a _delete suffix
        elif flag == 'D':
            client.send('DELT')
            if os.path.exists(absfn):
                print 'deleted: %s' % absfn
                self.logFile.write('%s\nDeleted a file: %s\n\n' % (datetime.ctime(datetime.now()), absfn))
                os.system('mv %s %s_delete' % (absfn, absfn))
    else:
        pass
    client.close()
    self.logFile.write('%s\nClosed client connection from ip: %s\n\n' % (datetime.ctime(datetime.now()), addr))
def Do(self):
    while 1:
        try:
            client, addr = self.server.accept()
            self.logFile.write("%s\nConnected a client from ip: %s\n\n"
                               % (datetime.ctime(datetime.now()), addr))
            # one thread per client
            t = threading.Thread(target=self.start, name="server", args=(client, addr))
            t.setDaemon(1)
            t.start()
        except KeyboardInterrupt:
            # close the connection
            self.server.close()
            self.logFile.write("%s\nShut down the server\n\n" % datetime.ctime(datetime.now()))
            self.logFile.close()
            exit(0)
        except Exception, e:
            print "Exception: %s" % e
            # close the connection
            self.server.close()
            self.logFile.write("%s\nShut down the server\n\n" % datetime.ctime(datetime.now()))
            self.logFile.close()
def create_logger(level=logging.NOTSET):
    """Create a logger for python-gnupg at a specific message level.

    :type level: :obj:`int` or :obj:`str`
    :param level: A string or an integer for the lowest level to include in logs.

    **Available levels:**

    ==== ======== ========================================
    int  str      description
    ==== ======== ========================================
    0    NOTSET   Disable all logging.
    9    GNUPG    Log GnuPG's internal status messages.
    10   DEBUG    Log module level debugging messages.
    20   INFO     Normal user-level messages.
    30   WARN     Warning messages.
    40   ERROR    Error messages and tracebacks.
    50   CRITICAL Unhandled exceptions and tracebacks.
    ==== ======== ========================================
    """
    _test = os.path.join(os.path.join(os.getcwd(), 'gnupg'), 'test')
    _now = datetime.now().strftime("%Y-%m-%d_%H%M%S")
    _fn = os.path.join(_test, "%s_test_gnupg.log" % _now)
    _fmt = "%(relativeCreated)-4d L%(lineno)-4d:%(funcName)-18.18s %(levelname)-7.7s %(message)s"

    ## Add the GNUPG_STATUS_LEVEL LogRecord to all Loggers in the module:
    logging.addLevelName(GNUPG_STATUS_LEVEL, "GNUPG")
    logging.Logger.status = status

    if level > logging.NOTSET:
        logging.basicConfig(level=level, filename=_fn, filemode="a", format=_fmt)
        logging.logThreads = True
        if hasattr(logging, 'captureWarnings'):
            logging.captureWarnings(True)
        colouriser = _ansistrm.ColorizingStreamHandler
        colouriser.level_map[9] = (None, 'blue', False)
        colouriser.level_map[10] = (None, 'cyan', False)
        handler = colouriser(sys.stderr)
        handler.setLevel(level)
        formatr = logging.Formatter(_fmt)
        handler.setFormatter(formatr)
    else:
        handler = NullHandler()

    log = logging.getLogger('gnupg')
    log.addHandler(handler)
    log.setLevel(level)
    log.info("Log opened: %s UTC" % datetime.ctime(datetime.utcnow()))
    return log
def start(self, client, addr):
    rdata = client.recv(512)
    if rdata.startswith("FILENAME:"):
        findex = rdata.rfind("FLAG:")
        eindex = rdata.rfind("END")
        filename = rdata[9:findex]
        flag = rdata[findex + 5:eindex]
        destabspath = self.destDir + os.path.dirname(filename)
        if not os.path.isdir(destabspath):
            os.makedirs(destabspath)
        absfn = os.path.abspath(destabspath + os.sep + os.path.basename(filename))
        # If the file already exists with non-zero size, skip the transfer
        if flag == "C" and os.path.isfile(absfn) and os.stat(absfn).st_size:
            client.send("PASS")
            print "pass: ", absfn
            return
        elif flag == "C" or flag == "U":
            print "start"
            client.send("STAR")
            file = open(absfn, "wb")
            while 1:
                rdata = client.recv(1024)
                if not rdata:
                    break
                file.write(rdata)
            self.logFile.write("%s\nSaved a file: %s\n\n" % (datetime.ctime(datetime.now()), absfn))
            print "save file: %s\n" % absfn
            file.close()
            client.close()
            self.logFile.write("%s\nClosed client connection from ip: %s\n\n"
                               % (datetime.ctime(datetime.now()), addr))
        elif flag == "D":
            client.send("DELT")
            if os.path.exists(absfn):
                print "delete file: %s" % absfn
                os.system("mv %s %s_delete" % (absfn, absfn))
    else:
        pass
def Do(self):
    print 'Press Ctrl+C Stop...'
    while 1:
        try:
            client, addr = self.server.accept()
            self.logFile.write('%s\nConnected a client from ip: %s\n\n'
                               % (datetime.ctime(datetime.now()), addr))
            # one thread per client
            t = threading.Thread(target=self.start, name='server', args=(client, addr))
            t.setDaemon(1)
            t.start()
        except KeyboardInterrupt:
            # Ctrl+C closes the connection
            self.server.close()
            self.logFile.write('%s\nShut down the server\n\n' % datetime.ctime(datetime.now()))
            self.logFile.close()
            exit(0)
        except Exception, e:
            print 'Exception: %s' % e
            # close the connection
            self.server.close()
            self.logFile.write('%s\nShut down the server\n\n' % datetime.ctime(datetime.now()))
            self.logFile.close()
def get_mapped(bam, verbose=0):
    """Return number of mapped reads in BAM file.
    Create BAM index if not present.
    """
    # generate BAM index if absent
    if not os.path.isfile(bam + '.bai'):
        cmd = "samtools index %s" % bam
        if verbose:
            sys.stderr.write("[%s] Indexing BAM file: %s\n" % (datetime.ctime(datetime.now()), cmd))
        os.system(cmd)
    # open BAM file
    sam = pysam.AlignmentFile(bam)
    return sam.mapped
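# The shell call above can also go through pysam's bundled samtools wrapper,
# avoiding os.system entirely. A small sketch of the same indexing check
# (pysam.index is pysam's samtools-index wrapper; the helper name is
# illustrative):
import os
import pysam

def ensure_bai(bam):
    """Index the BAM file if no .bai is present."""
    if not os.path.isfile(bam + '.bai'):
        pysam.index(bam)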
def __init__(self):
    # Defaults
    self.debug = False
    try:
        self.dropboxPath = environ['DROPBOX_PATH']
    except:
        self.dropboxPath = '/home/joakim/'
    self.computer = environ['COMPUTER_NAME']
    self.errorlog = self.dropboxPath + 'Data Incubator/Project/jefit/allusers/errorlogs/' + \
        self.computer + datetime.ctime(datetime.now()).replace(' ', '_').replace(':', '_') + '.txt'
    # database connection
    self.setDatabase()
def __init__(self, destDir):
    """
    @param destDir: target backup directory. Source files are laid out inside
        it by their original paths. Must be an absolute path without a
        trailing '/'. An empty value keeps paths identical to the remote side,
        which is a bit dangerous: same-named files on the two machines will
        overwrite each other.
    """
    if destDir.endswith('/'):
        destDir = destDir[:-1]
    self.destDir = destDir
    self.destIP = ''  # '192.168.0.57'
    self.destPort = 7878
    timestring = time.strftime("%a_%b_%d_%H_%M_%S_%Y", time.strptime(time.ctime()))
    self.logFile = open('log_' + timestring, 'w')
    # indexFile keeps the one-to-one mapping from source file to destination file
    self.indexFile = open('index_' + timestring, 'w')
    try:
        # create the socket, bind and listen
        self.server = socket(AF_INET, SOCK_STREAM)
        self.server.bind((self.destIP, self.destPort))
        self.server.listen(5)
        self.logFile.write('%s\nCreated socket, bound and listening IP:%s Port:%s\n\n'
                           % (datetime.ctime(datetime.now()), self.destIP, self.destPort))
    except Exception, e:
        self.logFile.write('%s\nRaise Exception %s\n\n' % (datetime.ctime(datetime.now()), e))
        raise
def Do(self):
    while 1:
        try:
            client, addr = self.server.accept()
            #todo: multiple threads
            self.logFile.write('%s\nConnected a client from ip: %s\n\n'
                               % (datetime.ctime(datetime.now()), addr))
            rdata = client.recv(512)
            if rdata.startswith('FILENAME:'):
                tindex = rdata.rfind('TIME:')
                eindex = rdata.rfind('END')
                filename = rdata[9:tindex]
                mtime = float(rdata[tindex+5:eindex])
                destabspath = self.destDir + os.path.dirname(filename)
                if not os.path.isdir(destabspath):
                    os.makedirs(destabspath)
                self.absfn = os.path.abspath(destabspath + os.sep + os.path.basename(filename))
                # If the file already exists, it should be overwritten when
                # newer and skipped otherwise - this part is known to be buggy
                if os.path.isfile(self.absfn):
                    client.send('PASS')
                    print 'pass', self.absfn
                    continue
                mmtime = os.stat(self.absfn).st_mtime
                print mmtime, mtime
                if mmtime == mtime:
                    client.send('PASS')
                    self.logFile.write('%s\nFile exists and is unchanged: %s\n\n'
                                       % (datetime.ctime(datetime.now()), self.absfn))
                    continue
                print 'start'
                client.send('STAR')
                self.file = open(self.absfn, 'wb')
                while 1:
                    rdata = client.recv(1024)
                    if not rdata:
                        break
                    self.file.write(rdata)
                self.logFile.write('%s\nSaved a file: %s\n\n'
                                   % (datetime.ctime(datetime.now()), self.absfn))
                self.indexFile.write('%s->%s\n' % (filename, self.absfn))
                print 'save file: %s\n' % self.absfn
                self.file.close()
            client.close()
            self.logFile.write('%s\nClosed client connection from ip: %s\n\n'
                               % (datetime.ctime(datetime.now()), addr))
        except KeyboardInterrupt:
            # close the connection
            self.server.close()
            self.logFile.write('%s\nShut down the server\n\n' % datetime.ctime(datetime.now()))
            self.logFile.close()
            self.indexFile.close()
            exit(0)
        except Exception, e:
            print 'Exception: %s' % e
            # close the connection
            self.server.close()
            self.logFile.write('%s\nShut down the server\n\n' % datetime.ctime(datetime.now()))
            self.logFile.close()
            self.indexFile.close()
def noaaish2nc(latlon, yearrange, localdir, ncfile, shpfile):
    """Reads in the noaa data and spits it out to a netcdf file"""
    varnames = ['Tair', 'Pair', 'Uwind', 'Vwind', 'RH', 'rain', 'cloud']

    # Read in the semi-processed data
    timestart = yearrange[0]
    timeend = yearrange[1]
    data = readall(latlon, [timestart, timeend], localdir)
    data = dataQC(data, varnames)

    # Create the dictionary format necessary to parse the data to a grouped netcdf file
    ncdict = []
    for dd in data:
        for vv in dd.keys():
            if vv in varnames:
                # Start building the dictionary
                if np.size(dd['Longitude']) > 0 or np.size(dd['Latitude']) > 0:
                    if dd[vv].has_key('Height'):
                        ele = dd[vv]['Height']
                    else:
                        ele = 0.0
                    coords = [{'Name': 'longitude', 'Value': dd['Longitude'], 'units': 'degrees East'},
                              {'Name': 'latitude', 'Value': dd['Latitude'], 'units': 'degrees North'},
                              {'Name': 'elevation', 'Value': ele, 'units': 'metres', 'positive': 'up'},
                              {'Name': 'time', 'Value': dd[vv]['Time'],
                               'units': 'minutes since 1970-01-01 00:00:00'}]
                    attribs = {'StationID': dd['StationID'], 'StationName': dd['StationName'],
                               'Data': dd[vv]['Data'],
                               'coordinates': 'time, elevation, longitude, latitude',
                               'long_name': dd[vv]['Longname'], 'units': dd[vv]['Units'],
                               'coords': coords}
                    ncdict.append({vv: attribs})
                else:
                    print 'Station: %s missing coordinate info - skipping' % dd['StationName']

    globalatts = {'title': 'NCDC/NWS integrated surface hourly observation data',
                  'history': 'Created on ' + datetime.ctime(datetime.now()),
                  'source': 'ftp://ftp.ncdc.noaa.gov/pub/data/ish/'}

    # Write the output to a netcdf file
    netcdfio.writePointData2Netcdf(ncfile, ncdict, globalatts)
    # Write the metadata to a shapefile
    netcdfio.pointNC2shp(ncfile, shpfile)

    return ncdict
def main(vartype, bbox, timestart, timeend, ncfile, dbfile=None):
    varlookup = {'CurrentsActive': 'currents',
                 'WaterLevelActive': 'water_surface_height_above_reference_datum',
                 'Salinity': 'sea_water_salinity',
                 'Conductivity': 'sea_water_electrical_conductivity'}

    # Find the stations
    staInfo = stationInfo(vartype)
    for vv in staInfo.keys():
        if staInfo[vv]['lon'] >= bbox[0] and staInfo[vv]['lon'] <= bbox[1] \
                and staInfo[vv]['lat'] >= bbox[2] and staInfo[vv]['lat'] <= bbox[3]:
            print 'Station %s inside lat-lon range.' % staInfo[vv]['name']
        else:
            staInfo.pop(vv)

    ncdata = []
    for ID in staInfo.keys():
        print 'Getting %s data from %s' % (vartype, staInfo[ID]['name'])
        # Grab the station data
        data = getAllTime(ID, varlookup[vartype], timestart, timeend)
        if len(data) > 0:
            # Convert the output to the format required for the netcdf file
            if vartype == 'CurrentsActive':
                ncdatatmp = parseADCPdata(data, staInfo, ID)
            elif vartype == 'WaterLevelActive':
                ncdatatmp = parseWaterLev(data, staInfo, ID)
            ncdata += ncdatatmp

    # Write to the output netcdf
    globalatts = {'title': 'US-IOOS observation data',
                  'history': 'Created on ' + datetime.ctime(datetime.now()),
                  'source': 'http://opendap.co-ops.nos.noaa.gov/ioos-dif-sos/index.jsp'}
    netcdfio.writePointData2Netcdf(ncfile, ncdata, globalatts)

    # Write the metadata to a shapefile
    #shpfile = ncfile.split('.')[0]+'.shp'
    #netcdfio.pointNC2shp(ncfile,shpfile)

    # Update the database
    #createObsDB(dbfile)
    if not dbfile == None:
        print 'Updating database: %s' % dbfile
        netcdfio.netcdfObs2DB(ncfile, dbfile)
def get_tab_files(outdir, reffile, libNames, fReadsFnames, rReadsFnames, inserts,
                  iBounds, cores, mapqTh, upto, verbose):
    """Prepare genome index, align all libs and save TAB file"""
    # create genome index
    ref = reffile.name
    #'''
    idxfn = ref + ".1.bt2"
    if not os.path.isfile(idxfn):
        cmd = "bowtie2-build %s %s" % (ref, ref)
        if verbose:
            sys.stderr.write(" Creating index...\n %s\n" % cmd)
        bwtmessage = commands.getoutput(cmd)
    # the gem-indexer alternative below is toggled off via a triple-quoted string
    '''
    idxfn = ref + ".gem"
    if not os.path.isfile(idxfn):
        cmd = "gem-indexer -i %s -o %s" % (ref, ref)
        if verbose:
            sys.stderr.write(" Creating index...\n %s\n" % cmd)
        bwtmessage = commands.getoutput(cmd)#'''
    tabFnames = []
    # process all libs
    for libName, f1, f2, iSize, iFrac in zip(libNames, fReadsFnames, rReadsFnames, inserts, iBounds):
        if verbose:
            sys.stderr.write("[%s] [lib] %s\n" % (datetime.ctime(datetime.now()), libName))
        # define tab output
        outfn = "%s.%s.tab" % (outdir, libName)
        # skip if file exists
        if os.path.isfile(outfn):
            sys.stderr.write(" File exists: %s\n" % outfn)
            tabFnames.append(outfn)
            continue
        out = open(outfn, "w")
        # define max insert size allowed
        maxins = (1.0 + iFrac) * iSize
        # run bowtie2 for all libs
        proc = _get_bowtie2_proc(f1.name, f2.name, ref, maxins, cores, upto, verbose)
        #proc = _get_gem_proc( f1.name,f2.name,ref,maxins,upto,cores,verbose )
        # parse bowtie output
        sam2sspace_tab(proc.stdout, out, mapqTh)
        # close file
        out.close()
        tabFnames.append(outfn)
    return tabFnames
def add_visits(change):
    data = read_visits()
    if 'unique' not in data.keys():
        data['unique'] = dict()
    if 'visit' not in data.keys():
        data['visit'] = 0
    if change:
        browser_agent = request.get('HTTP_USER_AGENT')
        data['visit'] += 1
        ip = request.get('REMOTE_ADDR')
        if ip not in data['unique'].keys():
            data['unique'][ip] = list()
        data['unique'][ip].append([dt.ctime(dt.now() + timedelta(hours=5)), browser_agent])
        # else:
        #     data['unique'][ip][dt.ctime(dt.now() + timedelta(hours=5))].append(browser_agent)
        # date from string to datetime: dt.strptime(time, "%a %b %d %X %Y")
        # print(request.get('REMOTE_ADDR'), data)
    write_visits(data)
    return data
def update(self):
    try:
        files = os.listdir('.')
        for pathname in files:
            if re.search('[0-9]', pathname) == None:
                continue
            file = open(pathname, 'r')
            ccns = re.findall('[0-9]{5}', file.read())
            file.close()
            for ccn in ccns:
                print pathname, ccn
                datestring = datetime.ctime(datetime.today())
                r = remote()
                data = r.check(ccn)
                newline = datestring + ' ' + data[0] + ' ' + data[1] + ' ' + data[2] + ' ' + data[3] + ' ' \
                    + '%.3f' % ((int(data[1]) + int(data[3])) * 100.0 / int(data[2])) + '%'
                file = open(pathname, 'a')
                file.write(newline + '\n')
    except IOError, e:
        print e, '5'
from datastructures import *
from postgres import threaded_conn_pool
from fastapi import FastAPI, HTTPException, Query
import psycopg2
from starlette.responses import FileResponse
import tempfile
from datetime import datetime
import re

# some global defaults
limit = 100
author = Author(**dict(family_name="Sheffield", given_name="Nathan",
                       email="*****@*****.**"))
study = Study(**(dict(author=author, manuscript="", description="Default study",
                      date=datetime.ctime(datetime.now()))))

app = FastAPI()


@app.get("/")
async def root():
    return {"message": "EPISB HUB by Databio lab"}


def chr_normalize(chr):
    if not chr.upper().startswith("CHR"):
        chr = "chr" + chr.upper()
    elif (chr.startswith("CHR") or chr.startswith("chr")):
        chr = "chr" + chr[3:].upper()
    return chr


def pattern_regex_check(pattern: str, what: str):
    # validate chr input
    p = re.compile(pattern)
    return p.match(what)
from datetime import datetime
import locale

locale.setlocale(locale.LC_ALL, "")

now = datetime.now()
print(now.year)
print(now.month)
print(now.day)
print(now.microsecond)
print(datetime.ctime(now))
print("strftime() get year : ", datetime.strftime(now, "%y"))
print("strftime() get date : ", datetime.strftime(now, "%D"))
print("date func : ", datetime.strftime(now, "%Y, %B ,%A"))

print("TIMESTAMP")
second = datetime.timestamp(now)
print("Now to second : ", second)
secondToNow = datetime.fromtimestamp(second)
print("Second to now : ", secondToNow)
# from datetime import time
from datetime import datetime

_now = datetime.now()
_now = datetime.today()

result = datetime.now()
result = _now.year
result = _now.month
result = _now.day
result = _now.hour
result = _now.minute
result = _now.second
result = datetime.ctime(_now)
result = datetime.strftime(_now, '%Y')
result = datetime.strftime(_now, '%X')
result = datetime.strftime(_now, '%d')
result = datetime.strftime(_now, '%A')
result = datetime.strftime(_now, '%B')
result = datetime.strftime(_now, '%Y %B %A')

t = '15 April 2019 hour 10:12:30'
result = datetime.strptime(t, '%d %B %Y hour %H:%M:%S')
result = result.year

birthday = datetime(1983, 5, 9, 12, 30, 10)
result = datetime.timestamp(birthday)  # seconds
result = datetime.fromtimestamp(result)  # seconds back to datetime
def jas_dump(jaen, fname, source=""):
    with open(fname, "w") as f:
        if source:
            f.write("-- Generated from " + source + ", " + datetime.ctime(datetime.now()) + "\n\n")
        f.write(jas_dumps(jaen))
def cleanup_tree(tree_root):
    problem_files = dict()
    try:
        rmtree(tree_root)
    except:
        ExceptionInfo = collections.namedtuple('ExceptionInfo', 'path file exception')
        error_idx = []
        files_list = []
        debug_print('tree_root', tree_root)
        # collect every file path, removing whole subtrees where possible
        for root, subdirs, files in os.walk(tree_root, topdown=True):
            files_list.extend([os.path.join(root, file_) for file_ in files])
            try:
                rmtree(root)
            except:
                pass
        # remove leftover files one by one, recording every failure
        for file_ in files_list:
            try:
                os.remove(file_)
            except FileNotFoundError as excep:
                filepath, filename = os.path.split(file_)
                temp_excep = ExceptionInfo(path=filepath, file=filename, exception=excep)
                problem_files.setdefault('FileNotFoundError', []).append(temp_excep)
                error_idx.append(0)
            except PermissionError:
                try:
                    os.chmod(file_, 0o777)
                except Exception as excep:
                    filepath, filename = os.path.split(file_)
                    temp_excep = ExceptionInfo(path=filepath, file=filename, exception=excep)
                    problem_files.setdefault('PermissionError', []).append(temp_excep)
                    error_idx.append(1)
                else:
                    os.remove(file_)
            except Exception as excep:
                filepath, filename = os.path.split(file_)
                temp_excep = ExceptionInfo(path=filepath, file=filename, exception=excep)
                problem_files.setdefault('Unforeseen', []).append(temp_excep)
                error_idx.append(2)
        # report what went wrong, then retry the whole tree once more
        ErrorMessages = collections.namedtuple(
            'ErrorMessages', 'FileNotFoundError PermissionError OtherErrors LogVar')
        error_msgs = ErrorMessages(
            FileNotFoundError="Some files couldn't be removed because they were not found.",
            PermissionError='''Some files couldn't be removed because this user
            account does not have permission to remove them. Contact the file
            owner or a system administrator, or use sudo.''',
            OtherErrors='''Some files couldn't be removed due to unforeseen
            problems. Try removing them manually.''',
            LogVar="For devs: an error log variable has been returned.")
        error_idx.append(3)
        [print(error_msgs[idx]) for idx in error_idx]
        try:
            rmtree(tree_root)
        except Exception as excep:
            filepath = tree_root
            temp_excep = ExceptionInfo(path=filepath, file='', exception=excep)
            problem_files.update({'TreeNotRemoved': temp_excep})
            print('''ERROR: Tree couldn't be removed. This is usually because
            there are files in the tree that couldn't be removed, and/or you
            do not have the necessary permissions to remove some or all files
            and directories.\n For devs: a log variable has been returned.''')
        else:
            print("Tree has been successfully removed.")
    else:
        print("Tree has been successfully removed.")

    project_root = os.path.split(os.path.split(__file__)[0])[0]
    log_location = os.path.join(project_root, 'logs', 'clean_tree.log')
    log_contents = ['\n', datetime.ctime(datetime.now()), '=' * 10, problem_files]
    print(*log_contents, sep='\n')
    try:
        with open(log_location, 'w+') as write_obj:
            write_obj.writelines(str(item) + '\n' for item in log_contents)
        return problem_files
    except:
        pass
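# For comparison, the standard library already has a hook for this problem:
# shutil.rmtree accepts an onerror callback that can chmod a stubborn entry
# and retry the failed call. A minimal sketch of that underlying technique
# (not the function above; the hook name is illustrative):
import os
import shutil
import stat

def _chmod_and_retry(func, path, exc_info):
    """onerror hook: make the entry writable, then retry the failed call."""
    os.chmod(path, stat.S_IWRITE)
    func(path)

# shutil.rmtree('some/tree', onerror=_chmod_and_retry)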
        pwd.getpwuid(os.getuid()).pw_name,
    'python_installation': 'python.tar.gz',
    'atlas_installation': 'atlas.tar.gz',
    'executable_filename': 'epistasis_%s.sh' % dataset,
    'submit_filename': 'epistasis_%s.sub' % dataset,
    'jobs_to_rerun_filename': jobs_to_rerun_filename,
    'debug': ['', '--debug'][debug],
    'prog_path': prog_path,
    'timestamp': datetime.ctime(datetime.now()),
    'species': '-s %s' % species,
    'maxthreads': '--maxthreads %s' % maxthreads,
    'feature_selection': ['', '--feature-selection'][featsel],
    'exclude': ['', '--exclude'][exclude],
    'condition': ['', '--condition %s' % condition][condition is not None],
    'use_memory': memory,
    'use_chtc': ['requirements = (Target.PoolName =!= "CHTC")', '']['chtc' in pools],
    'use_osg': ['', '+wantGlidein = true']['osg' in pools],
    'use_uw': ['', '+wantFlocking = true']['uw' in pools],
})
def html_dump(jadn, fname, source=''):
    with open(fname, 'w') as f:
        if source:
            f.write('<!-- Generated from ' + source + ', ' + datetime.ctime(datetime.now()) + '-->\n')
        f.write(html_dumps(jadn))
kareler = Kareler(5)
for i in kareler:
    print(i)

######### ADVANCED MODULES
########## DATETIME MODULE ###################
from datetime import datetime
import locale

locale.setlocale(locale.LC_ALL, '')  ####### gives locale-specific output

print(datetime.now())
x = datetime.now()
print(x.year)
print(x.month)
print(x.day)
print(x.hour)
print(x.minute)
print(datetime.ctime(x))  ## ctime

# strftime()
print(datetime.strftime(x, '%Y'))  ## year--'%Y', month--'%B', day name--'%A', time--'%X', date--'%D'
print(datetime.strftime(x, '%A %B'))

## timestamp() --- fromtimestamp()
saniye = datetime.timestamp(x)
print(saniye)
x2 = datetime.fromtimestamp(saniye)
print(x2)
print(datetime.fromtimestamp(0))

## FINDING THE DIFFERENCE BETWEEN TWO DATES
tarih = datetime(2020, 1, 15)
x = datetime.now()
print(tarih - x)

## OS MODULE -- lets us perform operations on the operating system
from datetime import datetime, timedelta
# from datetime import time
# from datetime import date

Info = datetime.now()  # Can be written as "Info = datetime.today()" also.
# result2 = datetime.date(Info)
# result3 = datetime.time(Info)
# print(Info.year)
# print(Info.month)
# print(Info.day)
# print(Info.hour)
# print(Info.minute)
# print(Info.second)

result = datetime.ctime(Info)  # It gives a little bit more descriptive information
result = datetime.strftime(Info, '%Y')  # Only year
result = datetime.strftime(Info, '%X')  # Only time info
result = datetime.strftime(Info, '%d')  # Only day
result = datetime.strftime(Info, '%A')  # Only day in string format
result = datetime.strftime(Info, '%B')  # Only month in string format
result = datetime.strftime(Info, '%Y %B %A')

# t = '10 August 2020'
# day, month, year = t.split()
# print(day)
# print(month)
# print(year)
# print(result)
            data = server_status['mem']['virtual']
        elif name == 'mongodb_mem_mapped':
            data = server_status['mem']['mapped']
        elif name == 'mongodb_mem_mapped_with_journal':
            data = server_status['mem']['mappedWithJournal']
        logger.debug("metric_handler returning: name={} val={}".format(name, data))
        return data
    except Exception as e:
        logger.exception("metric_handler exception: {}".format(e))
        return 0


if __name__ == '__main__':
    from time import sleep
    from datetime import datetime

    metric_init({'time_max': '60'})
    while True:
        print "--- {}".format(datetime.ctime(datetime.utcnow()))
        print "Conn Current: {}".format(metric_handler('mongodb_conn_current'))
        print "OP Insert: {}".format(metric_handler('mongodb_op_count_insert'))
        print "OP Query: {}".format(metric_handler('mongodb_op_count_query'))
        print "OP Update: {}".format(metric_handler('mongodb_op_count_update'))
        print "MEM Resident: {}".format(metric_handler('mongodb_mem_resident'))
        print "MEM Virtual: {}".format(metric_handler('mongodb_mem_virtual'))
        sleep(1)
def process_phylome(phyid, n, species_list, dbs, step, verbose):
    """If not species_list, all species of given phylome are taken."""
    if verbose:
        sys.stderr.write("[%s] Connecting to PhylomeDB...\n" % datetime.ctime(datetime.now()))
    p = _getConnection()  #; print p.get_phylomes()

    # get some necessary info
    phylome_seedids = p.get_phylome_seed_ids(phyid)[0]  # loading seedids
    #phylome_seedids = ['Phy0039MUB_9999994','Phy0039MUC_9999994','Phy0039MQE_9999994']
    if verbose:
        sys.stderr.write("[%s] Processing %s seeds from phylome_%s...\n"
                         % (datetime.ctime(datetime.now()), len(phylome_seedids), phyid))

    # print header
    header = "#"  # "seedid"
    for i in range(n):
        header += "one2one%s\t" % (i + 1,)
    header += "consistency score\n"
    sys.stdout.write(header)

    # process seedids
    i = pI = skipped = positives = 0
    processed = set()
    for seedid in phylome_seedids:
        i += 1
        if seedid in processed:
            skipped += 1
            continue
        # get list of groups A1-B1 A2-B2 and so on
        groups, t = process_tree(p, phyid, n, species_list, seedid)
        # do nothing if no such groups
        if not groups:
            continue
        # format output line
        line = ""  # "%s" % seedid
        ### here you can add protein id conversion
        for group in groups:
            # update processed
            for protid in group:
                processed.add(protid)
                extids = []
                if dbs:
                    extids = get_external(p, protid, dbs)
                line += "|".join([protid, ] + extids) + ","
            line = line[:-1] + "\t"
        # get consistency across collateral trees
        cs, processed = get_consistency(p, t, phyid, seedid, species_list, n, processed)
        line += "%.3f\n" % cs
        # write to stdout
        sys.stdout.write(line)
        positives += 1
        # print progress
        if i > pI:
            pI += step
            sys.stderr.write(" %s / %s\t%s \r" % (i, len(phylome_seedids), positives))
    if verbose:
        sys.stderr.write("[%s] Processed %s seed proteins (duplicates skipped: %s). "
                         "%s homologous groups printed.\n"
                         % (datetime.ctime(datetime.now()), len(phylome_seedids),
                            skipped, positives))
print(suan.hour)
print(suan.minute)
print(suan.second)
print(suan.date())
print(suan.time())
"""

"""
suan = datetime.now()
liste = str(suan.date()).split("-")
print("Year : {}, Month : {} , Day {}".format(liste[0], liste[1], liste[2]))
"""

"""
while True:
    suan = datetime.now()
    saniye = suan.second
    if saniye == 20:
        print("the second reached 20!")
        break
    print(suan)
    time.sleep(1)
"""

suan = datetime.now()
print(datetime.ctime(suan).split(" ")[3].split(":"))
def cddl2jadn_dump(cddl, fname, source=""):
    with open(fname, "w") as f:
        if source:
            f.write("-- Generated from " + source + ", " + datetime.ctime(datetime.now()) + "\n\n")
        f.write(cddl2jadn_dumps(cddl))
from datetime import datetime  # import the datetime class from the datetime module
import locale

locale.setlocale(locale.LC_ALL, "")  # switches the locale (here: Turkish)

print(datetime.now())  # prints the current moment
su_an = datetime.now()
print(su_an.year)   # prints the year
print(su_an.month)  # prints the month
print(su_an.hour)   # prints the hour
print(datetime.ctime(su_an))  # a more readable form, like "Mon November ..."
print("-----------------")

print(datetime.strftime(su_an, "%Y"))  # only the year, e.g. 2017
print(datetime.strftime(su_an, "%B"))  # the month, e.g. November
print(datetime.strftime(su_an, "%A"))  # the day name, e.g. Monday
print(datetime.strftime(su_an, "%X"))  # the time, e.g. 23:00:14
print(datetime.strftime(su_an, "%D"))  # the date, e.g. 11/27/17
print(datetime.strftime(su_an, "%Y %B %A"))  # 2017 November Monday

saniye = datetime.timestamp(su_an)  # converts the datetime object to seconds
print(saniye)
su_an2 = datetime.fromtimestamp(saniye)  # converts seconds back to a datetime
print(su_an2)
su_an = datetime.fromtimestamp(0)  # gives the epoch value, 1970
print(su_an)
def jadn_dump(schema, fname, source=""):
    with open(fname, "w") as f:
        if source:
            f.write("\"Generated from " + source + ", " + datetime.ctime(datetime.now()) + "\"\n\n")
        f.write(jadn_dumps(schema) + "\n")
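# The *_dump helpers in these snippets (jas_dump, html_dump, cddl2jadn_dump,
# jadn_dump, proto2jadn_dump) all share one shape: open fname, write an
# optional "Generated from <source>, <ctime>" header in the target language's
# comment syntax, then write the serialized schema. A generic sketch of that
# pattern (the function and parameter names here are illustrative, not from
# any of the original modules):
from datetime import datetime

def dump_with_header(text, fname, source="", comment="--"):
    """Write text to fname, prefixed by a generated-from header."""
    with open(fname, "w") as f:
        if source:
            f.write("%s Generated from %s, %s\n\n"
                    % (comment, source, datetime.ctime(datetime.now())))
        f.write(text)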
# from datetime import time
# import datetime

simdi = datetime.now()
simdi = datetime.today()

result = datetime.now()
result = simdi.year
result = simdi.month
result = simdi.day
result = simdi.hour
result = simdi.minute
result = simdi.second
result = datetime.ctime(simdi)

result = datetime.strftime(simdi, '%Y')  # Only the year.
result = datetime.strftime(simdi, '%X')  # Only the time.
result = datetime.strftime(simdi, '%d')  # Only the day.
result = datetime.strftime(simdi, '%A')  # The day name as a string.
result = datetime.strftime(simdi, '%B')  # The month name as a string.
# We can pull out exactly the fields we want, in any combination.
result = datetime.strftime(simdi, '%Y %B %A')  # Year, month, day name.

t = '15 April 2019 hour 10:12:30'  # we'll convert this to a datetime object
result = datetime.strptime(
    t, '%d %B %Y hour %H:%M:%S'
)  # this parses a previously written string back into date information
theDate = datetime.now()

# Now (Year, day, month, clock)
print("Year, day, month:", theDate)

# Only Year
print("Year:", theDate.year)

# Only Day
print("Day:", theDate.day)

# Only Month
print("Month:", theDate.month)

# Show as fancy
print(datetime.ctime(theDate))

# strftime() Function
"""
Year: %Y
Month: %B
Day(Name): %A
Clock: %X
Day: %D
"""
print("Strftime:", datetime.strftime(theDate, "%D %A %B %Y"))

# Set datetime to your current locale
import locale
def timestamp():
    """Return formatted date-time string"""
    return "\n%s\n[%s] " % ("#" * 50, datetime.ctime(datetime.now()))
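# Typical use of timestamp() as a log-line prefix (a sketch; the message text
# is made up):
import sys

sys.stderr.write(timestamp() + "Step 1 finished\n")
# stderr then shows a 50-character '#' rule, a bracketed ctime such as
# [Tue Jan  2 03:04:05 2024], and the message.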
def proto2jadn_dump(proto, fname, source=""):
    with open(fname, "w") as f:
        if source:
            f.write("-- Generated from " + source + ", " + datetime.ctime(datetime.now()) + "\n\n")
        f.write(proto2jadn_dumps(proto))
# Get the time from a few days ago
from datetime import datetime, timedelta

t = datetime.now()
print(t)
t1 = t - timedelta(days=3)
print(t1)

# Convert to a given format
t2 = datetime.ctime(t1)
print(t2)
t3 = datetime.strftime(t1, '%Y-%m-%d %H:%M:%S')
print(t3)
from datetime import datetime

print(datetime.now())
moment = datetime.now()
print(moment.year)
print(moment.month)
print(moment.day)
print(moment.hour)
print(moment.minute)
print(moment.second)
print(moment.microsecond)
print(datetime.ctime(moment))
print("#")
print(datetime.strftime(moment, "%Y"))
print(datetime.strftime(moment, "%B"))
print(datetime.strftime(moment, "%A"))
print(datetime.strftime(moment, "%X"))
print(datetime.strftime(moment, "%D"))
print(datetime.strftime(moment, "%Y %B %A"))

import locale
print(locale.setlocale(locale.LC_ALL, ""))
def process_paired(inputs, qseq, outdir, outprefix, unpaired, minlen, maxlen,
                   limit, minqual, noSeparate, combined, qual64offset, replace,
                   stripHeaders, fasta, verbose):
    """Process paired libraries."""
    ## Define output fnames
    fnend = outformat = 'fasta' if fasta else 'fastq'
    prefix = ("%sq%s_l%s") % (outprefix, minqual, minlen)
    outfnF = os.path.join(outdir, '%s.1.%s' % (prefix, fnend))
    outfnR = os.path.join(outdir, '%s.2.%s' % (prefix, fnend))
    unpairedfn = os.path.join(outdir, '%s.unpaired.%s' % (prefix, fnend))
    combinedfn = os.path.join(outdir, '%s.combined.%s' % (prefix, fnend))

    ## Check if outfiles exist
    if not replace:
        if os.path.isfile(outfnF) or os.path.isfile(outfnR) or \
           os.path.isfile(unpairedfn) or os.path.isfile(combinedfn):
            logFile.write("At least one of the output files is present. Remove "
                          "them or run with --replace parameter. Exiting!\n")
            logFile.flush()
            exit(-3)

    # open files for writing
    outF = outR = outCombined = outUnpaired = False
    if not noSeparate:
        outF = open(outfnF, 'w')
        outR = open(outfnR, 'w')
    # open out file for unpaired reads
    if unpaired:
        outUnpaired = open(unpairedfn, 'w')
    # open out file for combined FastQ
    if combined:
        outCombined = open(combinedfn, 'w')
    outfiles = (outF, outR, outCombined, outUnpaired)

    # process all input files
    fpair = []
    i = pi = filtered = single = 0
    for fn in inputs:
        fpair.append(fn)
        if len(fpair) != 2:
            continue
        ## Process QSEQ files: GERALD->FASTA
        i, pfiltered, psingle = filter_paired(fpair, outfiles, minlen, maxlen,
                                              limit, minqual, qual64offset, qseq,
                                              stripHeaders, outformat, pi)
        ## Print info
        if verbose:
            logFile.write('[%s] %s %s %s %s\n'
                          % (datetime.ctime(datetime.now()), fpair[0].name,
                             fpair[1].name, i - pi, pfiltered))
            logFile.flush()
        # update read counts
        pi = i
        filtered += pfiltered
        single += psingle
        # reset fnames
        fpair = []

    ## Close outfiles
    for outfile in outfiles:
        if outfile:
            outfile.close()

    ## Print info
    ratio = (i - filtered) * (100.0 / i)
    logFile.write('Processed pairs: %s. Filtered: %s. Reads ' % (i, filtered))
    logFile.write('pairs included: %s [%.2f%c]. ' % (i - filtered, ratio, '%'))
    logFile.write('Orphans: %s [%.2f%c]\n' % (single, single * (100.0 / i), '%'))
    logFile.flush()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 'Mad Dragon'
__mtime__ = '2019/1/5'
# I don't know what youthful recklessness means; I only know the winner takes all
              ┏┓      ┏┓
            ┏┛┻━━━┛┻┓
            ┃      ☃      ┃
            ┃  ┳┛  ┗┳  ┃
            ┃      ┻      ┃
            ┗━┓      ┏━┛
                ┃      ┗━━━┓
                ┃  Divine beast, bless:  ┣┓
                ┃  no bugs, ever!  ┏┛
                ┗┓┓┏━┳┓┏┛
                  ┃┫┫  ┃┫┫
                  ┗┻┛  ┗┻┛
"""
from datetime import datetime, date, time

print(datetime.now())
print(datetime.date(datetime.now()))
print(datetime.time(datetime.now()))
print(datetime.ctime(datetime.now()))
print(datetime.utcnow())
def get_notice_full(perc: int, process_num: str, base: int = 10000) -> None:
    if perc % base == 0:
        message("{0} data points processed @ {1}!".format(perc, process_num))
        message("Time: {}".format(datetime.ctime(datetime.now())))
        get_system_status()
def logger(message, log=sys.stdout):
    """Log messages"""
    memory = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024
    log.write("[%s] %s [memory: %6i Mb]\n" % (datetime.ctime(datetime.now()), message, memory))
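# Hedged usage sketch of the logger above (assumes the same imports as the
# snippet: sys, resource and datetime; the message text is made up):
logger("Finished parsing input")
# stdout then gets a line like:
# [Tue Jan  2 03:04:05 2024] Finished parsing input [memory:    42 Mb]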