def fetch_core(file_dir, url, access_token):
    # Initialize the needed modules
    CHandler = urllib2.HTTPCookieProcessor(cookielib.CookieJar())
    browser = urllib2.build_opener(CHandler)
    browser.addheaders = [('User-agent', 'InFB - [email protected] - http://ruel.me')]
    urllib2.install_opener(browser)
    # print 'Using access token: %s' % access_token + '\n'
    url = 'https://graph.facebook.com/' + url + ('?limit=100&access_token=%s' % access_token)
    print url
    res = browser.open(url)
    fres = res.read()
    jdata = json.loads(fres)
    fres = json.dumps(jdata, ensure_ascii=False)
    i = 1
    FileUtility.write(file_dir + "/data" + str(i) + ".json", fres)
    while len(jdata['data']) > 0:
        data = jdata['data']
        for sub_data in data:
            pid = sub_data['id']
            FetchPostsDetail.fetch_core(pid, access_token, browser)
        url = jdata['paging']['next']
        print url
        res = browser.open(url)
        fres = res.read()
        jdata = json.loads(fres)
        fres = json.dumps(jdata, ensure_ascii=False)
        i = i + 1
        FileUtility.write(file_dir + "/data" + str(i) + ".json", fres)
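# The pagination loop above assumes every Graph API response carries
# paging.next; a defensive variant (a sketch of mine, not the original code)
# stops cleanly when 'data' or 'paging.next' is missing:
def fetch_all_pages(jdata, access_token, browser):
    while jdata.get('data'):
        for sub_data in jdata['data']:
            FetchPostsDetail.fetch_core(sub_data['id'], access_token, browser)
        url = jdata.get('paging', {}).get('next')
        if not url:
            break
        jdata = json.loads(browser.open(url).read())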
def testSaveIfEmptyDirectory(self, mocked_pd_csv, mocked_pl_join, mocked_pl_mkdir, mocked_pl_isdir):
    ''' Test that metric is saved correctly with an empty directory path given '''
    # assert mocks
    self.assertIs(pd.Series.to_csv, mocked_pd_csv)
    self.assertIs(Path.mkdir, mocked_pl_mkdir)
    self.assertIs(Path.joinpath, mocked_pl_join)
    self.assertIs(Path.is_dir, mocked_pl_isdir)
    # create mock values
    mocked_metric = pd.Series([.1, .2, .3], name='metric-mock', dtype=float)
    mocked_pl_isdir.return_value = False  # create default directory
    mocked_pl_join.return_value = Path('default/directory/file')
    # call function to test
    fut.save_metric_to_file(mocked_metric)  # save to default directory
    # assert calls
    mocked_pl_isdir.assert_called()  # called twice
    mocked_pl_mkdir.assert_called_once()
    mocked_pl_join.assert_called()  # called twice
    mocked_pd_csv.assert_called_once()
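# The mock parameters above imply a @mock.patch decorator stack that the
# snippet does not show. A plausible reconstruction (inferred from the
# assertIs checks; the innermost decorator binds to the first mock argument
# after self), not taken from the source:
#
# @mock.patch.object(Path, 'is_dir')
# @mock.patch.object(Path, 'mkdir')
# @mock.patch.object(Path, 'joinpath')
# @mock.patch.object(pd.Series, 'to_csv')
# def testSaveIfEmptyDirectory(self, mocked_pd_csv, mocked_pl_join,
#                              mocked_pl_mkdir, mocked_pl_isdir):
#     ...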
def index(self):
    # Every yield line adds one part to the total result body.
    home_page = public.INDEX_PAGE
    if not FileUtility.fileExists(home_page):
        yield "您所访问的页面不存在!"  # "The page you requested does not exist!"
        return
    if FileUtility.fileSize(home_page) <= 1024:
        content = FileUtility.fileRead(home_page)
        if content is None:
            content = "您所访问的页面不存在!"
        yield content
    else:
        fp = None
        try:
            fp = open(home_page, "rb")
            max_size = FileUtility.fileSize(home_page)
            pos = 0
            size = 1024
            while pos < max_size:
                if pos + size >= max_size:
                    size = max_size - pos
                content = fp.read(size)
                yield content
                pos += size
        except Exception, e:
            pass
        finally:
            if fp:
                fp.close()
def fetch_core(uid, access_token):
    uid = str(uid)
    # Initialize the needed modules
    CHandler = urllib2.HTTPCookieProcessor(cookielib.CookieJar())
    browser = urllib2.build_opener(CHandler)
    browser.addheaders = [('User-agent', 'InFB - [email protected] - http://ruel.me')]
    urllib2.install_opener(browser)
    # print 'Using access token: %s' % access_token + '\n'
    url = 'https://graph.facebook.com/%s?access_token=%s' % (uid, access_token)
    print url
    res = browser.open(url)
    fres = res.read()
    jdata = json.loads(fres)
    fres = json.dumps(jdata, ensure_ascii=False)
    FileUtility.write('data/profile/%s.json' % uid, fres)
    # fetch profile picture
    if not os.path.isfile('data/profile/%s_picture_large.png' % uid):
        f = open('data/profile/%s_picture_large.png' % uid, "wb")
        f.write(urllib.urlopen('https://graph.facebook.com/%s/picture?type=large' % uid).read())
        f.close()
    if not os.path.isfile('data/profile/%s_picture.png' % uid):
        f = open('data/profile/%s_picture.png' % uid, "wb")
        f.write(urllib.urlopen('https://graph.facebook.com/%s/picture' % uid).read())
        f.close()
def load_sgf(self, fn):
    content = ""
    if fn:
        path = os.path.join(self.dbhome, fn)
        if FileUtility.fileExists(path):
            content = FileUtility.fileRead(path)
    return content
def __save(self, df, alpha_date, month, discard_days):
    df['code'] = df['code'] + '-CN'
    FileUtility.create_dir('{2}/PMOM{0}{1}'.format(month, discard_days, self.output_path))
    df = df[['code', 'adj_return']]
    df.columns = ['S_INFO_WINDCODE', 'PMOM%s%s_raw' % (month, discard_days)]
    # to_csv returns None, so its result is not assigned
    df.to_csv(
        '{3}/PMOM{1}{2}/PMOM{1}{2}_raw_CN_{0}.csv'.format(
            alpha_date.strftime('%Y%m%d'), month, discard_days, self.output_path),
        index=False)
def edit_double_attr(new_attr, rng):
    rng = [int(n) for n in rng.split("-")]
    remainder = new_attr % 256
    multiplier = new_attr // 256
    new_attr = remainder + (256 * multiplier)
    new_attr = hex(new_attr).encode().split(b'x')
    new_attr = new_attr[1].zfill(4)
    new_attr = [(new_attr[i:i + 2]) for i in range(0, len(new_attr), 2)]
    FileUtility.change_value(new_attr, rng)
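# For illustration, the packing above turns a 16-bit value into two
# big-endian hex byte strings; this standalone check (the helper name and the
# 300 -> [b'01', b'2c'] example are mine, not from the source) mirrors it:
def _pack_u16_hex_pairs(value):
    digits = hex(value).encode().split(b'x')[1].zfill(4)
    return [digits[i:i + 2] for i in range(0, len(digits), 2)]

assert _pack_u16_hex_pairs(300) == [b'01', b'2c']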
def __init__(self, dbfile=None):
    if dbfile is not None:
        self.dbfile = dbfile
    else:
        self.dbfile = public.DB_FILE
    if not FileUtility.fileExists(self.dbfile):
        FileUtility.createFile(self.dbfile)
    self.conn = None
    self.cursor = None
    self.conn_failed = False
def prepareConverter(self, fileIn):
    FileUtility.isValid(fileIn)
    sys.stderr.write("counting lines to prepare converter\n")
    numOfLines = FileUtility.countLines(fileIn)
    sys.stderr.write(str(numOfLines) + " to read\n")
    readed = 0
    for line in open(fileIn):
        genome = int(line.split()[0])
        taxon = int(line.split()[1])
        self.Converter[genome] = taxon
        readed += 1
        if readed % 100000 == 0:
            sys.stderr.write(str(readed) + " lines read out of " + str(numOfLines) + "\n")
def __init__(self, callback):
    LoadUtility.__init__(self, callback)
    phasesToScan = ["phase_3/models"]
    self.models = FileUtility.findAllModelFilesInVFS(phasesToScan)
    self.version_lbl = None
    self.clouds = None
    self.barShadow = None
def testValidFilePath(self):
    ''' Test that the last element is returned if a valid path is given '''
    valid_path = 'valid/path/to/filename.py'
    returned_name = fut.extract_filename(valid_path)
    self.assertEqual(returned_name, 'filename.py')
def testEmptyFilePath(self):
    ''' Test that an empty filename is returned when an empty filepath is given '''
    returned_name = fut.extract_filename('')
    self.assertEqual(returned_name, '')
def __init__(self, callback):
    LoadUtility.__init__(self, callback)
    phasesToScan = ['models', 'phase_3/models']
    self.models = FileUtility.findAllModelFilesInVFS(phasesToScan)
    self.version_lbl = None
    self.clouds = None
    return
def testEmptyDirectory(self):
    ''' Test that a valid list is returned, although an empty directory is provided '''
    returned_type = fut.get_all_code_files('', ['.h'])
    self.assertIsInstance(returned_type, list)
    self.assertEqual(returned_type, [])
def testEmptyExtensionsList(self):
    ''' Test that a valid list is returned, although an empty extensions-list is provided '''
    returned_type = fut.get_all_code_files('../', [])
    self.assertIsInstance(returned_type, list)
    self.assertEqual(returned_type, [])
def is_sgf_file(self, fn):
    ext = FileUtility.fileExt(fn)
    if ext == 'sgf':
        ret = 0
    elif ext == 'sgf_':
        ret = 1
    else:
        ret = None
    return ret
def loadConverter(self, file):
    sys.stderr.write("Loading converter from file\n")
    if FileUtility.isValid(file):
        f = open(file, "rb")
        self.Converter = pickle.load(f)
        f.close()
        sys.stderr.write("Converter loaded\n")
    else:
        Error.error("Converter.bin can not be opened. You should produce or reproduce it using prepare.py")
def initDB(self):
    """
    Initialize the database for one exercise set. sqlite is not used because:
      1) sqlite is slow on boards such as the Cubieboard;
      2) sqlite does not support multithreading here.
    Each exercise set uses one JSON database file; the file name is normalized
    with md5 and the file is initialized before loading. The file format is:
    {
        sum: xx,
        finish_sum: xx,
        current_num: xx,
        files: {
            <fn1>: {
                s : 0 | 1,   ## status: 0 unfinished, 1 finished
                t1: xxx,     ## start time of the exercise, in seconds
                t2: xxx,     ## end time of the exercise
                c : 0 | 1,   ## crypt: 0 plain, 1 encrypted
                n : yyy,     ## file index number
                st: xx,      ## moves played
                tr: xx,      ## attempted moves
                d : 0 | 1    ## whether the answer was viewed
            },
            <fn2>: { ... },
            ....
        }
    }
    """
    if FileUtility.fileExists(self.dbfile):
        return True
    db = {}
    files = {}
    count = 0
    finish = 0
    current = 0
    for fn in self.sgf_db.list_dir():
        crypt = self.is_sgf_file(fn)
        if crypt is None:
            continue
        files[fn] = {}
        files[fn]['c'] = crypt   ## crypt
        files[fn]['s'] = 0       ## status
        files[fn]['t1'] = 0      ## start time
        files[fn]['t2'] = 0      ## end time
        files[fn]['n'] = count   ## fn index
        count += 1
    db['sum'] = count
    db['finish_sum'] = finish
    db['current_num'] = current
    db['files'] = files
    self.saveDB(self.dbfile, db)
    del db
def validate_choice(choice, bound, save=False):
    try:
        choice = int(choice)
        if choice not in range(0, bound + 1):
            raise ValueError
    except ValueError:
        if isinstance(choice, str) and choice.upper() == 'S' and bound <= 11:
            FileUtility.write_file()
            save = True
            print("File Saved")
        elif isinstance(choice, str) and bound > 50:
            print("Non-numeric value entered")
            raise ValueError
        elif isinstance(choice, int) and choice > bound:
            print("Value entered is out of bounds")
            raise ValueError
        else:
            print("Unknown command")
        return -1, save
    return choice, save
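# The contract of validate_choice as written, shown on invented inputs:
#   validate_choice('3', 10)    ->  (3, False)   in range, echoed back
#   validate_choice('S', 10)    ->  (-1, True)   menu save shortcut (bound <= 11)
#   validate_choice('abc', 10)  ->  (-1, False)  falls through to "Unknown command"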
def create_db(self, dest_file, src_path, suffix=["sgf", "sgf_"]):
    tar = None
    try:
        tar = tarfile.open(dest_file, "w:")
        path = os.path.normpath(src_path.strip())
        for root, dirs, files in os.walk(path):
            for fn in files:
                if FileUtility.fileExt(fn) not in suffix:
                    continue
                fullpath = os.path.join(root, fn)
                tar.add(fullpath)
    except Exception, e:
        print e
    finally:
        if tar:
            tar.close()
def read(self, filename, isAbs=False):
    if not isAbs:
        filename = FileUtility.getAbsFilePath(filename)
    fp = open(filename)
    content = ""
    try:
        content = fp.read()
    except:
        fp.close()
        return ErrorCode.Status.READ_FILE_ERROR, content
    fp.close()
    return ErrorCode.Status.SUCC, content
def fetch_core(post_id, access_token, browser):
    if not os.path.isdir("data/posts/" + post_id):
        os.mkdir("data/posts/" + post_id)
    else:
        print 'File already exists, skip!!'
        return
    print 'In FetchPostsDetail.py parse ' + post_id
    url = 'https://graph.facebook.com/' + post_id + ('?access_token=%s' % access_token)
    res = try_brower(browser, url)
    fres = res.read()
    jdata = json.loads(fres)
    fres = json.dumps(jdata, ensure_ascii=False)
    FileUtility.write("data/posts/" + post_id + "/content.json", fres)
    url = 'https://graph.facebook.com/' + post_id + '/likes' + ('?access_token=%s' % access_token)
    res = try_brower(browser, url)
    fres = res.read()
    jdata = json.loads(fres)
    fres = json.dumps(jdata, ensure_ascii=False)
    FileUtility.write("data/posts/" + post_id + "/likes.json", fres)
    url = 'https://graph.facebook.com/' + post_id + '/comments' + ('?access_token=%s' % access_token)
    res = try_brower(browser, url)
    fres = res.read()
    jdata = json.loads(fres)
    fres = json.dumps(jdata, ensure_ascii=False)
    FileUtility.write("data/posts/" + post_id + "/comments.json", fres)
def write(self, filename, content, isAbs=False):
    processingFilename = self.__get_tmp_filename(filename)
    if not isAbs:
        processingFilename = FileUtility.getAbsFilePath(processingFilename)
    fp = open(processingFilename, 'w')
    try:
        fp.write(content)
    except:
        fp.close()
        return ErrorCode.Status.SAVE_FILE_ERROR
    fp.close()
    shutil.move(processingFilename, filename)
    return ErrorCode.Status.SUCC
def read_tree_of_life(self, file_in):
    FileUtility.isValid(file_in)
    to_read = FileUtility.countLines(file_in)
    sys.stderr.write(str(to_read) + " lines to read to construct tree\n")
    num_line = 0
    for line in open(file_in):
        parent = int(line.split()[0])
        kid = int(line.split()[1])
        if kid == parent:
            sys.stderr.write("Warning: I can't create a link from %s to %s (line %s)\n" % (parent, kid, num_line))
            continue
        if self.node_exist(parent):
            if self.node_exist(kid):
                self.get_node(parent).add_kid(kid)
                self.get_node(kid).set_parent(parent)
            else:
                new_node = Taxon(kid)
                self.create_node(new_node)
                self.get_node(parent).add_kid(kid)
                self.get_node(kid).set_parent(parent)
        else:
            if self.node_exist(kid):
                new_node = Taxon(parent)
                self.create_node(new_node)
                self.get_node(kid).set_parent(parent)
                self.get_node(parent).add_kid(kid)
            else:
                new_kid = Taxon(kid)
                new_parent = Taxon(parent)
                new_kid.set_parent(parent)
                new_parent.add_kid(kid)
                self.create_node(new_kid)
                self.create_node(new_parent)
        num_line += 1
        if num_line % 100000 == 0:
            sys.stderr.write(str(num_line) + " lines read\n")
    sys.stderr.write("Tree base constructed\n")
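# read_tree_of_life expects one whitespace-separated "parent kid" taxon-ID
# pair per line, as the parsing above implies; a tiny hypothetical input
# (IDs invented) would be:
#
#   1 2
#   1 3
#   2 4
#   2 5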
def testNoneListType(self):
    ''' Test that an empty list is returned, although an invalid type is provided, and a warning is thrown '''
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        returned_type = fut.get_all_code_files('../', None)
        self.assertIsInstance(returned_type, list)
        self.assertEqual(returned_type, [])
        self.assertEqual(len(w), 1)
        self.assertTrue('Returning empty list..' in str(w[-1].message))
def testSaveIfExistingDirectory(self, mocked_pd_csv, mocked_pl_join, mocked_pl_isdir):
    ''' Test that metric is saved correctly given an existing directory path '''
    # assert mocks
    self.assertIs(pd.Series.to_csv, mocked_pd_csv)
    self.assertIs(Path.joinpath, mocked_pl_join)
    self.assertIs(Path.is_dir, mocked_pl_isdir)
    # create mock values
    mocked_metric = pd.Series([.1, .2, .3], name='metric-mock', dtype=float)
    mocked_pl_isdir.return_value = True  # given directory exists
    mocked_pl_join.return_value = Path('default/directory/file')
    # call function to test
    fut.save_metric_to_file(mocked_metric, 'dummy/directory')
    # assert calls
    mocked_pl_isdir.assert_called_once()
    mocked_pl_join.assert_called_once()
    mocked_pd_csv.assert_called_once()
def append(self, filename, content, isAbs=False):
    processingFilename = self.__get_tmp_filename(filename)
    if os.path.exists(filename):
        shutil.copy(filename, processingFilename)
    if not isAbs:
        processingFilename = FileUtility.getAbsFilePath(processingFilename)
    fp = open(processingFilename, 'a')
    try:
        # seek to the end of the file
        fp.seek(0, 2)
        fp.write(content)
    except:
        fp.close()
        return ErrorCode.Status.SAVE_FILE_ERROR
    fp.close()
    shutil.move(processingFilename, filename)
    return ErrorCode.Status.SUCC
def load_conf(self):
    try:
        conf = FileUtility.readFile(self.conf).replace("\r", "")
        exec conf
        gl = locals()
        for var in self.conf_vars.keys():
            if gl.has_key(var):
                self.conf_vars[var] = gl[var]
        ## patch xiti_home: when run as a daemon, a full path is required
        if 'xiti_home' in self.conf_vars:
            xiti_home = self.conf_vars['xiti_home']
            conf_path, pf = os.path.split(self.conf)
            if not os.path.isabs(xiti_home):
                xiti_home = os.path.normpath(os.path.join(conf_path, xiti_home))
            self.conf_vars['xiti_home'] = xiti_home
        self.xiti_conf = [["", self.conf_vars['xiti_conf']]]
        del self.conf_vars['xiti_conf']
    except Exception, e:
        print e
def create_db(self, src_path, suffix=["sgf", "sgf_"]):
    logdb_file = self.dbfile
    if os.path.exists(logdb_file):
        import FileUtility as fu
        dirname = os.path.dirname(logdb_file)
        if not fu.pathExists(dirname):
            fu.createPath(dirname)
    self.createTable()
    for p in FileUtility.listAll(src_path, just_directory=True, contain_file=True):
        norm_p = p.replace("\\", "/")
        if not norm_p.startswith("/"):
            norm_p = "/" + norm_p
        norm_p = UnicodeUtil.get_unicode_str(norm_p)
        xiti_set_saved = False
        fp = os.path.join(src_path, p.lstrip(os.path.sep))
        fp = UnicodeUtil.get_unicode_str(fp)
        for fn in FileUtility.listfiles(fp):
            # print fn
            if FileUtility.fileExt(fn) not in suffix:
                continue
            if not xiti_set_saved:
                fid = file_id(norm_p)
                self.add_xiti_set(norm_p, fid)
                xiti_set_saved = True
            fn = os.path.join(fp, fn)
            self.add_xiti(fid, fn)
def populate(self, file_name1, taxon_name):
    # Create table...
    curs = self.cursor
    curs.execute("create table GENOME_TO_TAXON (GENOMES integer, TAXON integer, PRIMARY KEY (GENOMES))")
    self.converter.commit()
    # Check if the file is ok
    FileUtility.isValid(file_name1)
    sys.stderr.write("counting lines to prepare converter\n")
    n_lines = FileUtility.countLines(file_name1)
    sys.stderr.write(str(n_lines) + " to read\n")
    readed = 0
    # populate the DB
    for line in open(file_name1):
        genome = int(line.split()[0])
        taxon = int(line.split()[1])
        curs.execute('insert into GENOME_TO_TAXON (GENOMES, TAXON) values (?, ?)', (genome, taxon))
        readed += 1
        if readed % 10000 == 0:
            self.converter.commit()
            sys.stderr.write(str(readed) + " lines read out of " + str(n_lines) + "\n")
    FileUtility.isValid(taxon_name)
    sys.stderr.write("counting lines to prepare converter\n")
    n_lines = FileUtility.countLines(taxon_name)
    sys.stderr.write(str(n_lines) + " to read\n")
    readed = 0
    curs.execute("create table TAXON_NAMES (TAXON integer, NAME VARCHAR(100), RANK VARCHAR(30), "
                 "PRIMARY KEY (TAXON))")
    for line in open(taxon_name):
        taxon = int(line.split("\t")[0])
        name = str(line.split("\t")[1])
        rank = str(line.split("\t")[2]).rstrip("\n")
        curs.execute('insert into TAXON_NAMES (TAXON, NAME, RANK) values (?, ?, ?)', (taxon, name, rank))
        readed += 1
        if readed % 10000 == 0:
            self.converter.commit()
            sys.stderr.write(str(readed) + " lines read out of " + str(n_lines) + "\n")
    # commit any rows left over from the last partial batch
    self.converter.commit()
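# A minimal lookup sketch against the GENOME_TO_TAXON table created above;
# the database path and genome ID here are hypothetical.
import sqlite3

conn = sqlite3.connect('converter.db')
row = conn.execute('select TAXON from GENOME_TO_TAXON where GENOMES = ?',
                   (12345,)).fetchone()
print(row[0] if row else 'unknown genome')
conn.close()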
if __name__ == '__main__':
    # Initialize the needed modules
    CHandler = urllib2.HTTPCookieProcessor(cookielib.CookieJar())
    browser = urllib2.build_opener(CHandler)
    browser.addheaders = [('User-agent', 'InFB - [email protected] - http://ruel.me')]
    urllib2.install_opener(browser)
    FileUtility.user = '******'
    FileUtility.make_data_path()
    user = '******'
    passw = 'plumggmtutu'
    token = Token.get(user, passw)
    fetch_core('100000154563058_407470182627805', token, browser)
'''
This module contains all character related functions and also items,
reagents, armaments, spells, & equipment
'''
import binascii

import OffsetDict
import FileUtility

# Read the contents of the "SAVED.GAM" file
file = FileUtility.read_file()

SEPARATOR = "________________________________"


# A function that validates the character name
def name_check(name):
    if name.isalpha() and len(name) <= 9:
        return True
    else:
        return False


# A function that reads the character name
def read_name(rng):
    rng = [int(n) for n in rng.split("-")]
    name = file[rng[0]:rng[1]]
    name = b''.join(name)
    name = binascii.unhexlify(name).decode()
    return name


# A function that edits the character name
print " -c clade file with [o1g1,o1g2....][o2g1,o2g2...]. For" print " verify_match, this is the coord file" print " -D directory with sequence files" print " -gap Gap file generated by ParseBlast.parse_gap" print " -wdir working dir, default ./" print " -infile tfasty output" print " -top return top match only, default 1 (true)" print "" sys.exit(0) if __name__ == '__main__': util = blast_util() fm = FastaManager.fasta_manager() fu = FileUtility.file_util() pb = ParseBlast.parser() f = fasta = p = o = g = fasta2 = s1 = s2 = b = dat = c = gap = \ D = wdir = infile = pm = db = fdir = outname = "" d = 0 m = 9 w = 5000 W = 3 s = w e = "1" n = 1 r = 0 F = 0 DEBUG = 0 by = 1 bdir = "/usr/bin/blast"
def list_dir(self):
    return FileUtility.listDir(self.dbhome, False, True)
def __init__(self, callback):
    LoadUtility.__init__(self, callback)
    phasesToScan = ['phase_3.5/models']
    self.models = FileUtility.findAllModelFilesInVFS(phasesToScan)
                                  required=False, default=0.00001)
    self.parser_plot.add_argument("-t", type=str,
                                  help="File type: db or taxonomy. Default = taxonomy",
                                  required=False, default="taxonomy")
    self.parser_plot.add_argument("-value", type=bool,
                                  help="Show values for each bar",
                                  required=False, default=False)
    self.parser_plot.add_argument("-multi", type=bool,
                                  help="If true, the -f file contains a list of biological " +
                                       "abundance files to plot in the same figure",
                                  required=False, default=False)
    self.parser_plot.add_argument("-stacked", type=bool,
                                  help="Create a stacked bar graph instead of the " +
                                       "normal graph",
                                  required=False, default=False)
    self.parser_plot.add_argument("-o", type=str,
                                  help="Name of the output file. Will not open a new window",
                                  required=False, default=None)
    # parse the args...
    self.Arguments = vars(self.parser.parse_args(args))

# Needed to get the args
def getArguments(self):
    return self.Arguments

def getParser(self):
    return self.parser


if __name__ == "__main__":
    parser = OptionParser(sys.argv[1:])
    arg = parser.getArguments()
    print(arg)
    print(arg['f'])
    print(FileUtility.isValid(arg['f']))
def edit_single_attr(new_attr, rng):
    rng = int(rng)
    new_attr = hex(int(new_attr)).split('x')
    new_attr = new_attr[1].encode().zfill(2)
    FileUtility.change_value(new_attr, rng)
def __init__(self, callback):
    LoadUtility.__init__(self, callback)
    phasesToScan = ['models', 'phase_3/models']
    self.models = FileUtility.findAllModelFilesInVFS(phasesToScan)
    self.version_lbl = None
    return
CONF_DIR = "conf" LOG_DIR = "log" WEBUI_DIR = "webui" XITI_DIR = "sgf" XITIDB_DIR= "db" WEBUI_HOME= os.path.abspath(os.path.join("..", WEBUI_DIR)) XITI_HOME = os.path.abspath(os.path.join("..", XITI_DIR)) CONF_FILE = "websrv.conf" CONF_HOME = os.path.abspath(os.path.join(HOME, CONF_DIR)) LOG_HOME = os.path.abspath(os.path.join(HOME, LOG_DIR)) XITI_DB_HOME= os.path.abspath(os.path.join(CONF_HOME, XITIDB_DIR)) if not os.path.exists(XITI_DB_HOME): import FileUtility as fu fu.createDir(XITI_DB_HOME) SRV_CONF = os.path.join(CONF_HOME, CONF_FILE) DB_FILE = os.path.abspath(os.path.join(LOG_HOME, "mbdb.dat")) LOG_FILE = os.path.abspath(os.path.join(LOG_HOME, "log.txt")) INDEX_PAGE= os.path.abspath(os.path.join(WEBUI_HOME, "weiqi.htm")) XITI_CONF = os.path.abspath(os.path.join(CONF_HOME, "xiti.conf")) SGF_DB = os.path.abspath(os.path.join(XITI_HOME, "sgf.db")) ## sgf tar SGF_DB_FILE = 0 SGF_DB_TAR = 1 SGF_DB_SQL = 2 bool_tbl = {'True': True, 'False': False} def get_websrv_conf():
def feed(access_token):
    dir_path = FileUtility.get_feed_data_path()
    print 'now fetching = feed\n'
    fetch_core(dir_path, "me/feed", access_token)
def __init__(self, filePath):
    self.filePath = filePath
    readData = FileUtility.readFile(filePath)
    self.height = MerkleTree.getHeightOfMerkleTree(self.filePath, MerkleTree.DEFAULT_CHUNK_SIZE)
    print "filePath: " + self.filePath + " Length: " + str(len(readData)) + " Height: " + str(self.height)
    self.buildTree(readData)
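# getHeightOfMerkleTree is not shown above; one common definition (my
# assumption only -- the class may count levels differently) is the depth of
# the smallest complete binary tree over the fixed-size chunks:
import math
import os

def merkle_height(file_path, chunk_size):
    # number of leaf chunks, rounded up; at least one leaf for an empty file
    leaves = max(1, int(math.ceil(os.path.getsize(file_path) / float(chunk_size))))
    return int(math.ceil(math.log(leaves, 2)))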
def posts(access_token):
    dir_path = FileUtility.get_posts_data_path()
    print 'now fetching = posts\n'
    fetch_core(dir_path, "me/posts", access_token)
def get_file_base64_content(self, fn):
    content = FileUtility.fileRead(fn)
    if content:
        return base64.b64encode(content)
    else:
        return None
print (" -c clade file with [o1g1,o1g2....][o2g1,o2g2...]. For") print (" verify_match, this is the coord file") print (" -D directory with sequence files") print (" -gap Gap file generated by ParseBlast.parse_gap") print (" -wdir working dir, default ./") print (" -infile tfasty output") print (" -top return top match only, default 1 (true)") print ("") sys.exit(0) if __name__ == '__main__': util = blast_util() fm = FastaManager.fasta_manager() fu = FileUtility.file_util() pb = ParseBlast.parser() f = fasta = p = o = g = fasta2 = s1 = s2 = b = dat = c = gap = \ D = wdir = infile = pm = db = fdir = outname = "" d = 0 m = 9 w = 5000 W = 3 s = w e = "1" n = 1 r = 0 F = 0 DEBUG = 0 by = 1 bdir = "/usr/bin/blast"
def open_book(self, book_name):
    self._book_handler = FileUtility.bookOperations(book_name)
    self._book_handler.open_book()
    self._cache = self._book_handler.fill_cache()
def edit_name(new_name, rng):
    rng = [int(n) for n in rng.split("-")]
    new_name = binascii.hexlify(new_name.encode())
    new_name = new_name.ljust(20, b'0')
    new_name = [(new_name[i:i + 2]) for i in range(0, len(new_name), 2)]
    FileUtility.change_value(new_name, rng, value_flag=1)
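# A worked illustration of the padding above (the name 'Ava' is invented):
# it hexlifies to b'417661', is right-padded to 20 hex digits (10 bytes),
# and splits into byte pairs.
import binascii

name = binascii.hexlify('Ava'.encode()).ljust(20, b'0')
pairs = [name[i:i + 2] for i in range(0, len(name), 2)]
assert pairs[:4] == [b'41', b'76', b'61', b'00']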