def main(argv):
    """Parse -i/-o options and expand @THEME_DIRS@ in the input file."""
    arg_parser = argparse.ArgumentParser(description='Replace @THEME_DIRS@')
    arg_parser.add_argument('-i', '--input', help='Input file')
    arg_parser.add_argument('-o', '--output', help='Output file')
    opts = arg_parser.parse_args()
    check_required_args(opts, ['input', 'output'])
    # The marker expands to a comma-separated directory list with a
    # trailing comma.
    replace(opts.input, opts.output, '@THEME_DIRS@', ','.join(icon_dirs) + ',')
def main(argv):
    """Substitute @THEME_DIRS@ in the file named by -i, writing to -o."""
    p = argparse.ArgumentParser(description='Replace @THEME_DIRS@')
    for flag, name in (('-i', '--input'), ('-o', '--output')):
        p.add_argument(flag, name, help='%s file' % name[2:].capitalize())
    parsed = p.parse_args()
    check_required_args(parsed, ['input', 'output'])
    theme_dirs = ','.join(icon_dirs) + ','  # trailing comma is intentional
    replace(parsed.input, parsed.output, '@THEME_DIRS@', theme_dirs)
def _tpl(self):
    '''
    Handle templates.

    Templates only need timestamping and variable substitution.  Text
    files found under the extra {compile_dir} directory are prepended so
    they are processed first.
    '''
    fs = utils.FileSearcher(r'\.%s$' % C('template_ext'), self._build_tpl_dir, relative=False)
    tpls = fs.search()
    if self._compile_dir:
        nfs = utils.FileSearcher(r'.+', self._build_compile_dir, relative=False)
        for f in nfs.search():
            if not utils.isBinary(f):
                tpls.insert(0, f)
    for tpl in tpls:
        try:
            content = utils.readfile(tpl)
            # Static resources referenced from templates must always be
            # absolute paths rooted at the cwd.
            content = allt(content, self._build_dir, force_abspath=False)
            content = replace(content, self._target)
            content = removeCssDepsDeclaration(content)
            utils.writefile(tpl, content)
        except Exception as e:  # FIX: `except Exception, e` is Python-2-only
            if self._force:
                log.error('[tpl]%s' % e)
            else:
                raise  # FIX: bare raise preserves the original traceback
def get(self):
    """Render trade.html from cached market data and write the response."""
    template = jinja_environment.get_template('trade.html')
    # Every value except the static page head comes straight out of
    # memcache under the same name the template uses.
    cached_keys = (
        'nearRate', 'nextRate', 'near', 'next', 'nearT', 'nextT', 'now',
        'nearOps', 'nextOps', 'nearChain', 'nearF', 'nearK', 'nearSigma',
        'nearPSigma', 'nearCSigma', 'nextChain', 'nextF', 'nextK',
        'nextSigma', 'nextPSigma', 'nextCSigma', 'quote', 'real', 'costT',
    )
    template_values = {key: memcache.get(key) for key in cached_keys}
    template_values['head'] = cst.head
    self.response.out.write(myreplace.replace(template.render(template_values)))
def _css(self):
    '''
    Handle CSS.

    r.js rewrites URLs when merging CSS from different directories, so a
    path starting with an @placeholder@ would be treated as relative and
    rewritten incorrectly.  The workaround is to substitute variables and
    append timestamps in every CSS file *before* r.js merges them.  This
    is slower but correct, so it is only supported at build time, not on
    the development server.  All static resource paths should be
    absolute; avoid relative image URLs inside CSS.
    '''
    # Find every CSS file under the build directory.
    all_css_files = utils.FileSearcher(r'\.css$', self._build_css_dir, relative=False).search()
    # Substitute variables and append timestamps.
    for dst in all_css_files:
        try:
            content = utils.readfile(dst)
            content = all_url(content, os.path.dirname(dst))
            content = replace(content, self._target)
            utils.writefile(dst, content)
        except Exception as e:  # FIX: `except Exception, e` is Python-2-only
            if self._force:
                log.error('[css]%s' % e)
            else:
                raise  # FIX: bare raise preserves the original traceback
def render(self,build = False): ''' 查找数据文件依赖并渲染模板 ''' #remove '/'s at start if self.__html is None: data = self.getData() multoken = self.__token.split('/') data.update({'_token': self.__token.replace('/','_')}) data.update({'_folder':multoken[0]}) data.update({'_subtoken':multoken[1] if len(multoken)>1 else ""}) tpl_path = self.__token + "." + C('template_ext') html = render_file( tpl_path,data,False,build) if C('server_add_timestamp'): #html = html_script(html) #html = html_link(html) #html = html_img(html) #html = all_url(html) html = allt(html) html = replace(html) if not build and not re.match(r'[\s\S]*?<html[\s\S]+?<body',html,re.I): #sub template css_deps = self.__getDepsCss(html) for tpl in self.__include_deps: css = os.path.join('.',C('static_dir'),C('css_dir'),re.sub(r"\.%s"%C('template_ext'),".css",tpl)) css_deps.append(css) subparent = 'subparent.tpl'# os.path.join(BASE_DIR,"tpl",'subparent.tpl') html = render_file(subparent,{'name': self.__token,'content': html,'required_css': css_deps},noEnvironment = True) html = removeCssDepsDeclaration(html) self.__html = html return self.__html
def get(self):
    """Render index.html with deployment info and write the response."""
    now = datetime.datetime.now(GMT5())
    context = {
        'author': 'Deployed @ Google App Engine',
        'time': now.strftime("%Y-%b-%d %H:%M:%S"),
        'head': cst.head,
        'version': version.get_version(),
    }
    page = jinja_environment.get_template('index.html')
    self.response.out.write(myreplace.replace(page.render(context)))
def python(self):
    """Return the quoted list of supported Python versions.

    Reads the trove classifiers when setup.py exists; strips the bare
    major versions 2/3 and falls back to "3.6" when nothing remains.
    """
    if os.path.exists("setup.py"):
        pyversions = Classifiers().pyversions
        # Map versions without a stable release to their -dev names.
        # BUG FIX: 3.8 previously mapped to "3.7-dev" (copy-paste error).
        pyversions = replace.replace(pyversions, {3.7: "3.7-dev", 3.8: "3.8-dev"})
        for r in [2, 3]:
            if r in pyversions:
                pyversions.remove(r)
        if pyversions:
            return list(map(quote, pyversions))
    return [quote("3.6")]
def changeFontName(fileName):
    """Rewrite the internal font name of Fonts/<fileName>.ttf.

    Dumps the font to .ttx, replaces the spaced variant of the name with
    the unspaced one, rebuilds the .ttf and removes the .ttx.
    """
    # make the ttx file
    subprocess.run(["ttx", "Fonts/" + fileName + ".ttf"])
    # Build the name with a space inserted before every capital letter
    # that is not the first character.
    # BUG FIX: the old code compared the character *value* against
    # fileName[0], so a repeated first letter (e.g. the second 'A' in
    # "AbcAbc") never received a space; compare the position instead.
    spacedName = ""
    for i, c in enumerate(fileName):
        if c.isupper() and i != 0:
            spacedName += " "
        spacedName += c
    # look for spacedName string in fileName.ttx, replace with fileName
    replace.replace("Fonts/" + fileName + ".ttx", spacedName, fileName)
    # make new ttf file
    subprocess.run(["ttx", "-f", "Fonts/" + fileName + ".ttx"])
    # delete ttx file
    os.remove("Fonts/" + fileName + ".ttx")
def main(argv):
    """Generate gdk-pixbuf-2.0.pc from its .pc.in template."""
    base_pc = BasePCItems()
    base_pc.setup(argv)

    replacements = {
        '@GDK_PIXBUF_API_VERSION@': '2.0',
        '@GDK_PIXBUF_BINARY_VERSION@': '2.10.0',
        '@PNG_DEP_CFLAGS_PACKAGES@': '',
        '@GDK_PIXBUF_EXTRA_LIBS@': '',
        '@GDK_PIXBUF_EXTRA_CFLAGS@': '',
    }
    replacements.update(base_pc.base_replace_items)

    tmp_pc = base_pc.srcdir + '/gdk-pixbuf-2.0.pc.tmp'
    # First pass: expand all @...@ placeholders into the .tmp file.
    replace_multi(base_pc.top_srcdir + '/gdk-pixbuf-2.0.pc.in',
                  tmp_pc, replacements)
    # Second pass: point the module directory at ${exec_prefix}.
    replace(tmp_pc, base_pc.srcdir + '/gdk-pixbuf-2.0.pc',
            '${prefix}/lib/gdk-pixbuf', '${exec_prefix}/lib/gdk-pixbuf')
    os.unlink(tmp_pc)
def main(argv):
    """Produce the final gdk-pixbuf-2.0.pc pkg-config file."""
    items = BasePCItems()
    items.setup(argv)

    # Template placeholders and their expansions.
    subs = dict(items.base_replace_items)
    subs['@GDK_PIXBUF_API_VERSION@'] = '2.0'
    subs['@GDK_PIXBUF_BINARY_VERSION@'] = '2.10.0'
    subs['@PNG_DEP_CFLAGS_PACKAGES@'] = ''
    subs['@GDK_PIXBUF_EXTRA_LIBS@'] = ''
    subs['@GDK_PIXBUF_EXTRA_CFLAGS@'] = ''

    src_template = items.top_srcdir + '/gdk-pixbuf-2.0.pc.in'
    intermediate = items.srcdir + '/gdk-pixbuf-2.0.pc.tmp'
    destination = items.srcdir + '/gdk-pixbuf-2.0.pc'

    # Expand placeholders, then fix up the module directory prefix.
    replace_multi(src_template, intermediate, subs)
    replace(intermediate, destination,
            '${prefix}/lib/gdk-pixbuf', '${exec_prefix}/lib/gdk-pixbuf')
    os.unlink(intermediate)
def main(argv):
    """Produce cairo-1.0.gir from the bundled cairo-1.0.gir.in."""
    parser = argparse.ArgumentParser(description='Generate the complete cairo-1.0.gir')
    parser.add_argument('--dllname', required=True,
                        help='Full file name of the Cairo-GObject DLL for the Cairo Introspection File')
    args = parser.parse_args()

    # Locate the bundled .gir templates shipped with the package.
    srcroot = parent_dir(__file__)
    preset_gir_path = os.path.join(srcroot, 'gir')

    # Two-stage substitution: package name first, then the DLL name.
    tmp_gir = 'cairo-1.0.gir.tmp'
    replace.replace(os.path.join(preset_gir_path, 'cairo-1.0.gir.in'),
                    tmp_gir, '%CAIRO_GIR_PACKAGE%', 'cairo-gobject')
    replace.replace(tmp_gir, 'cairo-1.0.gir',
                    '%CAIRO_SHARED_LIBRARY%', args.dllname)
    os.unlink(tmp_gir)
def get(self):
    """Render project.html from cached (or freshly queried) VIX data."""
    vixs = memcache.get("vixs")
    if vixs is None:
        vixs = db.GqlQuery(" select * from vix ORDER BY marketTime DESC LIMIT 100" )
        if not memcache.add("vixs", vixs, 10):
            logging.error("vixs memcache set failed")
    maturity = memcache.get("maturity")
    near = None
    next = None
    if maturity:
        near = maturity[0].strftime("%Y-%b-%d %H:%M:%S")
        next = maturity[1].strftime("%Y-%b-%d %H:%M:%S")
    # An uncached chain is not iterable; fall back to an empty list.
    try:
        iter(memcache.get("chain1"))
    except TypeError:
        chain1 = []
    else:
        # BUG FIX: this branch previously fetched "chain2", so chain1
        # always displayed the second chain's data.
        chain1 = memcache.get("chain1")
    try:
        iter(memcache.get("chain2"))
    except TypeError:
        chain2 = []
    else:
        chain2 = memcache.get("chain2")
    template_values = {
        'author': 'Deployed @ Google App Engine',
        'time': datetime.datetime.now(GMT5()).strftime("%Y-%b-%d %H:%M:%S"),
        'r4': memcache.get("r4"),
        'r13': memcache.get("r13"),
        'near': near,
        'next': next,
        'chain1': chain1,
        'chain2': chain2,
        'k1': memcache.get("k1"),
        'k2': memcache.get("k2"),
        'T1': memcache.get("nearTerm"),
        'T2': memcache.get("nextTerm"),
        'f1': memcache.get("f1"),
        'f2': memcache.get("f2"),
        'sigma1': memcache.get("sigma1"),
        'sigma2': memcache.get("sigma2"),
        'quoteModel': memcache.get("quoteModel"),
        'loadTime': memcache.get("costT"),
        'vixQuote': memcache.get("quoteReal"),
        'yaTime': memcache.get("yaTime"),
        'vixs': vixs,
    }
    template = jinja_environment.get_template('project.html')
    self.response.out.write(myreplace.replace(template.render(template_values)))
def git_source(meta):
    """Clone the git source described by *meta* into WORK_DIR via a local
    mirror cache, check out the requested tag/branch (default 'master'),
    and initialise submodules if requested.  Returns WORK_DIR.
    """
    if not isdir(GIT_CACHE):
        os.makedirs(GIT_CACHE)
    git_url = meta['git_url']
    # Derive a cache directory name from the URL's path component.
    git_dn = git_url.split(':')[-1].replace('/', '_')
    cache_repo = cache_repo_arg = join(GIT_CACHE, git_dn)
    if sys.platform == 'win32':
        cache_repo_arg = cache_repo_arg.replace('\\', '/')
        if os.getenv('USERNAME') == 'builder':
            # Build bot runs under cygwin; rewrite the drive prefix.
            cache_repo_arg = '/cygdrive/c/' + cache_repo_arg[3:]
    # update (or create) the cache repo
    if isdir(cache_repo):
        check_call(['git', 'fetch'], cwd=cache_repo)
    else:
        check_call(['git', 'clone', '--mirror', git_url, cache_repo_arg])
        assert isdir(cache_repo)
    # now clone into the work directory
    checkout = meta.get('git_tag') or meta.get('git_branch') or 'master'
    # FIX: print statement was Python-2-only; this form works on 2 and 3.
    print('checkout: %r' % checkout)
    check_call(['git', 'clone', cache_repo_arg, WORK_DIR])
    check_call(['git', 'checkout', checkout], cwd=WORK_DIR)
    if meta.get('git_submodules'):
        check_call(['git', 'submodule', 'init'], cwd=WORK_DIR)
        if sys.platform == 'win32':
            # Use SSH instead of HTTPS for submodule fetches on Windows.
            from replace import replace
            replace([('https://github.com/', '[email protected]:')],
                    join(WORK_DIR, '.git', 'config'), assert_change=False)
        check_call(['git', 'submodule', 'update'], cwd=WORK_DIR)
    git_info()
    return WORK_DIR
def word_combi(word):
    """Return all edit-distance-1 variants of *word*: deletions,
    adjacent transpositions, replacements and insertions (deduplicated,
    order unspecified)."""
    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    deletions = [left + right[1:] for left, right in splits if right]
    transpositions = [
        left + right[1] + right[0] + right[2:]
        for left, right in splits
        if len(right) > 1
    ]
    candidates = set(deletions)
    candidates.update(transpositions)
    candidates.update(replace.replace(word))
    candidates.update(insert.insert(word))
    return list(candidates)
def _replace(self):
    '''
    Substitute variables in every text file under the build directory.
    '''
    files = utils.FileSearcher(r'.+', self._build_dir).search()
    for f in files:
        f = os.path.join(self._build_dir, f)
        if not utils.isBinary(f):
            try:
                content = utils.readfile(f)
                content = replace(content, self._target)
                utils.writefile(f, content)
            except Exception as e:  # FIX: `except Exception, e` is Python-2-only
                if self._force:
                    log.error('[replace][%s]%s' % (f, e))
                else:
                    # BUG FIX: the bare expression `e` silently discarded
                    # the error; re-raise so failures are not swallowed.
                    raise
def build_js(self, src, dst, base_dir):
    '''
    Build one JS file from src to dst.

    Merges via r.js, substitutes variables, adds timestamps and
    compresses on demand.
    '''
    js = os.path.relpath(src, base_dir)
    # NOTE(review): all commands below are built by string concatenation
    # and run with shell=True — paths containing spaces or shell
    # metacharacters will break (or allow injection); list-form
    # subprocess calls would be safer.
    subprocess.call(
        'node ' + RJS_PATH + ' -o name=' + js[0:-3] + ' out=' + dst + ' optimize=none baseUrl='
        + base_dir, shell=True)
    # replace
    content = utils.readfile(dst)
    content = replace(content, self._target)
    utils.writefile(dst, content)
    if C('js_ascii_only'):
        # Re-encode the output to ASCII-only escapes.
        subprocess.call('node ' + RPL_PATH + ' ' + dst + ' ' + dst, shell=True)
    if self._compress:
        # YUI Compressor pass, in place.
        subprocess.call(
            'java -jar ' + YC_PATH + ' --type js --charset ' + C('encoding') + ' ' + dst
            + ' -o ' + dst, shell=True)
def main():
    """Replace eyes in the base image using material from the eye image."""
    base = cv.imread(args['base'])
    material = cv.imread(args['eye'])

    # Pair faces between the two images.  With -a the images were not
    # taken sequentially, so pair the extreme marks instead.
    if args['a']:
        matches = [(extreme_mark(base, face_detect, mark_detect, min),
                    extreme_mark(material, face_detect, mark_detect, max))]
    else:
        bad = blink_faces(base, face_detect, mark_detect)
        matches = matchfaces(base, bad, material, face_detect, mark_detect)

    # outputs replacement marking of the two input images
    #cv.imwrite(args['base'].replace('in/','out/'), mark(base, [m[0] for m in matches]))
    #cv.imwrite(args['eye'].replace('in/','out/'), mark(material, [m[1] for m in matches]))

    mask_acc = np.zeros(base.shape, base.dtype)
    for base_face, material_face in matches:
        base, mask = replace(base, base_face, material, material_face)
        mask_acc += mask

    # outputs the mask used for replacement
    #cv.imwrite(args['base'].replace('in/','out/').replace('_c', '_mask'), mask_acc)
    cv.imwrite(args['out'], base)
def setup_vars_tools(module, func, srcfile, outfile):
    """Instantiate a tool wrapper script from its template.

    Expands @PYTHON@, @TOOL_MODULE@ and @TOOL_FUNCTION@ in sequence.
    @PYTHON@ doesn't really matter for cmd.exe usage, but it is set in
    case one wants to use MinGW with the generated scripts.
    """
    # Each stage reads the previous stage's output and the intermediate
    # file is deleted once consumed.
    stages = [
        (srcfile, outfile + '.tmp0', '@PYTHON@', 'python'),
        (outfile + '.tmp0', outfile + '.tmp', '@TOOL_MODULE@', module),
        (outfile + '.tmp', outfile, '@TOOL_FUNCTION@', func),
    ]
    for stage_src, stage_dst, marker, value in stages:
        replace.replace(stage_src, stage_dst, marker, value)
        if stage_src != srcfile:
            os.unlink(stage_src)
#!/usr/bin/env python
"""Demonstrate replace.replace on a string: file path -> dotted module."""
import replace

path = "package/module.py"
dotted = replace.replace(path, {"/": ".", ".py": ""})
assert dotted == "package.module"
def __init__(self, fileName, ignoreList, shortcutList, response):
    """Process *fileName* line by line, replacing repeated words with
    synonyms, and write the result via WriteNewFile.

    NOTE(review): source indentation was lost; the nesting below was
    reconstructed from the statement order — confirm against VCS.

    :param fileName: UTF-8 text file to process.
    :param ignoreList: words excluded from frequency counting.
    :param shortcutList: mapping of shortcuts expanded before processing
        (its values are also added to ignoreList); may be None.
    :param response: 1 = process sentence-by-sentence, else whole-line.
    """
    global filename
    if shortcutList != None:
        ignoreList = ignoreList + list(shortcutList.values())
    with open(fileName, encoding="utf-8") as f:
        data = f.readlines()
        f.close()  # NOTE(review): redundant inside `with`
    io = WriteNewFile.fileCreator()
    #I tried to make this flag system it's own method
    #But it needs to be in here.
    #It's so it can print out a sort of loading bar
    a = False
    b = False
    c = False
    d = False
    e = False
    f = False
    remainingTime = len(data)
    tracker = 0
    flagCnt = 1
    for x in data:
        #print(x)
        #print("\n")
        #This is where it prints out the progress.
        flag = LoadingBar.bar(tracker, remainingTime, a, b, c, d, e, f)
        if flag == True:
            # Advance the next loading-bar segment flag.
            if flagCnt == 1:
                a = True
                flagCnt += 1
            elif flagCnt == 2:
                b = True
                flagCnt += 1
            elif flagCnt == 3:
                c = True
                flagCnt += 1
            elif flagCnt == 4:
                d = True
                flagCnt += 1
            elif flagCnt == 5:
                e = True
                flagCnt += 1
            elif flagCnt == 6:
                f = True
                flagCnt += 1
        if shortcutList != None:
            # Expand shortcuts before analysing the line.
            for vals in shortcutList:
                x = x.replace(vals, shortcutList[vals])
        loweredArray = x.lower()
        if response == 1:
            # Sentence mode: count word frequency per sentence.
            sentences = loweredArray.split('.')
            for y in sentences:
                splitArray = re.findall(r"[\w']+", y)
                freqTable = DataManager.manager(ignoreList, splitArray)
                #print(freqTable)
                #replaceEngine(freqTable) would replace this part of the code
                wordsToReplace = []
                for y in freqTable:
                    if freqTable.get(y) != 1:
                        wordsToReplace.append(y)
                #print(wordsToReplace)
                for y in wordsToReplace:
                    wordsToUse = syn.synonyms(y)
                    #print("\nSynonyms for '" + y + "' are: ",wordsToUse)
                    #print('\n')
                    x = repl.replace(y, x, wordsToUse)
                    #print("\nnew string: " + x+"\n")
            tracker += 1
            WriteNewFile.newFile(x, io)
        else:
            # Whole-line mode.
            splitArray = re.findall(r"[\w']+", loweredArray)
            freqTable = DataManager.manager(ignoreList, splitArray)
            #print(freqTable)
            #replaceEngine(freqTable) would replace this part of the code
            toReplace = self.replaceEngine(freqTable)
            for j in toReplace:
                wordsToUse = syn.synonyms(j)
                #print ("\nsynonyms for " + j +" are ", wordsToUse)
                #print("\n")
                x = repl.replace(j, x, wordsToUse)
                #print("\nnew string: " + x+"\n")
            tracker += 1
            WriteNewFile.newFile(x, io)
    LoadingBar.bar(tracker, remainingTime, a, b, c, d, e, f)
    io.close()
def search(word):
    """Spell-correction search: generate edit-distance-1 candidates for
    *word* and count, on the GPU, how often each occurs in
    text_doc.txt; return (best candidate, elapsed GPU time) or the
    original word if no candidate matches.

    NOTE(review): Python 2 code (print statement, numpy.int, integer
    grid division); indentation reconstructed from statement order.
    """
    # Candidate generation (deletes / transposes / replaces / inserts).
    split_set = [(word[:letter], word[letter:]) for letter in range(len(word) + 1)]
    subset_of_delete = [leftside_set + rightside_set[1:] for leftside_set, rightside_set in split_set if rightside_set]
    transposes = [leftside_set + rightside_set[1] + rightside_set[0] + rightside_set[2:] for leftside_set, rightside_set in split_set if len(rightside_set) > 1]
    replaces = replace.replace(word)
    inserts = insert.insert(word)
    all_sets = list(set(subset_of_delete + transposes + replaces + inserts))
    # Candidate lengths used to pick the kernel's keyword size argument.
    first_if = len(subset_of_delete[0])
    second_if = len(transposes[0])
    third_if = len(inserts[0])
    data = numpy.array(list(open('text_doc.txt').read()))
    number_of_words = len(all_sets)
    # Host/device buffers, one per candidate word.
    cpu_data_word, gpu_data_word = [], []
    for i in range(number_of_words):
        cpu_data_word.append(numpy.array(list(all_sets[i])).astype(numpy.str_))
        gpu_data_word.append(cuda.mem_alloc(sys.getsizeof(cpu_data_word[i])))
    data = data.astype(numpy.str_)
    test_data_gpu = cuda.mem_alloc(sys.getsizeof(data))
    cuda.memcpy_htod(test_data_gpu, data)
    # Per-candidate result vectors (1 at each match position).
    cpu_result_test_data_set, result_data_word = [], []
    for i in range(number_of_words):
        cpu_result_test_data_set.append(numpy.zeros(shape=(1, len(data))).astype(numpy.int))
        result_data_word.append(cuda.mem_alloc(sys.getsizeof(cpu_result_test_data_set[i])))
    for k in range(number_of_words):
        cuda.memcpy_htod(gpu_data_word[k], cpu_data_word[k])
        cuda.memcpy_htod(result_data_word[k], cpu_result_test_data_set[k])
    # One stream per candidate so kernels can overlap.
    stream = []
    for i in range(number_of_words):
        stream.append(cuda.Stream())
    mod = SourceModule("""
    __global__ void searchKeywordKernel(int *result, char *data, char *keyword, int size)
    {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        int value = 0;
        if(data[i-1] == ' ' && (data[i] == keyword[0])){
            for(int k = 1; k < size ; k++ ){
                if(data[i + k] != keyword[k]){
                    value = 0;
                    break;
                }
                value = 1;
            }
            if (value == 1 && data[i+size] == ' '){
                result[i] = 1;
            }
        }
    }
    """)
    func = mod.get_function("searchKeywordKernel")
    ran = 0
    start = time.time()
    # Launch one kernel per candidate; the size argument is selected by
    # which candidate-length bucket the word falls into.
    for k in range(number_of_words):
        if (len(cpu_data_word[k]) == first_if):
            func(result_data_word[k], test_data_gpu, gpu_data_word[k], numpy.int32(first_if), block=(512, 1, 1), grid=(len(data)/512, 1, 1), stream=stream[k])
        elif (len(cpu_data_word[k]) == second_if):
            func(result_data_word[k], test_data_gpu, gpu_data_word[k], numpy.int32(second_if), block=(512, 1, 1), grid=(len(data)/512, 1, 1), stream=stream[k])
        elif (len(cpu_data_word[k]) == third_if):
            func(result_data_word[k], test_data_gpu, gpu_data_word[k], numpy.int32(third_if), block=(512, 1, 1), grid=(len(data)/512, 1, 1), stream=stream[k])
    cuda.Stream().synchronize()
    end = time.time()
    ran += (end - start)
    print "run time = ", ran
    for k in range(number_of_words):
        cuda.memcpy_dtoh(cpu_result_test_data_set[k], result_data_word[k])
    pycuda.tools.clear_context_caches()
    # Pick the candidate with the most matches in the corpus.
    word_max = None
    max_value = 0
    for k in range(number_of_words):
        result = cpu_result_test_data_set[k]
        total_matches = 0;
        for i in range(len(data)):
            if result[0][i] == 1:
                total_matches += 1
        if total_matches >= 1 and total_matches > max_value:
            max_value = total_matches
            word_max = cpu_data_word[k]
    if word_max is None:
        return word, ran
    return ''.join(word_max), ran
#!/usr/bin/env python
"""Demonstrate replace.replace on a list: map versions to -dev names."""
import replace

versions = ["3.6", "3.7", "3.8"]
updated = replace.replace(versions, {"3.7": "3.7-dev", "3.8": "3.8-dev"})
assert updated == ["3.6", "3.7-dev", "3.8-dev"]
# Brute-force search: walk the pass-list directories and compare each
# candidate of the requested length against the known answer.
count = 0
run = True
answer = input("Enter answer: \n")
pass_range = input("Enter password range: \n")
for root, dire, files in os.walk(path):
    if run:
        for val in files:
            if run:
                print('-------------')
                print(val)
                print('-------------')
                # FIX: use a with-statement so the wordlist is closed even
                # when we break out early (the original leaked the handle).
                with open('pass-list/' + str(val)) as file:
                    for line in file:
                        # Strip newlines and spaces from the candidate.
                        password = replace.replace(line, {'\n': '', ' ': ''})
                        if len(password) == int(pass_range):
                            count += 1
                            print('Password №%s: %s' % (str(count), password))
                            if password == answer:
                                print('------------------------')
                                # FIX: this statement was corrupted in the
                                # source; restored to print the match.
                                print('Correct password: ' + password)
                                print('------------------------')
                                run = False
                                break
if run:
    print("---------------------------")
    print("Can't find correct password")
    print("---------------------------")
from replace import replace
from s3 import s3

replace()  # Rewrite the URLs of images and fonts for the deployed site.
s3()  # Upload every directory and subdirectory under the public folder.
def save(self):
    """
    Saves the layer in the selected folder of the user's iRODS datastore.

    Creation failing (object already exists) triggers the replace/rename
    dialog flow; otherwise the layer payload is copied straight into the
    freshly created object.  Afterwards the dialog buttons are reset.
    """
    try:
        obj = self.auth_object['sess'].data_objects.create(str(self.current_store + '/' + self.current_layer))
    except:  # NOTE(review): bare except — narrow to the iRODS "exists" exception if possible
        r = replace()
        selection = r.selection
        print(selection)  # FIX: Python-2-only print statement
        if not selection:
            # User chose not to overwrite: ask for a new name.
            new_name = new_file_name()
            val = new_name.file_name
            if val:
                if self.current_layer[-4:] == '.shp':
                    self.save_shp(self.current_store + '/' + val)
                else:
                    name = self.current_store + '/' + val + self.current_layer[-4:]
                    name = str(name)
                    obj = self.auth_object['sess'].data_objects.create(name)
                    file = obj.open('r+')
                    file2 = open(self.sources[self.current_layer], 'r+')
                    payload = file2.read()
                    file.write(payload)
                    file.close()
                    file2.close()
            else:
                e = error("Cannot leave blank")
                pass
        else:
            # Overwrite the existing object.
            if self.current_layer[-4:] == '.shp':
                self.save_shp(self.current_store + '/' + self.current_layer[:-4], get=True)
            else:
                obj = self.auth_object['sess'].data_objects.get(str(self.current_store + '/' + self.current_layer))
                file2 = open(self.sources[self.current_layer], 'r+')
                payload = file2.read()
                file = obj.open('r+')
                file.write('')
                file.write(payload)
                file.close()
                file2.close()
    else:
        # Fresh object created successfully: copy the payload across.
        if self.current_layer[-4:] == '.shp':
            self.save_shp(str(self.current_store + '/' + self.current_layer[:-4]))
        else:
            file = obj.open('r+')
            file2 = open(self.sources[self.current_layer], 'r+')
            payload = file2.read()
            file.write(payload)
            file.close()
            file2.close()
    # Reset the dialog buttons to their idle state.
    self.pushButton_2.clicked.disconnect()
    self.pushButton_2.setText(_translate("Dialog", "Save to iRods", None))
    self.pushButton_2.setEnabled(False)
    self.pushButton_2.clicked.connect(self.save_to_irods)
    self.pushButton_6.clicked.disconnect()
    self.pushButton_6.setText(_translate("Dialog", "Log Out", None))
    self.pushButton_6.clicked.connect(self.logout)
    self.frame_2.setGeometry(QtCore.QRect(254, 10, 0, 0))
    self.pushButton_4.setEnabled(False)
    self.create_export_tree()
import os
from os.path import join, getsize
from replace import replace

# Walk the current tree and rename the app identifier inside every
# HTML/JS file.
for root, dirs, files in os.walk("."):
    for filename in files:
        filepath = os.sep.join([root, filename])
        realpath = os.path.realpath(filepath)
        _, ext = os.path.splitext(realpath)
        # print(ext)
        if ext in [".html", ".js"]:
            # FIX: Python-2-only print statement → print() works on 2 and 3.
            print(realpath)
            replace(realpath, "myApp", "maboApp")
#strip file extensions from names fontList = [f[0:-4] for f in fontFilesList] #for every font with a .ttf in Fonts\ but no corresponding folder for font in fontList: thisFontDir = fontsDir + '\\' + font if not exists(thisFontDir): #make directory in Fonts\ makedirs(thisFontDir) #call changeFontName.py with argument font changeFontName(font) #change name of font within config.bmfc in two places replace('config.bmfc', '(?<=fontName=)(.*)', font) replace('config.bmfc', '(?<=fontFile=Fonts/)(.*?)(?=\.ttf)', font) #for each of the characters '0' - '9' for charNum in range(48, 58): #edit the config file to update the character we want a .png of replace('config.bmfc', 'chars=..', 'chars=' + str(charNum)) #call bmfont to create the .png subprocess.run([ 'bmfont.com', '-c', 'config.bmfc', '-o', 'Fonts/' + font + '/' + str(charNum - 48) + '.fnt' ]) #delete all the .fnt files for f in listdir(thisFontDir):
# Fetch the user's anime list and tally watch-status counters.
# NOTE(review): watching/completed/planned/hold and the nanimeId/nstatus/
# aanimeId lists are initialised outside this fragment.
dropped = 0
req = requests.get(url + userId).json()
noofanime = int(len((req['items'])))
#print('Number of anime:' + str(noofanime) + '\n')
for a in range(noofanime):
    nanimeId.append(req['items'][a]['animeId'])
    nstatus.append(req['items'][a]['status'])
# arranging status
print('Replacing status: ')
for b in range(noofanime):
    w, c, p, h, d, = 0, 0, 0, 0, 0
    # replace.replace maps a raw status entry to (Id, watching, completed,
    # planned, hold, dropped) increments.
    Id, w, c, p, h, d = (replace.replace(nstatus[b]))
    aanimeId.append(Id)
    watching += w
    completed += c
    planned += p
    hold += h
    dropped += d
atotal = watching + completed + planned + hold + dropped
print('Done! ')
# replacing Id's
print('Replacing ID\'s: ')
# NOTE(review): converted ids are appended to the same aanimeId list that
# already holds the status ids above — confirm this is intentional.
for c in range(noofanime):
    aanimeId.append(converter.converter(nanimeId[c]))
# Hangman guess loop.
# NOTE(review): guesses, SecretWord, table and the helpers replace/
# remover/check/vowelcheck are defined outside this fragment; the loop's
# exit conditions are also outside the visible code.
warnings = 3
# Pool of letters the player has not tried yet.
aval = [
    "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
    "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"
]
print("You have {} warnings left".format(warnings))
while True:
    print("-------------")
    print("You have {} guesses left".format(guesses))
    print("Available letters: ", *aval)
    guess = input("Please guess a letter: ").lower()
    if guess in SecretWord and guess in aval:
        # Correct new letter: reveal it in the display table.
        replace(SecretWord, guess, table)
        print("Good Guess: ", *table)
        remover(guess, aval)
    elif guess not in aval and check(guess):
        # Valid letter, but already used: costs a warning.
        warnings -= 1
        print("Oops! You've already guessed that letter.")
        print("You have", warnings, "warnings left: ", *table)
    elif guess not in SecretWord and check(guess):
        # Wrong letter: vowels cost two guesses, consonants one.
        print("Oops! That letter is not in my word: ", *table)
        remover(guess, aval)
        if vowelcheck(guess):
            guesses -= 2
        else:
            guesses -= 1
    else:
        print("Please enter valid character")
def Def_anal(switches, Skarn, Ave_basalt, tornado):
    """Build the default-constant vector and the sensitivity-analysis
    sweep ranges for the techno-economic model.

    NOTE(review): source indentation was lost; the nesting below was
    reconstructed from statement order — confirm against VCS.

    :param switches: 0/1 mask (passed through replace()) selecting which
        constants are swept; zeroed entries are replaced by their range.
    :param Skarn: 1 to use the skarn rock composition.
    :param Ave_basalt: 1 to use the average-basalt composition.
    :param tornado: 1 to return the full variable ranges for a tornado plot.
    :return: (sensitiv_anal_vect, constants, sens_analysis)
    """
    # default current density of the electrolysers
    DPkW = 100
    CO2_Tax = 0.0000000000000000000000001  # effectively zero (avoids /0)
    mineCost = 5
    heatCost = 3
    maxSCM = 6  # 0.3 + 0.1286;
    eCost = 0.02
    r = 0.08
    CH = 1.25
    kWhr_kg = 15
    # Skarns are all over
    # https://www.researchgate.net/profile/Ewan_Pelleter/publication/233801803_World_Skarn_Deposits_-_Skarns_of_Western_Europe/links/00b4952b00d0be7304000000/World-Skarn-Deposits-Skarns-of-Western-Europe.pdf
    # https://journals.lib.unb.ca/index.php/GC/article/view/3773
    if Skarn == 1:
        CaO_Frac_Rock = 0.3  # fraction of CaO in the source rock
        Al2O3_Frac_Rock = 0.05  # fraction of Al2O3 in the source rock
        SiO2_Frac_Rock = 0.35  # fraction of SiO2 in the source rock
        FeO_Frac_Rock = 0.00000000000001
        Fe2O3_Frac_Rock = 0.25  # fraction of Fe2O3 in the source rock
        MgO_Frac_Rock = 0.05  # fraction of MgO in the source rock
    elif (Ave_basalt == 1):
        CaO_Frac_Rock = 0.26
        Al2O3_Frac_Rock = 0.14
        SiO2_Frac_Rock = 0.55
        FeO_Frac_Rock = 0.05
        Fe2O3_Frac_Rock = 0.00
        MgO_Frac_Rock = 0.00
    else:
        CaO_Frac_Rock = 0.305
        Al2O3_Frac_Rock = 0.05
        SiO2_Frac_Rock = 0.595
        FeO_Frac_Rock = 0.05
        Fe2O3_Frac_Rock = 0.000000000000000000000001
        MgO_Frac_Rock = 0.00000000000000000000000001
    CO2int = 56.1 / 1000  # T CO2/GJ heat
    eCO2int = 3.8440e-04  # T CO2/kWhr
    S_Cost = 20  # $/T buying sulfur
    CF = 0.97  # Kiln Capacity Factor
    TPY = 1000000 * CF  # Kiln Output clinker in tonnes per year
    SA_ratio = 0.65  # concentration H2SO4 from the electrolyzer
    Eff = 0.08  # heat to power efficiency for organic rankine cycle, if used
    Rev = -0.000000000000000000000000001
    PPT_SCM = 60  # $/T SCM
    PPT_F = 90
    PPT_Al = 300
    PPT_Agg = 15
    V = 0.56
    W = 2.305
    CD = 0.000000000000000000000000000001
    Al_eff = 0.9
    Fe_eff = 0.9
    SCM_eff = 0.9
    OPC_eff = 0.9
    Agg_eff = 0.9
    CapEx_Fac = 1
    switches = replace(switches)
    constants = np.array([
        DPkW, CO2_Tax, CH, mineCost, heatCost, maxSCM, eCost, kWhr_kg, r,
        CaO_Frac_Rock, Al2O3_Frac_Rock, SiO2_Frac_Rock, FeO_Frac_Rock,
        Fe2O3_Frac_Rock, MgO_Frac_Rock, CO2int, eCO2int, PPT_SCM, S_Cost,
        CF, TPY, SA_ratio, Eff, Rev, PPT_F, PPT_Al, PPT_Agg, V, W, CD,
        Al_eff, Fe_eff, SCM_eff, OPC_eff, Agg_eff, CapEx_Fac
    ])
    ##############################################################################################
    # turn constants that are to be compared into zeros, and make the array a
    # cell so it can have both doubles and arrays in the array
    input_a = constants * switches
    sensitiv_anal_vect = input_a
    # Sweep ranges for each constant.
    # Python Syntax: np.arange(start, stop, step) vs
    # MATLAB Syntax: start: step: stop
    DPkW = np.arange(50, 1050, 50)  # 50:50: 1000
    CO2_Tax = np.arange(-190, 110, 10)  # -190:10: 100
    CH = np.arange(1, 4.05, 0.05)  # 1:.05: 4
    mineCost = np.arange(1, 11, 1)  # 1:1: 10
    heatCost = np.arange(1, 11, 1)  # 1:1: 10
    maxSCM = np.arange(0.4, 5.4, 0.1)  # 0.4:0.1: 5.3
    eCost = np.arange(0.01, 0.11, 0.01)  # 0.01:0.01: 0.1
    kWhr_kg = np.arange(5, 31, 1)  # 5:1: 30
    r = np.arange(0.04, 0.31, 0.01)  # 0.04:0.01: 0.3
    CaO_Frac_Rock = np.arange(0.05, 0.41, 0.01)  # 0.05:0.01: 0.4
    Al2O3_Frac_Rock = np.arange(0.05, 0.41, 0.01)
    SiO2_Frac_Rock = np.arange(0.05, 0.41, 0.01)
    FeO_Frac_Rock = np.arange(0.05, 0.41, 0.01)
    Fe2O3_Frac_Rock = np.arange(0.05, 0.41, 0.01)
    MgO_Frac_Rock = np.arange(0.05, 0.41, 0.01)
    CO2int = np.arange(0, ((200 / 1000) + 0.005), (5 / 1000))  # 0:(5/1000):(200/1000)
    eCO2int = np.arange(0, 10e-04, 1e-04)  # 0:0.1e-04: 10e-04
    S_Cost = np.arange(0, 210, 10)  # 0:10: 200
    CF = np.arange(0.15, 1.01, 0.01)  # 0.15:0.01: 1
    TPY = np.arange(1000000, 5400000, ((5000000 - 1000000) / 10))
    SA_ratio = np.arange(0.1, 0.75, 0.05)  # 0.1:0.05: 0.7
    Eff = np.arange(0.0, 0.31, 0.01)  # 0.0:0.01: 0.3
    Rev = np.arange(0, 210, 10)  # 0:10: 200
    PPT_SCM = np.arange(0, 110, 10)  # 0:10: 100
    PPT_F = np.arange(0, 101, 1)  # 0:1: 100
    PPT_Al = np.arange(0, 410, 10)  # 0:10: 400
    PPT_Agg = np.arange(0, 21, 1)  # 0:1: 20
    V = np.arange(0.2, 1.25, 0.05)  # 0.2:0.05: 1.2
    W = np.arange(0.1, 5.1, 0.1)  # 0.1:0.1: 5
    CD = np.arange(-0.4, 0.5, 0.1)  # -0.4:0.1: 0.4
    Al_eff = np.arange(0.5, 1.1, 0.1)  # 0.5:0.1: 1
    Fe_eff = np.arange(0.5, 1.1, 0.1)  # 0.5:0.1: 1
    SCM_eff = np.arange(0.5, 1.1, 0.1)  # 0.5:0.1: 1
    OPC_eff = np.arange(0.5, 1.0, 0.1)  # 0.5:0.1: 0.9
    Agg_eff = np.arange(0.5, 1.1, 0.1)  # 0.5:0.1: 1
    CapEx_Fac = np.arange(0.5, 2.1, 0.1)  # 0.5:0.1: 2
    variables = np.array([
        DPkW, CO2_Tax, CH, mineCost, heatCost, maxSCM, eCost, kWhr_kg, r,
        CaO_Frac_Rock, Al2O3_Frac_Rock, SiO2_Frac_Rock, FeO_Frac_Rock,
        Fe2O3_Frac_Rock, MgO_Frac_Rock, CO2int, eCO2int, PPT_SCM, S_Cost,
        CF, TPY, SA_ratio, Eff, Rev, PPT_F, PPT_Al, PPT_Agg, V, W, CD,
        Al_eff, Fe_eff, SCM_eff, OPC_eff, Agg_eff, CapEx_Fac
    ], dtype=object)
    # put the arrays that are to be compared into the sensitive_anal_vec
    sens_analysis = [[i] for i in sensitiv_anal_vect]
    sensitiv_anal_vect = [i for i in sensitiv_anal_vect]
    check = sensitiv_anal_vect[0]
    for i in range(len(sensitiv_anal_vect)):
        if sensitiv_anal_vect[i] == 0:
            # add the standard values of the array to be compared to the
            # end of the cell
            sensitiv_anal_vect = np.append(sensitiv_anal_vect, constants[i])
            sensitiv_anal_vect = [i for i in sensitiv_anal_vect]
            sensitiv_anal_vect[i] = variables[i]
            sens_analysis[i] = variables[i]
    if tornado == 1:
        sensitiv_anal_vect = variables
    return sensitiv_anal_vect, constants, sens_analysis
def _addtimestamp(content, reg, base_dir, force_abspath=False):
    '''
    Search *content* for URLs matching *reg*, resolved against
    *base_dir*, and try to append a file-modification timestamp query
    parameter to each.

    reg: regex matching URLs; group 3 captures the URL itself
    force_abspath: force every path to be absolute, i.e. rooted at the
        working directory
    '''
    iters = re.finditer(reg, content, re.I | re.M)
    t = C('timestamp_name')
    # Walk matches from last to first so earlier offsets remain valid
    # while replacements are spliced into `content`.
    for it in reversed(list(iters)):
        start = content[0:it.start(1)]
        url = it.group(3)
        end = content[it.end(1):]
        local_url = replace(url)
        parsed_url = urlparse(local_url)
        parsed_query = parse_qs(parsed_url.query)
        # Skip URLs that already carry a timestamp.
        # Skip URLs that carry a scheme.
        if not local_url or not parsed_url.path:
            continue
        # Skip paths containing template (twig) syntax.
        elif parsed_url.path.find('{{') >= 0 or parsed_url.path.find('{%') >= 0:
            log.warn('twig syntax found in %s' % parsed_url.path)
            continue
        elif re.match(r'^\s*(about:|data:|#)', local_url):
            log.warn('%s is an invalid url' % local_url)
            continue
        elif parsed_query.get(t) is not None:
            log.warn("%s has a timestamp" % local_url)
            continue
        elif parsed_url.scheme or local_url.startswith('//'):
            log.warn("%s has a scheme" % local_url)
            continue
        if os.path.isabs(parsed_url.path) or force_abspath:
            # Absolute path: resolve against the project root.
            timestamp = utils.get_file_timestamp(utils.abspath(base_dir + parsed_url.path))
        else:
            # Relative path: resolve against the current file's directory.
            # (Relative paths should only be used inside CSS.)
            timestamp = utils.get_file_timestamp(os.path.join(base_dir, parsed_url.path))
        # Ignore URLs whose timestamp cannot be computed.
        if not timestamp:
            continue
        parsed_url = urlparse(url)
        new_query = parsed_url.query
        if '' == new_query:
            new_query = t + "=" + timestamp
        else:
            new_query += '&%s=%s' % (t, timestamp)
        if '' == parsed_url.fragment:
            new_fragment = ''
        else:
            new_fragment = '#' + parsed_url.fragment
        if not parsed_url.scheme:
            new_scheme = ''
        else:
            new_scheme = parsed_url.scheme + "://"
        new_url = new_scheme + parsed_url.netloc + parsed_url.path + '?' + new_query + new_fragment
        content = start + (it.group(2) or '') + new_url + (it.group(2) or '') + end
    return content
import os
import replace as r
import bigfloat
import numpy as np

# Pre-process the reaction list, then parse each coefficient block and
# evaluate its rate expression at T = 6.
file_1 = open("delete_reactions_20", 'r')
file_2 = open("delete_reactions_edit", 'w')
lib = r.replace(file_1, file_2)
file_1.close()
file_2.close()
f = open('delete_reactions_edit', 'r')
f1 = open('res_sec', 'w')
line = f.readline()
while line:
    array = []
    # NOTE(review): the extra readline here consumes the header line of
    # each record and writes it out; records are assumed to be header +
    # two coefficient lines — confirm the input format.
    #print(f.readline().strip())
    f1.write(f.readline().strip())
    line = f.readline()
    for x in line.split():
        array.append(float(x))
    line = f.readline()
    for x in line.split():
        array.append(float(x))
    # Polynomial-fit rate expression: a0 + a6*ln(T) + sum(ai * T^((2i-5)/3)).
    i = 1
    T = 6
    su = 0
    while i < 6:
        su += array[i]*T**((2*i-5)/3)
        i += 1
    su = bigfloat.exp(array[0] + array[6]*np.log(T) + su, bigfloat.precision(7))
    # su = '%.3e' % su
    #print(su)