def genHero(self):
    """Collect the top-scored keywords from every loaded wiki page, feed
    them to the generator, and search for matching files.

    Side effects: sets self.matched and triggers self.searchFiles().
    """
    # Pool the self.numWords highest-scored words of each page.
    # NOTE(review): assumes t.t2 maps word -> score — confirm against loader.
    t4 = {}
    for t in self.loadWiki:
        t3 = sorted(t.t2, key=t.t2.get, reverse=True)[:self.numWords]
        for x in t3:
            t4[x] = t.t2[x]

    ga = Gen()
    # Idiom fix: build the dash-separated name with join instead of a
    # manual index-checking += loop.
    ga.name = "-".join(x.name for x in self.loadWiki)
    ga.loadKeyWords()
    self.matched = ga.calc(t4)
    ga.splitPool()
    ga.writeChar()
    self.searchFiles()
def SimpleExpression():
    """Parse a simple expression: ['+' | '-'] Term {('+' | '-') Term}.

    Emits ADD/SUB/NEG instructions via Gen() and returns the type of the
    expression (as produced by Term).
    """
    if lex() in {Lex.PLUS, Lex.MINUS}:
        # Leading unary sign: the operand must be an integer.
        op = lex()
        nextLex()
        p = loc.lexPos
        T = Term()
        testInt(T, p)
        if op == Lex.MINUS:
            Gen(cm.NEG)
    else:
        p = loc.lexPos
        T = Term()
    while lex() in {Lex.PLUS, Lex.MINUS}:
        op = lex()
        # The left operand (accumulated so far) must be an integer.
        testInt(T, p)
        nextLex()
        p = loc.lexPos
        T = Term()
        # The right operand must be an integer too.
        testInt(T, p)
        if op == Lex.PLUS:
            Gen(cm.ADD)
        else:
            Gen(cm.SUB)
    return T
def Factor():
    """Parse a factor: a number literal, a name (variable, constant or
    function call), or a parenthesised expression.

    Returns the factor's type; emits code that leaves its value on the stack.
    """
    T = None
    if lex() == Lex.NUM:
        Gen(scan.num())  # push the literal value
        nextLex()
        T = Types.Int
    elif lex() == Lex.NAME:
        x = table.find(scan.name())
        if type(x) == cat.Var:
            gen.Addr(x)   # push the variable's address...
            Gen(cm.LOAD)  # ...then load its value
            nextLex()
            T = x.type
        elif type(x) == cat.Const:
            gen.Const(x.val)
            T = x.type
            nextLex()
        elif type(x) == cat.Func:
            # Built-in/declared function call: FUNC '(' args ')'.
            nextLex()
            skip(Lex.LPAR)
            Function(x)
            T = x.type
            skip(Lex.RPAR)
        else:
            Expected("функция, константа или переменная")
    elif lex() == Lex.LPAR:
        nextLex()
        T = Expression()
        skip(Lex.RPAR)
    else:
        Expected("имя число или выражение в скобках")
    return T
def IfStatement():
    """Parse IF .. THEN .. {ELSIF .. THEN ..} [ELSE ..] END, emitting jumps.

    condPC records the conditional (false-)jump emitted by boolExpr so it
    can be patched to the start of the next branch. lastGOTO chains the
    unconditional GOTOs emitted at the end of each taken branch; the whole
    chain is finally patched to the instruction following END.
    """
    skip(Lex.IF)
    p = loc.lexPos
    boolExpr()
    lastGOTO = 0      # head of the chain of end-of-branch GOTOs (0 = empty)
    condPC = gen.PC   # position of the condition's forward jump
    skip(Lex.THEN)
    StatSeq()
    while lex() == Lex.ELSIF:
        # End the previous branch with a GOTO, threading it onto the chain.
        Gen(lastGOTO)
        Gen(cm.GOTO)
        lastGOTO = gen.PC
        # The previous condition's false-jump lands here.
        fixup(condPC, gen.PC)
        # NOTE(review): condPC is not re-captured after the next boolExpr();
        # presumably boolExpr/fixup maintain the jump chain internally —
        # TODO confirm against gen/fixup implementation.
        nextLex()
        p = loc.lexPos
        boolExpr()
        skip(Lex.THEN)
        StatSeq()
    if lex() == Lex.ELSE:
        nextLex()
        Gen(lastGOTO)
        Gen(cm.GOTO)
        lastGOTO = gen.PC
        fixup(condPC, gen.PC)
        StatSeq()
    else:
        fixup(condPC, gen.PC)
    skip(Lex.END)
    # Patch every end-of-branch GOTO to jump past the whole IF statement.
    fixup(lastGOTO, gen.PC)
def gen(request, response, ext='.py'):
    """Protoc-plugin entry point: collect messages, enums and services from
    every proto file in *request* and append one generated file per proto
    to *response*, rendered through the Gen template engine.

    Args:
        request: CodeGeneratorRequest holding the parsed .proto descriptors.
        response: CodeGeneratorResponse to which generated files are added.
        ext: extension appended to each generated file name (default '.py').
    """
    d = dict(messages=dict(), service=dict())
    for proto_file in request.proto_file:
        key = 'messages'
        # Parse request
        for item, package in traverse(proto_file):
            # Bug fix: the bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; narrow to Exception so the plugin stays
            # interruptible while keeping the best-effort behaviour.
            try:
                if hasattr(item, 'method'):
                    key = 'service'
                d[key][item.name] = dict(
                    package=proto_file.package or '<root>',
                    filename=proto_file.name
                )
                if hasattr(item, 'field'):
                    d[key][item.name].update({
                        'fields': [{'name': v.name} for v in item.field]
                    })
                if hasattr(item, 'value'):
                    d[key][item.name].update({
                        'values': [{'name': v.name, 'value': v.number}
                                   for v in item.value]
                    })
                if hasattr(item, 'method'):
                    d[key][item.name].update({
                        'methods': [{'name': v.name,
                                     'input_type': v.input_type,
                                     'output_type': v.output_type}
                                    for v in item.method]
                    })
                if isinstance(item, DescriptorProto):
                    d[key][item.name].update({
                        'type': 'Message',
                        'properties': [{'name': f.name, 'type': int(f.type)}
                                       for f in item.field]
                    })
                elif isinstance(item, EnumDescriptorProto):
                    d[key][item.name].update({
                        'type': 'Enum',
                        'values': [{'name': v.name, 'value': v.number}
                                   for v in item.value]
                    })
            except Exception:
                # Best-effort: skip descriptors we cannot introspect.
                pass
        # Fill response
        f = response.file.add()
        f.name = proto_file.name + ext
        g = Gen()
        g.load_from_dict(d)
        f.content = g.gen_tmpl()
def __init__(self, dir_name='serp_input/', verbose=False, pbed_file_prefix='fpb_pos', random=False):
    """Initialise the FCC generator.

    Bumps the class-level file counter so every instance gets a unique
    pebble-bed position file name.
    """
    FCCGen.file_id += 1
    Gen.__init__(self, dir_name, verbose)
    self.pbed_file_prefix_and_id = f"{pbed_file_prefix}{FCCGen.file_id}"
    self.random = random
def create_pass():
    """View handler: generate a password from the POSTed JSON options.

    Returns the generated password JSON-encoded. Re-raises JSONDecodeError
    when the request body is not valid JSON. Returns None (implicitly) for
    non-POST requests, matching the original behaviour.
    """
    gen = Gen()
    try:
        if request.method == 'POST':
            data = request.json
            password = gen.make_passwd(data)
            return json.dumps(password)
    except JSONDecodeError:
        # Bug fix: `raise JSONDecodeError` raised the *class*, and
        # instantiating it without (msg, doc, pos) would itself fail with
        # TypeError. A bare `raise` re-raises the original exception with
        # its traceback intact.
        raise
def test_abwachsel(self):
    """abwechselnd() must interleave g1 and g2: 1, 1, 2, 4, 3, 9, ..."""
    G = Gen()
    interleaved = G.abwechselnd(G.g1(), G.g2())
    actual = list(islice(interleaved, 0, 10))
    # Expected: pairs (i, i^2) for i = 1..5, flattened.
    expected = [value for i in range(1, 6) for value in (i, i ** 2)]
    self.assertEqual(actual, expected)
def WhileStatement():
    """Parse WHILE condition DO statements END and emit the loop jumps."""
    whilePC = gen.PC      # loop head: condition is re-evaluated here
    skip(Lex.WHILE)
    p = loc.lexPos
    boolExpr()
    condPC = gen.PC       # conditional exit jump, patched after the body
    skip(Lex.DO)
    StatSeq()
    skip(Lex.END)
    # Unconditional jump back to the condition at the loop head.
    Gen(whilePC)
    Gen(cm.GOTO)
    # The condition's false-jump exits to the instruction after the loop.
    fixup(condPC, gen.PC)
def Module():
    """Parse MODULE name; [IMPORT ...] declarations [BEGIN statements] END name.

    Verifies that the trailing name matches the header name, emits the
    final STOP instruction and allocates static variable storage.
    """
    skip(Lex.MODULE)
    if lex() == Lex.NAME:
        modRef = table.new(cat.Module(scan.name()))
        nextLex()
    else:
        Expected("имя")
    skip(Lex.SEMI)
    # Fix: removed the unused local `L = lex()` that served no purpose.
    if lex() == Lex.IMPORT:
        Import()
    DeclSeq()
    if lex() == Lex.BEGIN:
        nextLex()
        StatSeq()
    skip(Lex.END)
    check(Lex.NAME)
    # The closing name must refer to the same module as the header.
    x = table.find(scan.name())
    if x != modRef:
        Expected(f"имя модуля {modRef.name}")
    nextLex()
    skip(Lex.DOT)
    Gen(cm.STOP)
    AllocVars()
def execute_psp(self, pname, args):
    """Compile the .psp page into a .py module when missing or stale,
    then execute the generated module."""
    psp_file = PspConfig.psp_path + pname
    py_file = PspConfig.gen_path + pname.replace(".psp", ".py")
    # Rebuild when the generated file is absent or older than its source.
    stale = (not os.path.isfile(py_file)
             or os.path.getmtime(psp_file) > os.path.getmtime(py_file))
    if stale:
        Gen().parse(psp_file, py_file)
    self.execute_py(py_file, args)
def Term():
    """Parse a term: Factor {('*' | 'DIV' | 'MOD') Factor}.

    Emits MULT/DIV/MOD instructions and returns the term's type.
    """
    p = loc.lexPos
    T = Factor()
    while lex() in {Lex.MULT, Lex.MOD, Lex.DIV}:
        op = lex()
        # The left operand must be an integer.
        testInt(T, p)
        nextLex()
        p = loc.lexPos
        # Bug fix: the right factor's type was discarded (`Factor()` without
        # assignment), so testInt re-checked the *left* operand's type at the
        # right operand's position and a non-integer right factor slipped
        # through. Capture it, matching SimpleExpression's loop.
        T = Factor()
        testInt(T, p)
        if op == Lex.MULT:
            Gen(cm.MULT)
        elif op == Lex.DIV:
            Gen(cm.DIV)
        else:
            Gen(cm.MOD)
    return T
def parse_apis(self, feature):
    """Build the (module, detail) list for every api entry in the project
    JSON and render it through the Gen template for *feature*."""
    j = self.get_json()
    # Idiom fix: iterate the entries directly instead of indexing with
    # range(len(...)); also dropped the redundant dict({...}) wrapper.
    apis = [
        {
            'module': api["module"],
            'detail': self.parse_module(api),
        }
        for api in j["api"]
    ]
    Gen(self.project_name).gen_template(apis, feature)
def cascade_delete(self, tb_id):
    """Delete table *tb_id* and every table that depends on it, together
    with their merge rules, shares and relation records.

    Args:
        tb_id: id of the root table to delete.
    """
    relation_model = Relation()
    share_model = Share()
    gen_model = Gen()
    rel_tbs = [tb_id]
    # Collect all tables that depend on this one.
    rel_tb_infos = []
    relation_model.get_all_rel_tables(tb_id, rel_tb_infos)
    for tb_info in rel_tb_infos:
        rel_tbs.append(tb_info['tb_id'])
    # Fix: the loop variable used to shadow the `tb_id` parameter.
    for rel_tb_id in rel_tbs:
        # Delete the data table itself.
        self.delete(rel_tb_id)
        # Delete its merge rules.
        gen_model._delete({Gen.TB_ID: rel_tb_id})
        # Delete its shares.
        share_model.delete_tb_share(rel_tb_id)
        # Delete its relation records.
        relation_model.delete_rel_info(rel_tb_id)
    # Fix: Logger.warn is deprecated in favour of Logger.warning.
    # NOTE(review): join assumes table ids are strings — confirm upstream.
    self.logger.warning('delete tb: %s' % ','.join(rel_tbs))
def __init__(self, temp, name, mat_list, gen=None, fill=None):
    """Initialise the component.

    Args:
        temp: temperature; must be a Number.
        name: component name; must be a string.
        mat_list: list of materials making up the component.
        gen: geometry generator. Bug fix: the old default `gen=Gen()` was a
             mutable default argument — one shared Gen instance created at
             import time and reused by every instance. A fresh Gen() is now
             created per call when the argument is omitted.
        fill: optional fill material.
    """
    assert isinstance(temp, Number), '''
    temp is not a number:%r''' % temp
    assert isinstance(name, str), "name is not a string:%r" % name
    assert isinstance(
        mat_list, list), "%r mat_list is not a list:%r" % (name, mat_list)
    self.mat_list = mat_list
    self.gen = Gen() if gen is None else gen
    self.fill = fill
    CmpObj.__init__(self, temp, name)
def main():
    """Entry point: initialise LLVM, read the source file given on the
    command line and hand it to the code generator."""
    binding.initialize()
    binding.initialize_native_target()
    binding.initialize_native_asmprinter()

    # Main LLVM IR module.
    module = ir.Module('my_module')
    # Symbol table.
    symbols = {}
    # Optimisation pass manager — currently disabled.
    passes = None
    # Execution engine.
    ee = create_execution_engine()

    code = ''
    if len(sys.argv) >= 2:
        # Fix: the file was opened but never closed; use a context manager
        # so the handle is released even if read() raises.
        with open(sys.argv[1]) as f:
            code = f.read()

    driver = Gen(code, module, symbols, ee, passes, OPTIMIZATION, DEBUG)
    if SHOW_END_CODE:
        print('\n\n=== Código LLVM final ===')
        print(module)
def AssOrCall():
    """Parse a statement beginning with a NAME: either a procedure call
    (possibly module-qualified, with or without a parenthesised argument)
    or an assignment with type checking.
    """
    check(Lex.NAME)
    x = table.find(scan.name())
    nextLex()
    if type(x) == cat.Module:
        # Qualified name: look up "module.procedure" in the symbol table.
        module = x.name
        skip(Lex.DOT)
        check(Lex.NAME)
        pname = module + "." + scan.name()
        x = table.find(pname)
        if type(x) == cat.Proc:
            nextLex()
            if lex() == Lex.LPAR:
                nextLex()
                Procedure(x)
                skip(Lex.RPAR)
            else:
                Procedure(x)
        else:
            Expected("процедура")
    elif type(x) == cat.Proc:
        # Unqualified procedure call.
        if lex() == Lex.LPAR:
            nextLex()
            Procedure(x)
            skip(Lex.RPAR)
        else:
            Procedure(x)
    elif type(x) == cat.Var:
        # Assignment: push the address, evaluate the RHS, check the type.
        gen.Addr(x)
        skip(Lex.ASS)
        p = loc.lexPos
        T = Expression()
        if T != x.type:
            posError("Несоответствие типа", p)
        Gen(cm.SAVE)
    else:
        Expected("вызов процедуры или присваивание")
raise else: harbor_folder = os.path.join(os.getcwd(), '.harbor') os.chdir(harbor_folder) os.popen('git checkout %s' % git_branch) os.popen('git add .') os.popen('git commit -m "railgun site update...✅ "') os.popen('git push -u %s %s' % (git_url, git_branch)) def update_static_res(): static_folder = os.path.join(os.getcwd(), 'app/static') static_build_folder = os.path.join(os.getcwd(), 'app/build/static') if os.path.isdir(static_build_folder): shutil.rmtree(static_build_folder) shutil.copytree(static_folder, static_build_folder) if __name__ == '__main__': if len(sys.argv) > 1 and sys.argv[1] == 'build': _gen = Gen(app) _gen.gen() # update static resources update_static_res() elif len(sys.argv) > 1 and sys.argv[1] == 'first_upload': first_upload() elif len(sys.argv) > 1 and sys.argv[1] == 'other_upload': other_upload() else: manager.run()
"""
Given a list of constraint rankings, run the typology. Either print out all
the possibilities for inspection (warning: a lot!) or count the number of
different typologies that arise.
"""
from collections import Counter
import itertools

from gen import Gen
from constraints import ConstraintSet
from tableau import Tableau

# Shared candidate generator used by every typology routine below.
gen = Gen()


def print_full_typology(rankings):
    """Print the full set of results: ranking, winner, and winner type."""
    for ranking in rankings:
        constraint_ranking = ConstraintSet(ranking)
        print("----------------------------------")
        print(constraint_ranking)
        print("----------------------------------")
        # One tableau per possible input under this ranking.
        for inputs in gen.inputs():
            tableau = Tableau(inputs, constraint_ranking,
                              gen.candidates(inputs))
            print(inputs, tableau.winners, tableau.typology)


def print_count_typology(rankings):
    """Print each ranking and morphology count over all possible inputs."""
def test_gen1(self):
    """g1() must yield the natural numbers starting at 1."""
    G = Gen()
    gen = G.g1()
    x = list(islice(gen, 0, 10))
    # Idiom fix: list(range(...)) instead of the redundant identity
    # comprehension [i for i in range(1, 11)].
    self.assertEqual(x, list(range(1, 11)))
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from buf import Buf from gen import Gen from proc import Proc from evenement import Evenement import matplotlib.pyplot as plt temps = 0 temps_fin = 10 liste_comp = [Buf([Evenement.JOB, Evenement.DONE], [Evenement.REQ]), Gen([Evenement.JOB]), Proc([Evenement.REQ], [Evenement.DONE])] liste_temps = [] liste_q = [] while(temps <= temps_fin): ta_min = liste_comp[0].get_ta() for i in range(1, len(liste_comp)): tmp = liste_comp[i].get_ta() if ta_min > tmp: ta_min = tmp imminent = [] for comp in liste_comp: if comp.get_ta() == ta_min: imminent.append(comp) liste_ev_im = {} for im in imminent: evenement = im.f_lambda() for key in evenement:
def __init__(self, dir_name='serp_input/'):
    """Initialise with the serpent input directory (delegates to Gen)."""
    Gen.__init__(self, dir_name)
def __init__(self, dir_name='serp_input/', verbose=False, pbed_file_prefix='fpb_pos'):
    """Initialise the FCC generator, assigning a unique pebble-bed file id
    from the class-level counter."""
    FCCGen.file_id += 1
    Gen.__init__(self, dir_name, verbose)
    self.pbed_file_prefix_and_id = f"{pbed_file_prefix}{FCCGen.file_id}"
def main(checkpoint, **args):
    """Load a trained SampleRNN checkpoint and run audio generation.

    Args:
        checkpoint: path to the saved state dict.
        **args: overrides for the default generation parameters
            (expects at least 'bucket' and 'cuda'; 'debug' is optional).
    """
    task_id = setup_logging(
        'gen', logging.NOTSET if args.get('debug', False) else logging.INFO)

    # Defaults, with the checkpoint path and CLI overrides merged on top.
    params = dict(
        {
            'n_rnn': 3,
            'dim': 1024,
            'learn_h0': False,
            'q_levels': 256,
            'weight_norm': True,
            'frame_sizes': [16, 16, 4],
            'sample_rate': 16000,
            'n_samples': 1,
            'sample_length': 16000 * 60 * 4,
            'sampling_temperature': 1,
            'q_method': QMethod.LINEAR,
        },
        exp=checkpoint, **args)
    logging.info(str(params))
    logging.info('booting')

    # dataset = storage_client.list_blobs(bucket, prefix=path)
    # for blob in dataset:
    #     blob.download_to_filename(blob.name)
    bucket = None
    if args['bucket']:
        # Optional Google Cloud Storage bucket for checkpoint/result files.
        logging.debug('setup google storage bucket {}'.format(args['bucket']))
        storage_client = storage.Client()
        bucket = Bucket(storage_client, args['bucket'])
        preload_checkpoint(checkpoint, storage_client, bucket)

    # Results go two directories above the checkpoint, under the task id.
    results_path = os.path.abspath(
        os.path.join(checkpoint, os.pardir, os.pardir, task_id))
    ensure_dir_exists(results_path)

    checkpoint = os.path.abspath(checkpoint)
    tmp_pretrained_state = torch.load(
        checkpoint,
        map_location=lambda storage, loc: storage.cuda(0)
        if args['cuda'] else storage)
    # Load all tensors onto GPU 1
    # torch.load('tensors.pt', map_location=lambda storage, loc: storage.cuda(1))

    # Delete "model." from key names since loading the checkpoint
    # automatically attaches it
    pretrained_state = OrderedDict()
    for k, v in tmp_pretrained_state.items():
        layer_name = k.replace("model.", "")
        pretrained_state[layer_name] = v
        # print("k: {}, layer_name: {}, v: {}".format(k, layer_name, np.shape(v)))

    # Create model with same parameters as used in training
    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    if params['cuda']:
        model = model.cuda()

    # Load pretrained model
    model.load_state_dict(pretrained_state)

    def upload(file_path):
        # No-op when no bucket was configured.
        if bucket is None:
            return

        # remove prefix /app
        name = file_path.replace(os.path.abspath(os.curdir) + '/', '')
        blob = Blob(name, bucket)
        logging.info('uploading {}'.format(name))
        blob.upload_from_filename(file_path)

    (_, dequantize) = quantizer(params['q_method'])
    gen = Gen(Runner(model), params['cuda'])
    gen.register_plugin(
        GeneratorPlugin(results_path, params['n_samples'],
                        params['sample_length'], params['sample_rate'],
                        params['q_levels'], dequantize,
                        params['sampling_temperature'], upload))
    gen.run()
from gen import Gen

# "Infinity" sentinel: inf marks the absence of an edge in the matrix below.
inf = 999999
# NOTE(review): L appears to be a weighted adjacency matrix — confirm
# against Gen's expectations.
L = [[inf, inf, 1, 1, 1],
     [inf, inf, inf, 1, inf],
     [1, inf, inf, inf, inf],
     [1, 1, inf, inf, inf],
     [1, inf, inf, inf, inf]]
# dw = DataWorker(L)

gen = Gen(6, 7, L)
n = len(L)
vers = list(range(0, n))
# Enumerate the possible shapes for n vertices, then search.
psps = gen.get_possible_shapes(n)
print("Возможные размеры: ", psps)
gen.find_bob(psps, True)
# tmp = gen.init_gen(psps[1])
# print(tmp)
# bst = gen.find_best(tmp)
# print(bst)
addr = self.gen.set_new_reg("M[FP+%d]" % self.get_symbol(name).addr) if negate: addr = self.gen.set_new_reg("-1 * R[%d]" % addr) # keep track of what register contains this symbols value self.get_symbol(name).current_reg = addr return (addr, self.get_symbol(name).type) if __name__ == "__main__": import sys from scanner import Scanner from gen import Gen gen = Gen() scanner = Scanner(sys.argv[1]) parser = Parser(scanner, gen) if scanner.has_errors or parser.has_errors: print "-"*50 print "BUILD FAILED" sys.exit(1) print "" print "Global Procedures" print "-"*50 for x in parser.global_symbols: if not parser.global_symbols[x].type == 'procedure': continue print parser.global_symbols[x]
def gen(request, response, ext='.py'):
    """Protoc-plugin entry point (variant): gather messages, enums and
    services from each proto file in *request* and append one rendered
    file per proto to *response*.

    Args:
        request: CodeGeneratorRequest with the parsed .proto descriptors.
        response: CodeGeneratorResponse to append generated files to.
        ext: extension for the generated file names (default '.py').
    """
    d = dict(messages=dict(), service=dict())
    for proto_file in request.proto_file:
        key = 'messages'
        # Parse request
        for item, package in traverse(proto_file):
            # Bug fix: bare `except:` swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception to keep the plugin
            # interruptible while preserving best-effort parsing.
            try:
                if hasattr(item, 'method'):
                    key = 'service'
                d[key][item.name] = dict(package=proto_file.package
                                         or '<root>',
                                         filename=proto_file.name)
                if hasattr(item, 'field'):
                    d[key][item.name].update(
                        {'fields': [{
                            'name': v.name
                        } for v in item.field]})
                if hasattr(item, 'value'):
                    d[key][item.name].update({
                        'values': [{
                            'name': v.name,
                            'value': v.number
                        } for v in item.value]
                    })
                if hasattr(item, 'method'):
                    d[key][item.name].update({
                        'methods': [{
                            'name': v.name,
                            'input_type': v.input_type,
                            'output_type': v.output_type
                        } for v in item.method]
                    })
                if isinstance(item, DescriptorProto):
                    d[key][item.name].update({
                        'type': 'Message',
                        'properties': [{
                            'name': f.name,
                            'type': int(f.type)
                        } for f in item.field]
                    })
                elif isinstance(item, EnumDescriptorProto):
                    d[key][item.name].update({
                        'type': 'Enum',
                        'values': [{
                            'name': v.name,
                            'value': v.number
                        } for v in item.value]
                    })
            except Exception:
                # Best-effort: skip descriptors we cannot introspect.
                pass
        # Fill response
        f = response.file.add()
        f.name = proto_file.name + ext
        g = Gen()
        g.load_from_dict(d)
        f.content = g.gen_tmpl()
def Function(x: cat.Func):
    """Emit inline code for the built-in functions ABS, MIN, MAX and ODD."""
    if x.name == "ABS":
        intExpr()
        # |x|: duplicate the value, compare with 0, skip NEG when >= 0.
        Gen(cm.DUP)
        gen.Const(0)
        Gen(gen.PC + 3)
        Gen(cm.IFGE)
        Gen(cm.NEG)
    elif x.name == "MIN":
        check(Lex.NAME)
        x = table.find(scan.name())
        if type(x) != cat.Type:
            Expected("имя типа")
        nextLex()
        # MIN(INTEGER) = -MAXINT - 1
        Gen(scan.MAXINT)
        Gen(cm.NEG)
        Gen(1)
        Gen(cm.SUB)
    elif x.name == "MAX":
        check(Lex.NAME)
        x = table.find(scan.name())
        if type(x) != cat.Type:
            Expected("имя типа")
        nextLex()
        # MAX(INTEGER) = MAXINT
        gen.Const(scan.MAXINT)
    elif x.name == "ODD":
        intExpr()
        # NOTE(review): this emits MOD 1 then IFNE against 0 — MOD 1 is
        # always 0, so ODD would look constant-false; possibly the operand
        # encoding differs or MOD 2 was intended. TODO confirm against the
        # VM's instruction encoding.
        Gen(1)
        Gen(cm.MOD)
        Gen(0)
        Gen(cm.IFNE)
    else:
        # Only the four built-ins above are registered as cat.Func.
        assert False
DebugLog("igore :" + dbNumberName) continue Link = "" if dbDoubanID != "": Link = {'site': 'douban', 'sid': dbDoubanID} elif dbIMDBID != "": Link = {'site': 'douban', 'sid': dbIMDBID} else: ErrorLog("empty link:" + dbNumberName) continue tSeconds = random.randint(60, 500) DebugLog("sleep {} Seconds:".format(tSeconds)) time.sleep(tSeconds) DebugLog("begin :" + dbNumberName) try: tMovieInfo = Gen(Link).gen(_debug=True) except Exception as err: print(err) ErrorLog("failed to gen:" + dbNumberName) continue if not tMovieInfo["success"]: print(tMovieInfo["error"]) ErrorLog("failed to request from douban:" + dbNumberName) continue if tMovieInfo['episodes'] == "": tMovieInfo['episodes'] = '0' if tMovieInfo['year'] == "": tMovieInfo['year'] = '0' tNation = (tMovieInfo['region'][0]).strip() tYear = int(tMovieInfo['year'])
def Procedure(x):
    """Emit inline code for the built-in procedures HALT, INC, DEC,
    In.Int, Out.Int, Out.Ln and In.Open."""
    if x.name == "HALT":
        gen.Const(ConstExpr())
        Gen(cm.STOP)
    elif x.name == "INC":
        # INC(v [, n]): load v, add n (default 1), store back.
        Variable()
        Gen(cm.DUP)
        Gen(cm.LOAD)
        if lex() == Lex.COMMA:
            nextLex()
            intExpr()
        else:
            gen.Const(1)
        Gen(cm.ADD)
        Gen(cm.SAVE)
    elif x.name == "DEC":
        # DEC(v [, n]): like INC but subtracts.
        Variable()
        Gen(cm.DUP)
        Gen(cm.LOAD)
        if lex() == Lex.COMMA:
            nextLex()
            intExpr()
        else:
            gen.Const(1)
        Gen(cm.SUB)
        Gen(cm.SAVE)
    elif x.name == "In.Int":
        # Read an integer into a variable.
        Variable()
        Gen(cm.IN)
        Gen(cm.SAVE)
    elif x.name == "Out.Int":
        # Out.Int(value, width)
        intExpr()
        skip(Lex.COMMA)
        intExpr()
        Gen(cm.OUT)
    elif x.name == "Out.Ln":
        Gen(cm.LN)
    elif x.name == "In.Open":
        # No code required to open the input stream.
        pass
# Test fixtures for the Gen link parser; each entry exercises one case.
steam_link_list = [
    "http://store.steampowered.com/app/20650135465430/",  # Steam Not Exist
    "http://store.steampowered.com/app/550/",  # Steam Short Link (Store)
    "http://store.steampowered.com/app/240720/Getting_Over_It_with_Bennett_Foddy/",  # Steam Full Link
    "https://steamcommunity.com/app/668630",  # Another Type of Steam Link (Hub)
    "http://store.steampowered.com/app/420110",  # Steam Link With Age Check (One click type)
    "http://store.steampowered.com/app/489830/",  # Steam Link With Age Check (Birth Choose type)
    # Bug fix: the missing trailing comma after the next line made Python
    # concatenate it with the following literal into one bogus URL,
    # silently dropping an entry from the list.
    "https://store.steampowered.com/app/517630/Just_Cause_4/",  # New Age Check pass in Steam
    "https://store.steampowered.com/app/968790",  # Fix tag class miss
]

other_link_list = [
    "http://jdaklvhgfad.com/adfad",  # No support link
]

test_link_list = []
# test_link_list.extend(douban_link_list)
# test_link_list.extend(imdb_link_list)
# test_link_list.extend(bgm_link_list)
# test_link_list.extend(steam_link_list)
# test_link_list.extend(other_link_list)

for link in test_link_list:
    print("Test link: {}".format(link))
    gen = Gen(link).gen(_debug=True)
    if gen["success"]:
        print("Format text:\n", gen["format"])
    else:
        print("Error : {}".format(gen["error"]))
    print("--------------------")
# Enumerate every point of the curve and compute each point's order by
# repeated addition until hitting the point at infinity; record a Gen
# (generator candidate) per point.
for i in range(p):
    for y in range(p):
        if c.has_point(i, y):
            point = Point(c, i, y)
            p2 = point
            #n = 1
            list_point = []
            list_point.append(point)
            while True:
                tmp = point
                point = point + p2
                if point == inf:
                    break
                list_point.append(point)
                #n = n + 1
            # NOTE(review): order recorded as len(list_point) + 1,
            # presumably counting the point at infinity — confirm.
            list_gen.append(Gen(tmp, (len(list_point) + 1)))

for gen in list_gen:
    print(gen)

## Generator choice
gen = list_gen[8]

## Key generation for Alice and Bob
privB, pubB = gen.gen_keys()
privA, pubA = gen.gen_keys()

## Question 1 & Question 2:
## We create a message as an elliptic-curve point of the curve
m = gen.point * 90
def AllocVars():
    """Allocate static storage for all declared variables after code emission.

    For each variable, patch the references recorded in var.addr to the
    allocated location, then emit one zero-initialised cell.
    """
    # Fix: renamed the local `vars`, which shadowed the builtin vars().
    module_vars = table.getVars()
    for var in module_vars:
        # NOTE(review): var.addr presumably heads a fixup chain of
        # instructions referencing this variable — confirm against fixup().
        fixup(var.addr, gen.PC)
        var.addr = gen.PC
        Gen(0)