def spoofing(self, hostname, dns, rawdata, sock):
    fromip = self.client_address[0]
    for ipp, rev in self.server.resolvs:
        if match(ipp, fromip):
            for hnp, ip in rev:
                if match(hnp, hostname):
                    return ip
            return self.queryip(hostname)
    return self.queryip(hostname)
def test_process():
    from util import read_file_to_list
    from util import match
    rules, rows = read_file_to_list("testinput.txt")
    assert 6 == len(rules)
    assert 5 == len(rows)
    assert True == match(rules, rows[0])
    assert False == match(rules, rows[1])
    assert True == match(rules, rows[2])
    assert False == match(rules, rows[3])
    assert False == match(rules, rows[4])
def check(self, imgSrc) -> bool:
    self.imgSrc = ac.imread(imgSrc)
    res = util.match(self.imgSrc, self.imgSign, 0.9)
    if res is not None:
        self.y0 = int(res['rectangle'][3][1] + 80 * paras.SCALE)
        return True
    return False
def match(self, text):
    """Personal match."""
    if text in ['i', 'me']:
        return self
    elif text == 'here':
        return self.location
    else:
        return util.match(text, set(self.contents + self.location.contents
                                    + [self.location, self]))
def check(self, imgSrc) -> bool:
    self.imgSrc = ac.imread(imgSrc)
    res = util.match(self.imgSrc, self.imgSign)
    # print(res)
    if res is not None:
        self.point = [int(res['result'][0]), int(res['result'][1])]
        return True
    return False
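Both check() snippets above read res['result'] and res['rectangle'], the shape of the dictionary aircv's template matcher returns. A minimal sketch of a util.match wrapper under that assumption (the default threshold and the exact aircv call are guesses, not taken from the source):

import aircv as ac

def match(im_source, im_search, threshold=0.5):
    # Template-match im_search inside im_source and return aircv's result
    # dict ({'result': (x, y), 'rectangle': [...], 'confidence': ...}),
    # or None when no match clears the confidence threshold.
    return ac.find_template(im_source, im_search, threshold)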
def test_process(): from util import read_file_to_list from util import match rules, rows = read_file_to_list("testinput2.txt") valid = 0 for row in rows: if match(rules, row): valid += 1 assert valid == len(rows)
def test(task_name, func, subset='train'):
    with open(f'ARC/data/training/{task_name}.json') as f:
        j = json.load(f)
    correct = True
    for i, t in enumerate(j[subset]):
        input_grid = np.array(t['input'])
        pred = func(input_grid)
        # vis(pred)
        target = np.array(t['output'])
        correct &= match(pred, target)
    return correct
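In this ARC test harness, match(pred, target) only needs to report whether the predicted grid equals the expected grid; a minimal sketch, assuming plain element-wise equality of NumPy arrays:

import numpy as np

def match(pred, target):
    # True when both grids have the same shape and identical cell values.
    return np.array_equal(np.asarray(pred), np.asarray(target))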
def processa_paises():
    who_doctors, who_nurses = util.read_pais()
    # remove duplicates, keeping only the most recent data
    util.drop_pais_dupl(who_doctors, "Country")
    util.drop_pais_dupl(who_nurses, "Country")
    # each block below builds one column of the final table;
    # the list of countries differs between the datasets, so to align them
    # we concatenate and make the necessary adjustments
    nome = []
    nome_aux = pd.concat([who_doctors["Country"], who_nurses["Country"]],
                         join='inner', ignore_index=True)
    nome_aux.drop_duplicates(inplace=True)
    nome_series = nome_aux.reset_index()  # reset the index
    nome_series.drop(["index"], axis=1, inplace=True)  # drop the old index column
    for i in range(len(nome_series)):
        nome.append(nome_series["Country"][i])
    nome.sort(key=str.lower)  # sort alphabetically
    med_total = util.match(nome, who_doctors, "Medical doctors (number)", "Country")
    med_10k = util.match(nome, who_doctors, "Medical doctors (per 10 000 population)", "Country")
    enf_total = util.match(nome, who_nurses, "Nursing and midwifery personnel (number)", "Country")
    enf_10k = util.match(nome, who_nurses, "Nursing and midwifery personnel (per 10 000 population)", "Country")
    dados = {'País': nome,
             'Profissionais de enfermagem - Total': enf_total,
             'Profissionais de enfermagem a cada 10k habitantes': enf_10k,
             'Médicos - Total': med_total,
             'Médicos a cada 10k habitantes': med_10k}
    df = pd.DataFrame(data=dados)
    util.write_to_csv(df, "Mundo (OMS)")
    stats.stats_paises(pd.read_csv("../data/processed/Mundo (OMS).csv"))
def test_match():
    s = '\n'
    s += str(util.match('abc', 'a'))
    s += '\n'
    s += str(util.match('abc123', 'bc'))
    s += '\n'
    s += str(util.match('abc123', '123'))
    s += '\n'
    s += str(util.match('abc 123', r'\s'))
    s += '\n'
    s += str(util.match('abc 123xyz', r'\d'))
    s += '\n'
    s += str(util.match('abc123', '^abc'))
    s += '\n'
    s += str(util.match('abc123', '^123'))
    s += '\n'
    s += str(util.match('abc', 'z'))
    return s
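The patterns exercised here ('^abc', r'\d', r'\s') suggest util.match is a thin regular-expression helper; a minimal sketch, assuming it reports whether the pattern occurs anywhere in the text:

import re

def match(text, pattern):
    # True if the regular expression `pattern` matches anywhere in `text`.
    # The boolean return value is an assumption of this sketch; the original
    # may return the match object instead.
    return re.search(pattern, text) is not None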
def test(task_name, func_string, subset='train'):
    with open(f'ARC/data/training/{task_name}.json') as f:
        j = json.load(f)
    prog = parse(func_string)
    correct = True
    for i, t in enumerate(j[subset]):
        input_grid = np.array(t['input'])
        prog_with_input = ['define', 'grid', input_grid, prog]
        pred = eval(prog_with_input)
        # print(pred)
        # vis(pred)
        target = np.array(t['output'])
        correct &= match(pred, target)
    return correct
def answer(self):
    if self.problem is None:
        return
    answer = self.store.find(self.problem)
    if answer is None:
        print("answer not found")
        self.tapAnswer(0)
        self.captureAnswer()
    else:
        res = util.match(self.imgSrc, answer)
        if res is None:
            print("answer not match")
            self.tapAnswer(0)
            self.captureAnswer()
            return
        point = int(res['result'][0]), int(res['result'][1])
        # idx = util.findImg(self.answers, answer)
        # print("=====Find Answer!=====", idx)
        # self.tapAnswer(idx)
        print("======find answer======", point[0], point[1])
        self.tapPosition(point[0], point[1])
        time.sleep(1)
        # skip the middle status: right answer but wrong sign
def matchfind():
    surveyname = request.args.get('surveyname', '')  # write JS method for this
    username = session['user']
    print(surveyname, username)
    return json.dumps(util.match(surveyname, username))
noise = Variable(noise, volatile=True)  # total freeze netG
y = Variable(netG(noise).data)
f_enc_Y_D, f_dec_Y_D = netD(y)

# compute biased MMD2 and use ReLU to prevent negative value
mmd2_D = mix_rbf_mmd2(f_enc_X_D, f_enc_Y_D, sigma_list)
mmd2_D = F.relu(mmd2_D)

# compute rank hinge loss
# print('f_enc_X_D:', f_enc_X_D.size())
# print('f_enc_Y_D:', f_enc_Y_D.size())
one_side_errD = one_sided(f_enc_X_D.mean(0) - f_enc_Y_D.mean(0))

# compute L2-loss of AE
L2_AE_X_D = util.match(x.view(batch_size, -1), f_dec_X_D, 'L2')
L2_AE_Y_D = util.match(y.view(batch_size, -1), f_dec_Y_D, 'L2')

errD = (torch.sqrt(mmd2_D) + lambda_rg * one_side_errD
        - lambda_AE_X * L2_AE_X_D - lambda_AE_Y * L2_AE_Y_D)
errD.backward(mone)
optimizerD.step()

# ---------------------------
#   Optimize over NetG
# ---------------------------
for p in netD.parameters():
    p.requires_grad = False

for j in range(Giters):
def matchfind():
    surveyname = request.args.get("surveyname", "")  # write JS method for this
    username = session["user"]
    print(surveyname, username)
    return json.dumps(util.match(surveyname, username))
def button4Click(self):
    self.error = match(self.cfeats, self.rfm)
    self.button4["background"] = "lightgreen"
def match_headers(self, headers, hijackheaders):
    for k, v in hijackheaders.items():
        if k not in headers or not match(v, headers[k]):
            return False
    return True
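A hedged usage sketch of the header check above, assuming the hijack-header values are regular expressions and match(v, headers[k]) is a regex test (neither is confirmed by the source); the rule values and header names below are made up for illustration:

import re

def match(pattern, value):
    # Assumed regex semantics for this sketch.
    return re.search(pattern, value) is not None

def match_headers(headers, hijackheaders):
    for k, v in hijackheaders.items():
        if k not in headers or not match(v, headers[k]):
            return False
    return True

rules = {'Host': r'(^|\.)example\.com$', 'User-Agent': r'Mozilla'}
request_headers = {'Host': 'www.example.com',
                   'User-Agent': 'Mozilla/5.0',
                   'Accept': '*/*'}
print(match_headers(request_headers, rules))  # True: every rule is present and matches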
# Lam [[Char]] expr
# Word [Char]
# Number Num
# Character Char
App = lambda es: lambda app, lam, word, number, character: app(es)
Lam = lambda w, e: lambda app, lam, word, number, character: lam(w, e)
Word = lambda n: lambda app, lam, word, number, character: word(n)
Number = lambda d: lambda app, lam, word, number, character: number(d)
Character = lambda c: lambda app, lam, word, number, character: character(c)

getChildren = lambda n: n(Const([]), lambda _, la: la)
getName = lambda l: l(lambda t: t.d, lambda _, __: '')

toExprST = Y(lambda f: lambda tr: tr(
    lambda t: match(t.n)([
        ('T_word', lambda w: Word(t.d)),
        ('T_number', lambda d: Number(int(t.d))),
        ('T_char', lambda c: Character(t.d))
    ])(lambda a: Const(a)(print(a))),
    lambda b, la: match(b)([
        ('P_expr', lambda _: let(list(map(f, la)), lambda la:
            # fold(App)(la[0])(la[1:])
            App(la)
        )),
        ('P_abst', lambda _: let(list(map(getName, getChildren(la[0]))), lambda largs:
            Lam(largs, f(la[1]))
        ))
    ])(lambda a: Const(a)(print(a)))
))

# toListOfExprs as RoseTree Tok [Char] -> Maybe [([Char],expr)]
toListOfExprs = lambda tr: (
    one_side_errD_thinned = one_sided(
        f_enc_X_D.mean(0) - thinned_f_enc_Y_D.mean(0))
except Exception as e:
    print('D: Thinning f_enc_Y: Error: {}'.format(e))
    pdb.set_trace()

# Unthinned hinge loss.
one_side_errD_unthinned = one_sided(
    f_enc_X_D.mean(0) - f_enc_Y_D.mean(0))

# Choose which hinge loss you want.
if not weighted:
    one_side_errD = one_side_errD_unthinned
else:
    one_side_errD = one_side_errD_thinned

# compute L2-loss of AE
L2_AE_X_D = util.match(x.view(batch_size, -1), f_dec_X_D, 'L2')
L2_AE_Y_D = util.match(y.view(batch_size, -1), f_dec_Y_D, 'L2')

# Also compute AE loss on subsets of zeros and ones.
try:
    if len(x_eval_1s) or len(x_eval_dec_1s):
        L2_AE_X1_D = util.match(x_eval_1s.view(len(x_eval_1s), -1),
                                x_eval_dec_1s, 'L2')
    else:
        L2_AE_X1_D = Variable(
            torch.from_numpy(np.array([0])).type(torch.FloatTensor))
except Exception as e:
    print('D: Computing x1 ae error. Error: {}'.format(e))
    pdb.set_trace()
try:
    if len(x_eval_0s) or len(x_eval_dec_0s):
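In both MMD-GAN style training fragments above, util.match(x.view(batch_size, -1), f_dec_X_D, 'L2') scores how well the discriminator's decoder reconstructs its input. A minimal sketch of such a distance helper, assuming 'L2'/'L1' select mean squared and mean absolute distance (the exact reduction used by the original util.py is not shown here):

import torch

def match(x, y, dist='L2'):
    # Reconstruction distance between a flattened batch x and its decoding y.
    # The reductions below are assumptions for this sketch.
    if dist == 'L2':
        return (x - y).pow(2).mean()
    if dist == 'L1':
        return (x - y).abs().mean()
    raise ValueError(f'unknown distance: {dist}')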