def test_richcompare(self):
    # Equality against mixed types: huge int, unrelated type, equal/unequal complex.
    self.assertIs(complex.__eq__(1+1j, 1<<10000), False)
    self.assertIs(complex.__lt__(1+1j, None), NotImplemented)
    self.assertIs(complex.__eq__(1+1j, 1+1j), True)
    self.assertIs(complex.__eq__(1+1j, 2+2j), False)
    self.assertIs(complex.__ne__(1+1j, 1+1j), False)
    self.assertIs(complex.__ne__(1+1j, 2+2j), True)
    # Against floats: equal exactly when the imaginary part is zero.
    for i in range(1, 100):
        f = i / 100.0
        self.assertIs(complex.__eq__(f+0j, f), True)
        self.assertIs(complex.__ne__(f+0j, f), False)
        self.assertIs(complex.__eq__(complex(f, f), f), False)
        self.assertIs(complex.__ne__(complex(f, f), f), True)
    # complex defines no ordering: the slots return NotImplemented...
    for slot in (complex.__lt__, complex.__le__, complex.__gt__, complex.__ge__):
        self.assertIs(slot(1+1j, 2+2j), NotImplemented)
    # ...so the generic ordering operators raise TypeError.
    for binop in (operator.lt, operator.le, operator.gt, operator.ge):
        self.assertRaises(TypeError, binop, 1+1j, 2+2j)
    self.assertIs(operator.eq(1+1j, 1+1j), True)
    self.assertIs(operator.eq(1+1j, 2+2j), False)
    self.assertIs(operator.ne(1+1j, 1+1j), False)
    self.assertIs(operator.ne(1+1j, 2+2j), True)
def test_convert_return_value():
    """Converters reached through __builtin__/operator return equal values (Python 2)."""
    builtin = import_module("__builtin__")
    operator = import_module("operator")
    assert builtin.int(32) == 32
    assert builtin.float(3.123) == 3.123
    for text in ['a string']:  # TODO 'a string \00yep']:
        assert builtin.str(text) == text
    unicode_sample = u"some буквы are странные"
    assert builtin.unicode(unicode_sample) == unicode_sample
    nested = (1, (2.3,))
    assert builtin.tuple(nested) == nested
    mapping = {'a': 'b', 1: 2}
    assert builtin.dict(mapping) == mapping
    items = ['a', 1, [2]]
    assert builtin.list(items) == items
    assert builtin.bool(True) is True
    assert builtin.bool(False) is False
    assert builtin.bool(None) is False
    assert operator.eq(None, None) is True
    assert operator.eq(None, False) is False
def updateHosts(hostFilePath, updateFile, fileName):
    """Strip the managed #UPDATE_SMART_HOST block out of a hosts file.

    Reads <hostFilePath>\\<fileName>, drops every line between the
    #UPDATE_SMART_HOST_START and #UPDATE_SMART_HOST_END markers (markers
    included), stages the surviving lines in updateFile, then rewrites the
    hosts file from that staging file.
    """
    hostPath = hostFilePath + "\\" + fileName
    keptLines = []
    insideManagedBlock = False
    with open(hostPath, 'r') as hostFp:
        for line in hostFp:
            stripped = line.strip()
            # BUG FIX: the old code compared the raw readlines() line (which
            # keeps the trailing newline) to the marker text with operator.eq,
            # so the markers never matched and nothing was ever stripped.
            if stripped == "#UPDATE_SMART_HOST_START":
                insideManagedBlock = True
                continue
            if stripped == "#UPDATE_SMART_HOST_END":
                insideManagedBlock = False
                continue
            if not insideManagedBlock:
                keptLines.append(line)
                print(line)
    # BUG FIX: open the staging file with 'w' so it is truncated; the old
    # 'r+' open left stale trailing bytes when the new content was shorter.
    with open(updateFile, 'w') as stageFp:
        stageFp.writelines(keptLines)
    with open(updateFile, 'r') as stageFp:
        stagedLines = stageFp.readlines()
    with open(hostPath, 'w') as hostFp:
        hostFp.writelines(stagedLines)
# Validate the currently selected memory regions. With no name, just log every
# selection; with a name, log that region, remember it as `checking_mem`, then
# verify it overlaps neither any other selected region nor any spare range,
# aborting via self.Error_exit on overlap.
# NOTE(review): if `name` is non-empty but matches no selection, `checking_mem`
# is never bound and the overlap loops raise NameError — TODO confirm callers
# always pass a known region name.
def check_selected_mem(self,name=""): if eq(name,""): for mem in self.selected: info("%s: start addr is 0x%x and size is 0x%x"%(mem["name"],mem["start"],mem["size"])) else: for mem in self.selected: if eq(mem["name"],name): debug("%s: start addr is 0x%x and size is 0x%x"%(mem["name"],mem["start"],mem["size"])) self.Comment("#### %s: start addr is 0x%x and size is 0x%x"%(mem["name"],mem["start"],mem["size"])) checking_mem = mem for mem in self.selected: if ne(mem["name"],checking_mem["name"]): if checking_mem["start"] + checking_mem["size"] <= mem["start"]: pass elif checking_mem["start"] >= mem["start"] + mem["size"]: pass else: error("%s: start addr is 0x%x and size is 0x%x"%(checking_mem["name"],checking_mem["start"],checking_mem["size"])) error("%s: start addr is 0x%x and size is 0x%x"%(mem["name"],mem["start"],mem["size"])) self.Error_exit("selected mem %s and selected mem %s overlap!"%(checking_mem["name"],mem["name"])) else: pass for mem in self.spare_range: if checking_mem["start"] + checking_mem["size"] <= mem["start"]: pass elif checking_mem["start"] >= mem["start"] + mem["size"]: pass else: error("%s: start addr is 0x%x and size is 0x%x"%(checking_mem["name"],checking_mem["start"],checking_mem["size"])) error("spare mem: start addr is 0x%x and size is 0x%x"%(mem["start"],mem["size"])) self.Error_exit("selected mem %s and spare mem overlap!"%(checking_mem["name"]))
# Parse a .jtrk (JSON-lines) tracer log. Aborts unless the file extension is
# "jtrk"; skips truncated lines (no closing "}"); for every record whose
# status is FAIL, records the first failing line per "avp" vector and copies
# its IC log, then mails the failing vectors. Returns 1 if any FAIL was seen,
# else 0. NOTE(review): dict.has_key is Python 2 only.
def Parse_pclmsi_log_jtrk(self,log_file): result = 0 fail_vectors = {} file_type = log_file.split(".")[-1] if not eq(file_type,"jtrk"): self.Error_exit("log type is not jtrk!") with open(log_file,'r') as fl: while True: line = fl.readline() if line: line = line.strip() if line[-1] != "}": warning("Skip the line, because of no end") continue line_decode = json.loads(line) if eq(line_decode["status"], "FAIL"): result = 1 if not fail_vectors.has_key(line_decode["avp"]): fail_vectors[line_decode["avp"]] = line fail_vector = os.path.join(self.base_name_path,line_decode["avp"]) Info(fail_vector,self.freglog) fail_log = self.Record_fail_log(fail_vector,line) self.Copy_ic_log(fail_vector,fail_log) else: break if len(fail_vectors) != 0: #pass self.Send_mail_vectors(fail_vectors) return result
def custAuth(name):
    """Return a requests session logged in to the named local test app.

    Supports 'dvwa' and 'bodgeit'; any other name returns None implicitly.
    Credentials below were scrubbed ('******') upstream.
    """
    session = requests.session()
    if operator.eq(name, 'dvwa'):
        credentials = {
            'username': '******',
            'password': '******',
            'Login': '******'
        }
        session.post('http://127.0.0.1/dvwa/login.php', data=credentials)
        return session
    if operator.eq(name, 'bodgeit'):
        credentials = {
            'username': '******',
            'password': '******',
            'submit': 'Login'
        }
        session.post('http://127.0.0.1:8080/bodgeit/login.jsp', data=credentials)
        return session
# Appium UI walkthrough: wait for the signup activity, sign in with ViaSat
# credentials, pick a profile by accessibility description, then loop reading
# menu choices until a valid one is handled by self.options().
# NOTE(review): the raw_input prompts were credential-scrubbed upstream
# (the '"****** sequences), leaving this block syntactically invalid as-is —
# restore the original literals before attempting to run or refactor it.
def test_login(self): """ Testing Login with ViaSat Credentials """ """ Looking for "Sign In" of application. If found click Else keep searching then exit if necessary""" while(not eq(self.driver.current_activity,".ui.signup.SignupActivity")): time.sleep(0.5) continue textview = self.driver.find_elements_by_class_name("android.widget.TextView") self.driver.implicitly_wait(30) textview[0].click() self.driver.implicitly_wait(30) # wait for sign-in page to load """ Login with ViaSat credentials """ textfield = self.driver.find_elements_by_class_name("android.widget.EditText") username = raw_input("\n\nUsername: "******""): username = constants.username textfield[0].send_keys(username) password = raw_input("Password: "******""): password = constants.password textfield[1].send_keys(password) self.driver.find_elements_by_class_name("android.widget.Button")[0].click() print "Signing In" while(not eq(self.driver.current_activity,".ui.profiles.ProfileSelectionActivity")): time.sleep(0.5) continue name = raw_input("\nPlease type name of your desired profile: ") while(True): try: print "Checking if input is invalid ..." profile = self.driver.find_element_by_android_uiautomator('new UiSelector().description("'+name+'")') profile.click() break; except Exception: name = raw_input("Not an exisitng user, please again: ") while(not eq(self.driver.current_activity,".ui.home.HomeActivity")): time.sleep(0.5) continue self.driver.implicitly_wait(30) self.driver.find_elements_by_class_name("android.widget.Button")[0].click() self.driver.implicitly_wait(30) choice = raw_input("What do you want to do?") valid = self.options(choice) while(not valid): choice = raw_input("Invalid option, try again:") valid = self.options(choice) self.driver.find_elements_by_class_name("android.widget.Button")[1].click() self.driver.implicitly_wait(30)
# Convert a {field: value} dict into SQLAlchemy filter conditions. Resolution
# order per key: the textual expression parser; association-proxy any()/
# contains(); relationship remote-side FK equality; a named or generic
# ("op()") SQLAlchemy operator parsed from the value; IN for list values;
# plain equality as the final fallback.
def parse_dictionary(filter_dict, model): """ Parse a dictionary into a list of SQLAlchemy BinaryExpressions to be used in query filters. :param filter_dict: Dictionary to convert :param model: SQLAlchemy model class used to create the BinaryExpressions :return list: List of conditions as SQLAlchemy BinaryExpressions """ if len(filter_dict) == 0: return [] conditions = [] for k, v in filter_dict.items(): # firts let's check with the expression parser try: conditions += parse('{0}{1}'.format(k, v), model) except ParseError: pass else: continue attr = getattr(model, k) if isinstance(attr, AssociationProxy): # If the condition is a dict, we must use 'any' method to match # objects' attributes. if isinstance(v, dict): conditions.append(attr.any(**v)) else: conditions.append(attr.contains(v)) elif hasattr(attr, 'property') and \ hasattr(attr.property, 'remote_side'): # a relation for fk in attr.property.remote_side: conditions.append(sqla_op.eq(fk, v)) else: try: new_op, v = parse_sqla_operators(v) attr_op = getattr(attr, new_op, None) if attr_op is not None: # try a direct call to named operator on attribute class. new_filter = attr_op(v) else: # try to call custom operator also called "generic" # operator in SQLAlchemy documentation. # cf. sqlalchemy.sql.operators.Operators.op() new_filter = attr.op(new_op)(v) except (TypeError, ValueError): # json/sql parse error if isinstance(v, list): # we have an array new_filter = attr.in_(v) else: new_filter = sqla_op.eq(attr, v) conditions.append(new_filter) return conditions
# Lifelines-style report helper: compute the month length (30/31, February
# with Julian/Gregorian leap handling), mark dates that are in the future or
# past month-end with '*', then emit the date zero-padded as YYYYMMDD plus a
# trailing space via out()/cols(). Relies on report globals (julian, toyear,
# tomonth, today) and operator-style helpers (eq/ne/lt/le/gt/mod/d).
def do_date(datenode) : (day,month,year) = extractdate(datenode) if le(month,0) or gt(month,12) : daysinmonth = 0 elif eq(month,9) or eq(month,4) or eq(month,6) or eq(month,11) : daysinmonth = 30 elif eq(month,2) : if eq(mod(year,4),0) and (julian or (ne(mod(year,100),0) or eq(mod(year,400),0))) : daysinmonth = 29 else : daysinmonth = 28 else : daysinmonth=31 future = 0 if gt(year,toyear) : future = 1 elif eq(year,toyear) : if gt(month,tomonth) : future=1 elif eq(month,tomonth) and gt(day,today) : future=1 if gt(day,daysinmonth) or future : out("*") if lt(year,0) : cols(d(year),6) else : if lt(year,10) : out("0") if lt(year,100) : out("0") if lt(year,1000) : out("0") out(d(year)) if lt(month,10) : out("0") out(d(month)) if lt(day,10) : out ("0") out(d(day)+" ")
def options(self, choice):
    """Tap the overflow-menu entry matching *choice*.

    Only "Settings" (index 0) and "Sign Out" (index 1) are recognised;
    returns False for anything else without touching the driver.
    """
    menu_index = {"Settings": 0, "Sign Out": 1}.get(choice)
    if menu_index is None:
        return False
    self.driver.find_elements_by_class_name("android.widget.ImageButton")[0].click()
    self.driver.find_elements_by_class_name("android.widget.LinearLayout")[menu_index].click()
    return True
def GetActor(self, geom):
    """ return the VTK-Actor for a given ode body or geometry """
    # Choose a matcher based on what the caller handed us: an ODE Body,
    # an identifier string, or the geometry object itself.
    if type(geom) == Body:
        matches = lambda wrapper: eq(wrapper.geom.getBody(), geom)
    elif type(geom) == str:
        matches = lambda wrapper: eq(wrapper.geom.ident, geom)
    else:
        matches = lambda wrapper: eq(wrapper.geom, geom)
    for wrapper in self.obj:
        if matches(wrapper):
            return wrapper.act
def testBasic (self):
    """Wrapped simple/complex types preserve their constructor values (Python 2)."""
    simple = eST(1)
    self.assertEqual(simple, 1)
    complex_elt = eCT(2)
    self.assertEqual(complex_elt.value(), 2)
    instance = eSTs(1, 2, 3, 4)
    self.assertEqual(4, len(instance.eST))
    # Element idx must hold 1+idx; fold the per-element checks with bitwise AND.
    self.assertTrue(functools.reduce(
        operator.iand,
        map(lambda idx: operator.eq(1 + idx, instance.eST[idx]),
            xrange(len(instance.eST))),
        True))
    instance = eCTs(1, 2, 3, 4)
    self.assertEqual(4, len(instance.eCT))
    self.assertTrue(functools.reduce(
        operator.iand,
        map(lambda idx: operator.eq(1 + idx, instance.eCT[idx].value()),
            xrange(len(instance.eCT))),
        True))
# Parse one Bochs "[CPU0 RD/WR]: ... PHY ..." data-access log line and fold it
# into the initial/result data segments: re-parses 8-byte accesses with the
# two-word regex, optionally rebases the address into SMM space via smm_base,
# then walks the access 4-byte-aligned word by word, checking/adding words to
# the initial segment on reads and updating/adding the result segment on both
# reads and writes. NOTE(review): the trailing `if size <= 0:` / `break` pair
# was split across lines by extraction; code left byte-identical.
def update_data_mem(self,line, initial_data_seg,result_data_seg,smm_base = 0): #m = re.search(r'\[CPU0 (\w+)\]: LIN \w+ PHY 0x0000(\w+) \(len=(\d+), (\w+)\): 0x(\w+) 0x(\w+)',line) m = re.search(r'\[CPU0 (\w+)\]:.*PHY 0x0000(\w+) \(len=(\d+), (\w+)\): 0x(\w+)',line) if m: size = int(m.group(3),10) if size == 0x8: m = re.search(r'\[CPU0 (\w+)\]:.*PHY 0x0000(\w+) \(len=(\d+), (\w+)\): 0x(\w+) 0x(\w+)',line) size = int(m.group(3),10) data_all = self.adjust_data(size,m.group(5)+m.group(6)) # info("size is %d and data all is %s"%(size,data_all)) else: data_all= self.adjust_data(size, m.group(5)) # need to sync program format cmd = m.group(1) if smm_base == 0: addr = int(m.group(2),16) else: addr = int(m.group(2),16)&0xFFFF + smm_base mem_type = m.group(4) offset = addr%4 data_ptr_st= 0 #info("%s %s %s %s %s"%(m.group(1),m.group(2),m.group(3),m.group(4),m.group(5))) while True: mem_addr = "%08x"%(addr - addr%4) if size + offset<= 0x4: data_ptr_end = len(data_all) real_size = size else: data_ptr_end = 2 * (4-offset) + data_ptr_st real_size = 4 - offset data = data_all[data_ptr_st : data_ptr_end] data = self.adjust_data(real_size, data) # if eq("000fa204",mem_addr) or eq("000fa208",mem_addr) or eq("000fa20c",mem_addr): # info("real size is %d and data all is %s"%(real_size,data)) # info("s is %d and e is %d"%(data_ptr_st,data_ptr_end)) if eq(cmd,"RD"): if mem_addr in initial_data_seg.mem_lines: initial_data_seg.check_mem_line(mem_addr,offset,real_size,data) else: initial_data_seg.add_mem_line(mem_addr,offset,real_size,data) if mem_addr in result_data_seg.mem_lines: result_data_seg.update_mem_line(mem_addr,offset,real_size,data) else: result_data_seg.add_mem_line(mem_addr,offset,real_size,data) elif eq(cmd,"WR"): if mem_addr in result_data_seg.mem_lines: result_data_seg.update_mem_line(mem_addr,offset,real_size,data) else: result_data_seg.add_mem_line(mem_addr,offset,real_size,data) size = size - real_size addr = addr + 4 offset = 0 data_ptr_st = data_ptr_end if size <= 0: 
break
def check(x=9):
    """Classic FizzBuzz for a single value.

    Returns "Fizz Buzz" when x is divisible by both 3 and 5, "Fizz" for 3
    only, "Buzz" for 5 only, and x itself otherwise.
    """
    # Idiom fix: plain % / == / `and` instead of operator.eq/operator.mod
    # and bitwise `&` on booleans.
    divisible_by_3 = x % 3 == 0
    divisible_by_5 = x % 5 == 0
    if divisible_by_3 and divisible_by_5:
        return "Fizz Buzz"
    if divisible_by_3:
        return "Fizz"
    if divisible_by_5:
        return "Buzz"
    return x
def Write_page(self):
    """Dispatch to the page-table writer matching self.page_mode, or abort."""
    self.Comment("######################set page and cr3#######################")
    writers = {
        "4KB_64bit": self.Write_page_4K_64,
        "4KB_32bit": self.Write_page_4K_32,
        "4MB": self.Write_page_4M,
        "2MB": self.Write_page_2M,
        "1GB": self.Write_page_1G,
    }
    writer = writers.get(self.page_mode)
    if writer is None:
        Util.Error_exit("Invalid page mode!")
    else:
        writer()
def removeGeom(self, geom):
    """Remove the wrapper (and its actor) matching *geom*.

    geom may be an ODE Body, an identifier string, or a geometry object.
    Returns True when a wrapper was removed, False otherwise.
    """
    if type(geom) == Body:
        _find = lambda o: eq(o.geom.getBody(), geom)
    # BUG FIX: this was a separate `if`, so for a Body argument the trailing
    # `else` immediately overwrote the matcher with the raw-geom comparison.
    # Now mirrors the correct chain in GetActor.
    elif type(geom) == str:
        _find = lambda o: eq(o.geom.ident, geom)
    else:
        _find = lambda o: eq(o.geom, geom)
    for obj in self.obj:
        if _find(obj):
            self.removeActor(obj.act)
            self.obj.remove(obj)
            del(obj)
            return True
    return False
def check_result(dump_file_instr, ic_file_instr, ic_file):
    """Compare decoded instructions from the IC file against the dump file.

    Walks the common prefix of both lists, stops at the first address/data
    mismatch (logging both sides), then reports how many instructions were
    checked together with the tracer reload count.
    """
    global reload_num
    check_len = min(len(ic_file_instr), len(dump_file_instr))
    # BUG FIX: `index` was only bound inside the loop, so empty inputs made
    # the summary line below raise NameError. -1 yields "check 0 ... done".
    index = -1
    for index in range(check_len):
        if (ic_file_instr[index]["addr"] == dump_file_instr[index]["addr"]
                and ic_file_instr[index]["data"] == dump_file_instr[index]["data"]):
            continue
        info("Ic file %s occur a mismatch and exit!"%(ic_file))
        info("Instr %d, Addr %s, Data %s in ic file"%(index+10, ic_file_instr[index]["addr"],ic_file_instr[index]["data"]))
        info("Instr %d, Addr %s, Data %s in dump file"%(index+1,dump_file_instr[index]["addr"],dump_file_instr[index]["data"]))
        break
    info("Totally check %d instructions done and Tracer reload instr num is %d"%(index+1,reload_num-1))
# Parse one "LEN n BYTES: <hex> [0x00<addr>]" instruction-fetch log line and
# fold it into the initial/result program segments: optionally rebases the
# address into SMM space via smm_base, patches two specific byte patterns to
# keep Bochs in sync with the CNR reference, then walks the fetch 4-byte-
# aligned word by word, checking/adding the initial segment and updating/
# adding the result segment for every word.
def update_program_mem(self,line,initial_program,result_program,smm_base = 0): data_ptr_st= 0 #info(line) #m = re.search(r'LEN (\w+)',line) m = re.search(r'LEN (\d+) BYTES: (\w+) \[0x00(\w+)\]',line) if m: #info("LEN %s, BYTES %s, %s"%(m.group(1),m.group(2),m.group(3))) data_raw = m.group(2) if smm_base == 0: addr = int(m.group(3),16) else: addr = (int(m.group(3),16)&0xFFFF) + smm_base if eq(m.group(2),"66bb00ff0300"): data_raw = "66bb00ffF0CF"# modify bochs instruction addr for sync cnr if eq(m.group(2),"3c64"): data_raw = "3c00"# modify bochs instruction addr for sync cnr #info("%08x"%(smm_base)) #info("%08x"%(addr)) #info("%08x"%(int(m.group(3),16))) size = int(m.group(1),10) offset = addr%4 while True: mem_addr = "%08x"%(addr - addr%4) if size + offset<= 0x4: data_ptr_end = len(data_raw) real_size = size else: data_ptr_end = 2 * (4-offset) + data_ptr_st real_size = 4 - offset data = data_raw[data_ptr_st : data_ptr_end] data = self.adjust_data(real_size, data) # if eq(mem_addr,"000c0000"): # info("real size is %d, offset is %d, data is %s"%(real_size,offset,data)) if mem_addr in initial_program.mem_lines: initial_program.check_mem_line(mem_addr,offset,real_size,data) else: initial_program.add_mem_line(mem_addr,offset,real_size,data) if mem_addr in result_program.mem_lines: result_program.update_mem_line(mem_addr,offset,real_size,data) else: result_program.add_mem_line(mem_addr,offset,real_size,data) size = size - real_size addr = addr + 4 offset = 0 data_ptr_st = data_ptr_end if size <= 0: break
def check_list(qmp_o, key, val=None, check_item_in_pair=True):
    """
    Recursively search QMP output *qmp_o* for an expected key/value.

    :param qmp_o: output of QMP command
    :type qmp_o: list
    :param key: expected key (or bare value when check_item_in_pair is False)
    :type key: str
    :param val: expected value, or None when check_item_in_pair=False
    :type val: str or None
    :param check_item_in_pair: whether entries are dicts (True) or scalars (False)
    :type check_item_in_pair: bool
    :return: True when a match is found anywhere in the nested structure
    :rtype: bool
    """
    for entry in qmp_o:
        if isinstance(entry, dict):
            if _check_dict(entry, key, val, check_item_in_pair):
                return True
        elif isinstance(entry, list):
            if check_list(entry, key, val, check_item_in_pair):
                return True
        elif entry != '' and not check_item_in_pair:
            # Scalar entry: exact or substring match depending on strict_match.
            if strict_match:
                if operator.eq(key, entry):
                    return True
            elif key in str(entry):
                return True
    return False
def _deep_dict_eq(d1, d2):
    """Deep-compare two dicts: identical key sets, then deep-equal values."""
    keys1 = sorted(d1.keys())
    keys2 = sorted(d2.keys())
    if keys1 != keys2:  # keys should be exactly equal
        return _check_assert(False, keys1, keys2, "keys")
    matched = sum(_deep_eq(d1[k], d2[k]) for k in keys1)
    return _check_assert(operator.eq(matched, len(keys1)), d1, d2, "dictionaries")
# Qt slot: synchronise the toolbar with the new cursor position — check the
# action matching the current text alignment, enable table row/column actions
# only while the cursor is inside a table (removal also requires more than
# one row/column), reflect the current list style (index 0 = no list) in the
# list actions, and refresh the cursor when overwrite mode is active.
def cursorPositionChanged(self): currentAlignment = self.textEdit.alignment() try: alignmentIndex = [ operator.eq(currentAlignment, a) for a in (QtCore.Qt.AlignLeft, QtCore.Qt.AlignHCenter, QtCore.Qt.AlignRight) ].index(True) self.alignmentGroup.actions()[alignmentIndex].setChecked(True) except ValueError: # TODO: log this exception pass textCursor = self.textEdit.textCursor() currentTable = textCursor.currentTable() isCursorInTable = currentTable is not None self.insertTableColumnAction.setEnabled(isCursorInTable) self.insertTableRowAction.setEnabled(isCursorInTable) self.removeTableRowAction.setEnabled(isCursorInTable and currentTable.rows() > 1) self.removeTableColumnAction.setEnabled(isCursorInTable and currentTable.columns() > 1) currentList = textCursor.currentList() isCursorInList = currentList is not None if isCursorInList: listStyle = currentList.format().style() index = self.listStyles.index(listStyle) else: index = 0 self.listActions[index].setChecked(True) if self.textEdit.overwriteMode(): self.updateCursor()
# Build the global Scheme environment: every name from the math module plus
# arithmetic/comparison primitives and basic list operations.
# NOTE(review): Python 2 only (op.div, apply, list-returning map); the 'let'
# entry is just an equality test on the first two elements, which looks like
# a placeholder rather than real let semantics — confirm before reuse.
def standard_env(): "An environment with some Scheme standard procedures." import math, operator as op env = Env() env.update(vars(math)) # sin, cos, sqrt, pi, ... env.update({ '+':op.add, '-':op.sub, '*':op.mul, '/':op.div, '>':op.gt, '<':op.lt, '>=':op.ge, '<=':op.le, '=':op.eq, 'abs': abs, 'append': op.add, 'apply': apply, 'begin': lambda *x: x[-1], 'car': lambda x: x[0], 'cdr': lambda x: x[1:], 'cons': lambda x,y: [x] + y, 'eq?': op.is_, 'equal?': op.eq, 'length': len, 'let': lambda x: op.eq(x[0], x[1]), 'list': lambda *x: list(x), 'list?': lambda x: isinstance(x,list), 'map': map, 'max': max, 'min': min, 'not': op.not_, 'null?': lambda x: x == [], 'number?': lambda x: isinstance(x, Number), 'procedure?': callable, 'round': round, 'symbol?': lambda x: isinstance(x, Symbol), 'pi': math.pi, }) return env
def __eq__(self, other):
    """Element-wise equality: numeric tolerance via np.isclose, exact otherwise."""
    as_array = np.array(self)
    try:
        return np.isclose(as_array, other)
    except TypeError:
        # np.isclose only handles numeric dtypes; fall back to plain ==.
        return operator.eq(as_array, other)
# Excel-to-Python formula bridge: fun_database maps spreadsheet function
# names and infix operators to numpy-backed lambdas for eval(). Comparison
# operators return float 0.0/1.0 so IF() can combine them arithmetically
# (IF is implemented as cond*then + (1-cond)*else).
def __init__(self): # xl to py formulas conversion for eval() self.__author__ = __author__ self.__version__ = __version__ # xl to py formula conversion self.fun_database = { 'IF' : lambda args : [args[0]*args[1]+(abs(args[0]-1)*args[2])][0],\ 'AVERAGE' : lambda args : np.average(args[0]),\ 'STDEV.P' : lambda args : np.std(args[0]),\ 'TRANSPOSE' : lambda args : np.transpose(args[0]),\ 'ABS' : lambda args : np.abs(args[0]),\ 'MMULT' : lambda args : np.dot(*args),\ 'IFERROR' : lambda args : self.pyxl_error(*args),\ 'SUM' : lambda args : np.sum(args[0]),\ 'COUNT' : lambda args : np.size(args[0]),\ 'SQRT' : lambda args : np.sqrt(args[0]),\ '^' : lambda args : np.power(*args),\ '<' : lambda args : np.float64(op.lt(*args)),\ '>' : lambda args : np.float64(op.gt(*args)),\ '<=' : lambda args : np.float64(op.le(*args)),\ '>=' : lambda args : np.float64(op.ge(*args)),\ '<>' : lambda args : np.float64(op.ne(*args)),\ '=' : lambda args : np.float64(op.eq(*args)),\ '+' : lambda args : np.add(*args),\ '-' : lambda args : np.subtract(*args),\ '/' : lambda args : np.divide(*args),\ '*' : lambda args : np.multiply(*args) }
def evaluate(self):
    """Evaluate this binary-expression node.

    Both operands are evaluated eagerly first, then combined according to
    self.operation; an unknown operation yields None.
    """
    lhs = self.left.evaluate()
    rhs = self.right.evaluate()
    op = self.operation
    if op == '+':
        return operator.add(lhs, rhs)
    if op == '-':
        return operator.sub(lhs, rhs)
    if op == '*':
        return operator.mul(lhs, rhs)
    if op == '/':
        # NOTE: operator.div exists only on Python 2.
        return operator.div(lhs, rhs)
    if op == '^':
        return operator.pow(lhs, rhs)
    if op == 'and':
        return lhs and rhs
    if op == 'or':
        return lhs or rhs
    if op == '<':
        return operator.lt(lhs, rhs)
    if op == '<=':
        return operator.le(lhs, rhs)
    if op == '==':
        return operator.eq(lhs, rhs)
    if op == '!=':
        return operator.ne(lhs, rhs)
    if op == '>':
        return operator.gt(lhs, rhs)
    if op == '>=':
        return operator.ge(lhs, rhs)
    if op == 'in':
        return lhs in rhs
    return None
def Load_pmc(self):
    """Return [address, size] of the '.pmc' section, or None when absent."""
    #info(self.c_code_sec_info)
    for section in self.c_code_sec_info:
        if eq(section["Name"], ".pmc"):
            return [section["Addr"], section["Size"]]
def evaluate(cond):
    # Evaluate one domain-filter condition against the enclosing `model`.
    # Bare booleans pass straight through; otherwise cond is (field, op, value).
    if isinstance(cond, bool):
        return cond
    left, oper, right = cond
    if not model or not left in model.mgroup.fields:  # check that the field exist
        return False
    oper = self.OPERAND_MAPPER.get(oper.lower(), oper)
    current = model[left].get(model)
    if oper == '=':
        res = operator.eq(current, right)
    elif oper == '!=':
        res = operator.ne(current, right)
    elif oper == '<':
        res = operator.lt(current, right)
    elif oper == '>':
        res = operator.gt(current, right)
    elif oper == '<=':
        res = operator.le(current, right)
    elif oper == '>=':
        res = operator.ge(current, right)
    elif oper == 'in':
        res = operator.contains(right, current)
    elif oper == 'not in':
        res = operator.not_(operator.contains(right, current))
    return res
# ONOS SDN test case 2: verify the point-to-point intent count between BGP
# peers and speakers. Expects 6 intents per configured peer; if the first CLI
# query falls short, waits the route-delivery delay and queries once more
# before asserting. NOTE(review): the onpass message literal was scrubbed
# upstream ("******") — restore before relying on test reports.
def CASE2( self, main ): ''' point-to-point intents test for each BGP peer and BGP speaker pair ''' import time main.case( "Check point-to-point intents" ) main.log.info( "There are %s BGP peers in total " % main.params[ 'config' ][ 'peerNum' ] ) main.step( "Check P2P intents number from ONOS CLI" ) getIntentsResult = main.ONOScli.intents( jsonFormat=True ) bgpIntentsActualNum = \ main.QuaggaCliSpeaker1.extractActualBgpIntentNum( getIntentsResult ) bgpIntentsExpectedNum = int( main.params[ 'config' ][ 'peerNum' ] ) * 6 if bgpIntentsActualNum != bgpIntentsExpectedNum: time.sleep( int( main.params['timers']['RouteDelivery'] ) ) getIntentsResult = main.ONOScli.intents( jsonFormat=True ) bgpIntentsActualNum = \ main.QuaggaCliSpeaker1.extractActualBgpIntentNum( getIntentsResult ) main.log.info( "bgpIntentsExpected num is:" ) main.log.info( bgpIntentsExpectedNum ) main.log.info( "bgpIntentsActual num is:" ) main.log.info( bgpIntentsActualNum ) utilities.assertEquals( \ expect=True, actual=eq( bgpIntentsExpectedNum, bgpIntentsActualNum ), onpass="******", onfail="PointToPointIntent Intent Num is wrong!" )
def Gen_mem_card(self, cmd):
    """Write one "initial" or "results" memory-card section to the chx avp file."""
    if eq(cmd, "initial"):
        card = self.initial
        dump_card = self.initial_dump_chx
    elif eq(cmd, "results"):
        card = self.results
        dump_card = self.results_dump_chx
    else:
        error_exit("Wrong mem card type!")
    out = self.chx_avp
    out.write("%s {\n" % (cmd))
    # Emit entries sorted by address; iteritems() keeps Python 2 behaviour.
    for addr, value in sorted(card.iteritems(), key=lambda item: item[0]):
        out.write("\tmemory\t0x%s\t0x%s\n" % (addr, value))
    out.write("////\tCHX new tracer dump data\n")
    for addr, value in sorted(dump_card.iteritems(), key=lambda item: item[0]):
        out.write("\tmemory\t0x%s\t0x%s\n" % (addr, value))
    out.write("}\n")
def _deep_iter_eq(l1, l2):
    """Deep-compare two iterables: equal lengths, then pairwise deep equality."""
    if len(l1) != len(l2):
        return _check_assert(False, l1, l2, "lengths")
    matched = sum(_deep_eq(a, b) for a, b in zip(l1, l2))
    return _check_assert(operator.eq(matched, len(l1)), l1, l2, "iterables")
def branch_affected(changes, name):
    """Return True if any change's new ref name (path ['new','name']) equals *name*."""
    def targets_branch(change):
        return eq(name, path(['new', 'name'], change))
    return any_pass(targets_branch, changes)
# Django view: bulk-import test cases from an uploaded Excel workbook.
# Validates the header row against the expected template columns via
# operator.eq, pre-checks every row with is_all_case(), then per data row
# looks up the Template_info and Interface_info records and either updates
# the existing User_case (matched through Template_detail) or creates a new
# User_case plus its Template_detail link. On pre-check failure the error
# payload is returned as JSON. NOTE(review): no response is returned on the
# success path or on a header mismatch — confirm intended.
def upload_case(request): wb = xlrd.open_workbook( filename=None, file_contents=request.FILES['file'].read()) # 读取excel文件 table = wb.sheets()[0] row = table.nrows # 总行数 #table_header =['项目名称', '用例集名称', '用例名称', '接口名称', '接口URL', '请求头', '入参','执行顺序'] table_header = [ '项目名称', '用例集名称', '用例名称', '接口名称', '接口URL', '请求头', '入参', '执行顺序', '检查点' ] # 判断模板是否正确,通过operator.eq() 比较两个list是否相等 if (operator.eq(table_header, table.row_values(0))): #判断校验是否通过,如果不通过,返回错误信息 if (is_all_case(table, row) == True): # print(is_all_case(table,row)) for i in range(1, row): # 获取每一行的数据 cel = table.row_values(i) for c in range(len(cel)): if (isinstance(cel[c], (str))): #如果是str去除空格 cel[c] = cel[c].replace(' ', '') #去除空格 #1、获取模板的对象 template_ids = Template_info.objects.get(pnumber__pname=cel[0], templatename=cel[1], isdelete=0) # 2、获取接口对象 interface_id = Interface_info.objects.get(interfacename=cel[3], url=cel[4], pname=cel[0], isdelete=0) #判断用例是否存在 is_case = Template_detail.objects.filter( templateid__pnumber__pname=cel[0], templateid__templatename=cel[1], usercaseid__usercasename=cel[2], interfaceid__interfacename=cel[3], isdelete=0).values("usercaseid") if (is_case): User_case.objects.filter( id=list(is_case)[0].get("usercaseid")).update( usercasename=cel[2], interfaceurl=cel[4], headerinfo=cel[5], paraminfo=cel[6], isjoin=0, isheader=1, run_order=cel[7], isequal=cel[8], status=0) else: # 3、添加用例,返回用例对象 case = User_case.objects.create(usercasename=cel[2], interfaceurl=cel[4], headerinfo=cel[5], paraminfo=cel[6], isjoin=0, isheader=1, run_order=cel[7], isequal=cel[8], interfaceid=interface_id, status=0) # 4、添加接口明细表 Template_detail.objects.create(interfaceid=interface_id, templateid=template_ids, usercaseid=case) else: return HttpResponse(json.dumps(is_all_case(table, row)))
def test_time_invalid_compare_on_py2():
    # On CPython 2 a datetime.time and a string literal short-circuit rather
    # than compare in a deferred way, so the equality must evaluate falsy.
    assert not operator.eq(time(10, 0), literal('10:00'))
delimiter=",") for j in range(2): n_estimators = 10 + j * 10 gbr = MyGradientBoostingRegressor(n_estimators=n_estimators, max_depth=5, min_samples_split=2) gbr.fit(x_train, y_train) model_string = gbr.get_model_string() with open( "Test_data" + os.sep + "gradient_boosting_" + str(i) + "_" + str(j) + ".json", 'r') as fp: test_model_string = json.load(fp) with open( os.path.join( 'save', 'gradient_boosting_gen_' + str(i) + "_" + str(j) + '.json'), 'w') as outfile: json.dump(model_string, outfile) print(operator.eq(model_string, test_model_string)) y_pred = gbr.predict(x_train) y_test_pred = np.genfromtxt("Test_data" + os.sep + "y_pred_gradient_boosting_" + str(i) + "_" + str(j) + ".csv", delimiter=",") print(np.square(y_pred - y_test_pred).mean() <= 10**-10) # print()
def gte(a, b):
    """Return True when a >= b.

    Simplified: a single operator.ge call replaces the equivalent
    `operator.gt(a, b) or operator.eq(a, b)` pair.
    """
    return operator.ge(a, b)
# Socket client download: receive the server's file listing, prompt for a
# filename and send it, unpack the struct-encoded '128sI' header (name, size),
# then stream the payload into 'new_<name>'. Inline comments translated from
# Chinese; user-facing message strings left untouched.
# NOTE(review): the first 1024-byte recv before the header is discarded and
# the receive loop breaks after the first short read — verify against the
# server's send protocol.
def download(sock): # receive the downloadable file listing from the server filelist = sock.recv(1024).decode() if operator.eq(filelist, ''): print('没有可以下载的文件') print(filelist) # prompt the user for a filename and send it to the server filename = input('请输入要下载的文件名:\n') sock.send(filename.encode()) # receive the fixed-size header and unpack name/size FILEINFO_SIZE = struct.calcsize('128sI') try: fhead = sock.recv(1024) fhead = sock.recv(FILEINFO_SIZE) filename, filesize = struct.unpack('128sI', fhead) # receive the file payload with open('new_' + filename.decode().strip('\00'), 'wb') as f: ressize = filesize while True: if ressize > 1024: filedata = sock.recv(1024) else: filedata = sock.recv(ressize) f.write(filedata) break if not filedata: break f.write(filedata) ressize = ressize - len(filedata) if ressize < 0: break print('文件传输成功!') except Exception as e: print(e) print('文件传输失败!')
# Selenium scrape of a liquor-product listing site: walk every sub-category in
# the 11th nav menu, page through each listing, open every row and collect
# product and brewery names into `data` ("맥주"/beer is skipped; 탁주/약주/청주
# read the brewery from the detail body, others split the subject on '/').
# NOTE(review): this fragment was flattened by extraction — the trailing
# data[cnt]["BreweryName"] assignment continues on the following line; code
# left byte-identical.
data = {} cnt = 0 for i in range(1, len(browser.find_element_by_class_name("hbg_t").find_element_by_tag_name("ul").find_elements_by_tag_name("li")[10].find_element_by_tag_name("ul").find_elements_by_tag_name("li"))): browser.find_element_by_class_name("hbg_t").find_element_by_tag_name("ul").find_elements_by_tag_name("li")[10].find_element_by_tag_name("ul").find_elements_by_tag_name("li")[i].click() category = browser.find_element_by_class_name("hbg_t").find_element_by_tag_name("ul").find_elements_by_tag_name("li")[10].find_element_by_tag_name("ul").find_elements_by_tag_name("li")[i].text time.sleep(10) pageLen = int(re.sub("페이지수 : ", "", browser.find_element_by_class_name("listnum").text.split(",")[1]).split("/")[1]) for k in range(0, pageLen): contentLen = len(browser.find_element_by_class_name("table_list").find_element_by_tag_name("tbody").find_elements_by_tag_name("tr")) for j in range(0, contentLen): data[cnt] = {} browser.find_element_by_class_name("table_list").find_element_by_tag_name("tbody").find_elements_by_tag_name("tr")[j].find_element_by_class_name("list_subject").find_element_by_tag_name("a").click() time.sleep(10) if eq(category, "탁주") or eq(category, "약주") or eq(category, "청주"): data[cnt]["ProductName"] = browser.find_element_by_class_name("board_view").find_element_by_tag_name("tbody").find_elements_by_tag_name("tr")[0].find_element_by_class_name("view_subject").text data[cnt]["BreweryName"] = re.sub("제조장명 : ", "", browser.find_element_by_class_name("view_content").text.split("\n")[1]) cnt += 1 browser.back() time.sleep(10) elif eq(category, "맥주"): continue else: data[cnt]["ProductName"] = browser.find_element_by_class_name("board_view").find_element_by_tag_name("tbody").find_elements_by_tag_name("tr")[0].find_element_by_class_name("view_subject").text.split("/")[1] data[cnt]["BreweryName"] = 
browser.find_element_by_class_name("board_view").find_element_by_tag_name("tbody").find_elements_by_tag_name("tr")[0].find_element_by_class_name("view_subject").text.split("/")[0] cnt += 1 browser.back() time.sleep(10)
def is_equal_dict(self, dict_one, dict_two):
    """Return True when the two dicts are equal, JSON-decoding str inputs first.

    BUG FIX: when dict_two arrived as a JSON string, the old code decoded it
    into dict_one — clobbering the first argument and leaving dict_two a raw
    string — so the comparison could never succeed for that case.
    """
    if isinstance(dict_one, str):
        dict_one = json.loads(dict_one)
    if isinstance(dict_two, str):
        dict_two = json.loads(dict_two)
    return operator.eq(dict_one, dict_two)
from past.builtins import long LOGGER = logging.getLogger('pyresttest.validators') # Binary comparison tests COMPARATORS = { 'count_eq': lambda x, y: safe_length(x) == y, 'count_lte': lambda x, y: safe_length(x) <= y, 'count_gte': lambda x, y: safe_length(x) >= y, 'lt': operator.lt, 'less_than': operator.lt, 'le': operator.lt, 'less_than_or_equal': operator.lt, 'eq': operator.eq, 'equals': operator.eq, 'str_eq': lambda x, y: operator.eq(str(x), str(y)), 'ne': operator.ne, 'not_equals': operator.ne, 'ge': operator.ge, 'greater_than_or_equal': operator.ge, 'gt': operator.gt, 'greater_than': operator.gt, 'contains': lambda x, y: x and operator.contains(x, y), # is y in x 'contained_by': lambda x, y: y and operator.contains(y, x), # is x in y 'regex': lambda x, y: regex_compare(str(x), str(y)), 'type': lambda x, y: test_type(x, y) } COMPARATORS['length_eq'] = COMPARATORS['count_eq'] # Allow for testing basic types in comparators TYPES = {
def simulate(self, value_store, state_store):
    """Combinational equality: drive out with the single bit (in0 == in1)."""
    lhs = BitVector(value_store.get_value(self.in0))
    rhs = BitVector(value_store.get_value(self.in1))
    result_bit = operator.eq(lhs, rhs).as_bool_list()[0]
    value_store.set_value(self.out, result_bit)
def test_api(host_id, case_id, project_id, _id):
    """Execute one API test step of an automation test case.

    :param host_id: id of the GlobalHost (base URL) to test against
    :param case_id: id of the automation test case
    :param project_id: id of the owning project
    :param _id: id of the case's API step to execute
    :return: 'success' / 'fail' / 'timeout' / 'ERROR' / JSON-check result
    """
    host = GlobalHost.objects.get(id=host_id, project=project_id)
    data = AutomationCaseApiSerializer(AutomationCaseApi.objects.get(id=_id, automationTestCase=case_id)).data
    http_type = data['httpType']
    request_type = data['requestType']
    address = host.host + data['apiAddress']
    head = json.loads(serializers.serialize('json', AutomationHead.objects.filter(automationCaseApi=_id)))
    header = {}
    request_parameter_type = data['requestParameterType']
    examine_type = data['examineType']
    http_code = data['httpCode']
    response_parameter_list = data['responseData']
    if http_type == 'HTTP':
        url = 'http://'+address
    else:
        url = 'https://'+address
    if data['requestParameterType'] == 'form-data':
        # Build the parameter dict, resolving "<response[...]>" correlations
        # against results recorded by previously executed API steps.
        parameter_list = json.loads(serializers.serialize('json', AutomationParameter.objects.filter(automationCaseApi=_id)))
        parameter = {}
        for i in parameter_list:
            key_ = i['fields']['name']
            value = i['fields']['value']
            try:
                if i['fields']['interrelate']:
                    interrelate_type = re.findall('(?<=<response\[).*?(?=\])', value)
                    if interrelate_type[0] == "JSON":
                        api_id = re.findall('(?<=<response\[JSON]\[).*?(?=\])', value)
                        a = re.findall('(?<=\[").*?(?="])', value)
                        try:
                            # SECURITY NOTE(review): eval() on stored response data — trusted-data assumption.
                            # NOTE(review): uses [0] here but [-1] in the header loop below — confirm which run is intended.
                            param_data = eval(json.loads(serializers.serialize(
                                'json', AutomationTestResult.objects.filter(automationCaseApi=api_id[0])))[0]['fields']["responseData"])
                            for j in a:
                                param_data = param_data[j]
                        except Exception as e:
                            logging.exception(e)
                            record_results(_id=_id, url=url, request_type=request_type, header=header, parameter=parameter, host=host.name, status_code=http_code, examine_type=examine_type, examine_data=response_parameter_list, _result='ERROR', code="", response_data="")
                            return 'fail'
                    elif interrelate_type[0] == "Regular":
                        api_id = re.findall('(?<=<response\[Regular]\[).*?(?=\])', value)
                        pattern = re.findall('(?<=\[").*?(?="])', value)
                        param_data = json.loads(serializers.serialize(
                            'json', AutomationTestResult.objects.filter(automationCaseApi=api_id[0])))[-1]['fields']["responseData"]
                        param_data = re.findall(pattern[0], param_data.replace("\'", "\""))[0]
                    else:
                        record_results(_id=_id, url=url, request_type=request_type, header=header, parameter=parameter, host=host.name, status_code=http_code, examine_type=examine_type, examine_data=response_parameter_list, _result='ERROR', code="", response_data="")
                        return 'fail'
                    pattern = re.compile(r'<response\[.*]')
                    parameter[key_] = re.sub(pattern, str(param_data), value)
                else:
                    parameter[key_] = value
            except KeyError as e:
                logging.exception(e)
                record_results(_id=_id, url=url, request_type=request_type, header=header, parameter=parameter, host=host.name, status_code=http_code, examine_type=examine_type, examine_data=response_parameter_list, _result='ERROR', code="", response_data="关联有误!")
                return 'fail'
        if data["formatRaw"]:
            request_parameter_type = "raw"
    else:
        # Raw-body request: use the stored raw payload, if any.
        parameter = AutomationParameterRawSerializer(AutomationParameterRaw.objects.filter(automationCaseApi=_id), many=True).data
        if len(parameter):
            if len(parameter[0]["data"]):
                try:
                    parameter = eval(parameter[0]["data"])  # SECURITY NOTE(review): eval on stored payload
                except Exception as e:
                    logging.exception(e)
                    record_results(_id=_id, url=url, request_type=request_type, header=header, parameter=parameter, host=host.name, status_code=http_code, examine_type=examine_type, examine_data=response_parameter_list, _result='ERROR', code="", response_data="")
                    return 'fail'
            else:
                parameter = {}
        else:
            parameter = {}
    # Build headers, with the same "<response[...]>" correlation resolution.
    for i in head:
        key_ = i['fields']['name']
        value = i['fields']['value']
        if i['fields']['interrelate']:
            try:
                interrelate_type = re.findall('(?<=<response\[).*?(?=\])', value)
                if interrelate_type[0] == "JSON":
                    api_id = re.findall('(?<=<response\[JSON]\[).*?(?=\])', value)
                    a = re.findall('(?<=\[").*?(?="])', value)
                    try:
                        param_data = eval(json.loads(serializers.serialize(
                            'json', AutomationTestResult.objects.filter(automationCaseApi=api_id[0])))[-1]['fields']["responseData"])
                        for j in a:
                            param_data = param_data[j]
                    except Exception as e:
                        logging.exception(e)
                        record_results(_id=_id, url=url, request_type=request_type, header=header, parameter=parameter, host=host.name, status_code=http_code, examine_type=examine_type, examine_data=response_parameter_list, _result='ERROR', code="", response_data="关联有误!")
                        return 'fail'
                elif interrelate_type[0] == "Regular":
                    api_id = re.findall('(?<=<response\[Regular]\[).*?(?=\])', value)
                    pattern = re.findall('(?<=\[").*?(?="])', value)
                    param_data = json.loads(serializers.serialize(
                        'json', AutomationTestResult.objects.filter(automationCaseApi=api_id[0])))[0]['fields']["responseData"]
                    param_data = re.findall(pattern[0], param_data.replace("\'", "\""))[0]
                else:
                    record_results(_id=_id, url=url, request_type=request_type, header=header, parameter=parameter, host=host.name, status_code=http_code, examine_type=examine_type, examine_data=response_parameter_list, _result='ERROR', code="", response_data="")
                    return 'fail'
                pattern = re.compile(r'<response\[.*]')
                header[key_] = re.sub(pattern, str(param_data), value)
            except Exception as e:
                logging.exception(e)
                record_results(_id=_id, url=url, request_type=request_type, header=header, parameter=parameter, host=host.name, status_code=http_code, examine_type=examine_type, examine_data=response_parameter_list, _result='ERROR', code="", response_data="关联有误!")
                return 'fail'
        else:
            header[key_] = value
    header["Content-Length"] = '%s' % len(str(parameter))
    # Dispatch by HTTP verb; helpers return (status_code, parsed_response).
    try:
        if request_type == 'GET':
            code, response_data = get(header, url, request_parameter_type, parameter)
        elif request_type == 'POST':
            code, response_data = post(header, url, request_parameter_type, parameter)
        elif request_type == 'PUT':
            code, response_data = put(header, url, request_parameter_type, parameter)
        elif request_type == 'DELETE':
            code, response_data = delete(header, url, request_parameter_type, parameter)
        else:
            return 'ERROR'
    except ReadTimeout:
        # NOTE(review): logs the exception CLASS, not the caught instance.
        logging.exception(ReadTimeout)
        record_results(_id=_id, url=url, request_type=request_type, header=header, parameter=parameter, host=host.name, status_code=http_code, examine_type=examine_type, examine_data=response_parameter_list, _result='TimeOut', code="408", response_data="")
        return 'timeout'
    # Verify the response according to the step's configured examine_type.
    if examine_type == 'no_check':
        record_results(_id=_id, url=url, request_type=request_type, header=header, parameter=parameter, host=host.name, status_code=http_code, examine_type=examine_type, examine_data=response_parameter_list, _result='PASS', code=code, response_data=response_data)
        return 'success'
    elif examine_type == 'json':
        if int(http_code) == code:
            if not response_parameter_list:
                response_parameter_list = "{}"
            try:
                logging.info(response_parameter_list)
                logging.info(response_data)
                result = check_json(json.loads(response_parameter_list), response_data)
            except Exception:
                # Fall back to Python-literal parsing of the expected JSON.
                logging.info(response_parameter_list)
                result = check_json(eval(response_parameter_list.replace('true', 'True').replace('false', 'False')), response_data)
            if result:
                record_results(_id=_id, url=url, request_type=request_type, header=header, parameter=parameter, status_code=http_code, examine_type="JSON校验", examine_data=response_parameter_list, host=host.name, _result='PASS', code=code, response_data=response_data)
            else:
                record_results(_id=_id, url=url, request_type=request_type, header=header, parameter=parameter, status_code=http_code, examine_type="JSON校验", examine_data=response_parameter_list, host=host.name, _result='FAIL', code=code, response_data=response_data)
            return result
        else:
            record_results(_id=_id, url=url, request_type=request_type, header=header, parameter=parameter, status_code=http_code, examine_type="JSON校验", examine_data=response_parameter_list, host=host.name, _result='FAIL', code=code, response_data=response_data)
            return 'fail'
    elif examine_type == 'only_check_status':
        if int(http_code) == code:
            record_results(_id=_id, url=url, request_type=request_type, header=header, parameter=parameter, status_code=http_code, examine_type="校验HTTP状态", examine_data=response_parameter_list, host=host.name, _result='PASS', code=code, response_data=response_data)
            return 'success'
        else:
            record_results(_id=_id, url=url, request_type=request_type, header=header, parameter=parameter, status_code=http_code, examine_type="校验HTTP状态", examine_data=response_parameter_list, host=host.name, _result='FAIL', code=code, response_data=response_data)
            return 'fail'
    elif examine_type == 'entirely_check':
        if int(http_code) == code:
            try:
                result = operator.eq(json.loads(response_parameter_list), response_data)
            except Exception as e:
                logging.exception(e)
                result = operator.eq(eval(response_parameter_list.replace('true', 'True').replace('false', 'False')), response_data)
            if result:
                record_results(_id=_id, url=url, request_type=request_type, header=header, parameter=parameter, status_code=http_code, examine_type="完全校验", examine_data=response_parameter_list, host=host.name, _result='PASS', code=code, response_data=response_data)
                return 'success'
            else:
                record_results(_id=_id, url=url, request_type=request_type, header=header, parameter=parameter, status_code=http_code, examine_type="完全校验", examine_data=response_parameter_list, host=host.name, _result='FAIL', code=code, response_data=response_data)
                return 'fail'
        else:
            record_results(_id=_id, url=url, request_type=request_type, header=header, parameter=parameter, status_code=http_code, examine_type="完全校验", examine_data=response_parameter_list, host=host.name, _result='FAIL', code=code, response_data=response_data)
            return 'fail'
    elif examine_type == 'Regular_check':
        if int(http_code) == code:
            try:
                logging.info(response_parameter_list)
                result = re.findall(response_parameter_list, json.dumps(response_data))
                logging.info(result)
            except Exception as e:
                logging.exception(e)
                return "fail"
            if result:
                record_results(_id=_id, url=url, request_type=request_type, header=header, parameter=parameter, status_code=http_code, examine_type="正则校验", examine_data=response_parameter_list, host=host.name, _result='PASS', code=code, response_data=response_data)
                return 'success'
            else:
                record_results(_id=_id, url=url, request_type=request_type, header=header, parameter=parameter, status_code=http_code, examine_type="正则校验", examine_data=response_parameter_list, host=host.name, _result='FAIL', code=code, response_data=response_data)
                return 'fail'
        else:
            record_results(_id=_id, url=url, request_type=request_type, header=header, parameter=parameter, status_code=http_code, examine_type="正则校验", examine_data=response_parameter_list, host=host.name, _result='FAIL', code=code, response_data=response_data)
            return 'fail'
    else:
        record_results(_id=_id, url=url, request_type=request_type, header=header, parameter=parameter, status_code=http_code, examine_type=examine_type, examine_data=response_parameter_list, host=host.name, _result='FAIL', code=code, response_data=response_data)
        return 'fail'
Determine the relationship (equal or not) between two objects in Python ==、cmp()、is assert_frame_equal(): print error once unequal values are detacted ''' import operator import pandas as pd import numpy as np from pandas.testing import assert_frame_equal a = [1, 2, 3] b = [2, 3, 4] d = [1, 2, 3] c = [i - 1 for i in b] # Operator '==' is used to compare 「values」 within variables # which is totally different from or even opposite to the one in Java print(a == c) print(a == d) # Keyword 'is' is used to make sure if two variables point to 「the same object」 print(a is c) print(a is d) # Build-in function operator.eq(), same as '==' in Python, compare values of two variable print(operator.eq(a, c)) print(operator.eq(a, d)) # Compare DataFrame df1 = pd.DataFrame(np.arange(12).reshape(3, -1)) df2 = pd.DataFrame(np.arange(12).reshape(3, -1)) assert_frame_equal(df1, df2) df2.loc[2, 1] = 7 assert_frame_equal(df1, df2)
def check_result(case_data, code, data):
    """Verify a test result against the case's expectations.

    :param case_data: test-case dict containing a 'check_body' section
    :param code: HTTP status code returned by the interface
    :param data: JSON payload returned by the interface
    :return: None; raises on any verification failure
    """
    try:
        # Pull the verification settings for this case.
        check_type = case_data['check_body']['check_type']
        expected_code = case_data['check_body']['expected_code']
        expected_result = case_data['check_body']['expected_result']
    except Exception as e:
        raise KeyError('获取用例检查信息失败:{}'.format(e))
    # Interface-level verification, dispatched on check_type.
    if check_type == 'no_check':
        with allure.step("不校验接口结果"):
            pass
    elif check_type == 'check_code':
        with allure.step("仅校验接口状态码"):
            allure.attach(name="实际code", body=str(code))
            allure.attach(name="期望code", body=str(expected_code))
            allure.attach(name='实际data', body=str(data))
            if int(code) != expected_code:
                raise Exception("接口状态码错误!\n %s != %s" % (code, expected_code))
    elif check_type == 'check_json':
        with allure.step("JSON格式校验接口"):
            allure.attach(name="实际code", body=str(code))
            allure.attach(name="期望code", body=str(expected_code))
            allure.attach(name='实际data', body=str(data))
            allure.attach(name='期望data', body=str(expected_result))
            if int(code) == expected_code:
                if not data:
                    data = "{}"
                check_json(expected_result, data)
            else:
                raise Exception("接口状态码错误!\n %s != %s" % (code, expected_code))
    elif check_type == 'entirely_check':
        with allure.step("完全校验接口结果"):
            allure.attach(name="实际code", body=str(code))
            allure.attach(name="期望code", body=str(expected_code))
            allure.attach(name='实际data', body=str(data))
            allure.attach(name='期望data', body=str(expected_result))
            if int(code) == expected_code:
                result = operator.eq(expected_result, data)
                if not result:
                    raise Exception("完全校验失败! %s != %s" % (expected_result, data))
            else:
                raise Exception("接口状态码错误!\n %s != %s" % (code, expected_code))
    elif check_type == 'regular_check':
        if int(code) == expected_code:
            try:
                result = ""
                if isinstance(expected_result, list):
                    # NOTE(review): only the LAST pattern's matches survive in `result`;
                    # also .replace("\"", "\"") is a no-op, unlike the else branch — confirm intent.
                    for i in expected_result:
                        result = re.findall(i.replace("\"", "\""), str(data))
                        allure.attach('校验完成结果\n', str(result))
                else:
                    result = re.findall(expected_result.replace("\"", "\'"), str(data))
                    with allure.step("正则校验接口结果"):
                        allure.attach(name="实际code", body=str(code))
                        allure.attach(name="期望code", body=str(expected_code))
                        allure.attach(name='实际data', body=str(data))
                        allure.attach(name='期望data', body=str(expected_result).replace("\'", "\""))
                        allure.attach(name=expected_result.replace("\"", "\'") + '校验完成结果', body=str(result).replace("\'", "\""))
                if not result:
                    raise Exception("正则未校验到内容! %s" % expected_result)
            except KeyError:
                raise Exception("正则校验执行失败! %s\n正则表达式为空时" % expected_result)
        else:
            raise Exception("接口状态码错误!\n %s != %s" % (code, expected_code))
    else:
        raise Exception("无该接口校验方式%s" % check_type)
    # Optional database verification, present when the case has a 'check_db' key.
    if 'check_db' in case_data:
        from comm.unit import queryDatabase as qdb
        check_db = case_data['check_db']
        # Resolve correlated values in the expected DB results before comparing.
        # data['parameter'] = case_data['parameter']
        relevance = readRelevance.get_relevance(data, check_db)
        check_db = replaceRelevance.replace(check_db, relevance)
        # Check each configured database assertion in turn.
        for each in check_db:
            try:
                check_type = each['check_type']
                execute_sql = each['execute_sql']
                expected_result = each['expected_result']
            except KeyError as e:
                raise KeyError('【check_db】存在错误字段!\n{}'.format(e))
            except TypeError:
                raise KeyError("【check_db】类型错误,期望<class 'list'>,而不是%s!" % type(expected_result))
            if not isinstance(expected_result, list):
                raise KeyError("【expected_result】类型错误,期望<class 'list'>,而不是%s!" % type(expected_result))
            # Validate the SQL shape: select <cols> from <table> where <cond>
            exp = r"^select (.*?) from (.*?) where (.*?)$"
            res = re.findall(exp, execute_sql.strip())[0]
            for r in res:
                if not each:
                    msg = '标准格式: ' + exp
                    raise Exception('无效SQL>>> {}\n{}'.format(execute_sql, msg))
            # Dispatch to the right database backend.
            if check_type == 'mysql':
                actual = qdb.query_mysql(execute_sql)
            elif check_type == 'hbase':
                actual = qdb.query_hbase(execute_sql)
            elif check_type == 'solr':
                actual = qdb.query_solr(execute_sql)
            elif check_type == 'es':
                actual = qdb.query_es(execute_sql)
            else:
                raise Exception("无该数据库校验方式%s" % check_type)
            # Attach results and compare row by row.
            mark = check_type.replace('check_', '').upper() + '[' + res[1] + ']'
            with allure.step("校验数据库{}".format(mark)):
                allure.attach(name="实际结果", body=str(actual))
                allure.attach(name='期望结果', body=str(expected_result))
            # expected_num = each['expected_num']
            # allure.attach(name="实际行数", body=str(len(actual)))
            # allure.attach(name='期望行数', body=str(expected_num))
            # # 验证数据库实际结果数量是否正确
            # if len(actual) != int(expected_num):
            #     raise AssertionError('校验数据库{}行数未通过!'.format(mark))
            # Check each expected row against the actual result set.
            for index, expected in enumerate(expected_result):
                try:
                    check_database(actual[index], expected, mark + str(index))
                except IndexError:
                    raise IndexError('校验数据库{}失败,期望结果超出实际条目!'.format(mark + str(index)))
def operator(x, y): return z3.Not(op.eq(x, y))
def time_between(_time, start_time, end_time): if start_time > end_time: # night return _time >= start_time or _time <= end_time elif start_time < end_time: # day return start_time <= _time <= end_time return _time == start_time OPERATORS = { "equal": lambda value, test: op.eq(value, test), "not_equal": lambda value, test: op.not_(op.eq(value, test)), "in": lambda value, test: op.contains(test, value), "not_in": lambda value, test: op.not_(op.contains(test, value)), "greater": lambda value, test: op.gt(value, test), "less": lambda value, test: op.lt(value, test), "greater_or_equal": lambda value, test: op.ge(value, test), "less_or_equal": lambda value, test: op.le(value, test), "between": lambda value, a, b: (a <= value <= b), "not_between": lambda value, a, b: op.not_(a <= value <= b), "is_empty": lambda value: op.not_(value), "is_not_empty": lambda value: op.not_(op.not_(value)), } OPERATORS['is_not_null'] = OPERATORS['is_not_empty'] OPERATORS['is_null'] = OPERATORS['is_empty']
def insertBirthday(self):
    """Interactively read a Chinese name and solar birth date, then compute and
    print a "life code" numerology chart, lucky/talent/life numbers, etc.
    NOTE(review): indentation reconstructed from collapsed source; the placement
    of the three format-error `else` branches and the trailing recursive call
    should be confirmed against the original file.
    """
    name = input('请输入您的中文姓名:')
    ischinese = self.is_chinese(name)
    if name and ischinese is True:
        tf = True
        while tf:
            birthday = input('请按格式2018-01-02输入您的阳历出生日期:')
            if birthday and len(birthday) == 10:
                bdinfo = birthday.split('-')
                year = bdinfo[0]
                month = bdinfo[1]
                day = bdinfo[2]
                if len(year) == 4 and len(month) == 2 and len(day) == 2:
                    if int(month) <= 12 and int(day) <= 31:
                        print('您的出生年月日为:%s年%s月%s日' % (year, month, day))
                        yn = input('确认请输入Y,修正请输入N:')
                        if operator.eq('Y', str(yn)) or operator.eq('y', str(yn)):
                            tf = False  # input confirmed — leave the retry loop
                            # Single digits of day/month/year (A..H), then the
                            # digit-sum reduction pyramid (I..T) via addRule.
                            A = int(day[0]) if len(day) > 1 else 0
                            B = int(day[1]) if len(day) > 1 else int(day)
                            C = int(month[0]) if len(month) > 1 else 0
                            D = int(month[1]) if len(month) > 1 else int(month)
                            E = int(year[0])
                            F = int(year[1])
                            G = int(year[2])
                            H = int(year[3])
                            I = self.addRule(A, B)
                            J = self.addRule(C, D)
                            K = self.addRule(E, F)
                            L = self.addRule(G, H)
                            M = self.addRule(I, J)
                            N = self.addRule(K, L)
                            O = self.addRule(M, N)
                            P = self.addRule(M, O)
                            Q = self.addRule(N, O)
                            R = self.addRule(Q, P)
                            X = self.addRule(I, M)
                            W = self.addRule(J, M)
                            S = self.addRule(X, W)
                            V = self.addRule(K, N)
                            U = self.addRule(L, N)
                            T = self.addRule(V, U)
                            # Lucky number = year / (name stroke count + month + day),
                            # then reduce the quotient's digits.
                            number = int(int(year) / (int(self.countStrokes(name)) + int(month) + int(day)))
                            if len(str(number)) == 1:
                                luckyNum = number
                            elif len(str(number)) == 2:
                                luckyNum = self.addRule(int(str(number)[0]), int(str(number)[1]))
                            elif len(str(number)) == 3:
                                number = int(str(number)[0]) + int(str(number)[1]) + int(str(number)[2])
                                luckyNum = number if len(str(number)) < 2 else int(str(number)[0]) + int(str(number)[1])
                            talentNum = A + B + C + D + E + F + G + H
                            outTrend = S + R + T
                            outTrendType = self.outTrendType(outTrend)
                            # Original chart string kept verbatim (layout collapsed in this view).
                            print('''\n您的生命密码图为: %s %s %s -------------- %s=%s%s / %s \ %s%s=%s / %s %s \ / %s%s %s%s \ /%s%s %s%s %s%s %s%s\\\n''' % (R, Q, P, S, X, W, O, V, U, T, M, N, I, J, K, L, A, B, C, D, E, F, G, H))
                            print('您的主性格数字:%s,在五行中属:%s' % (O, self.wuxing(O)))
                            print('您的幸运数字:%s' % luckyNum)
                            print('您的天赋数字:%s' % talentNum)
                            print('您的生命数字:%s' % self.addRule(talentNum, 0))
                            print('您的潜意识密码:%s%s%s' % (O, I, L))
                            print('您的内心密码:%s%s%s' % (O, M, N))
                            print('您的外心密码:%s%s%s' % (S, R, T))
                            print('您的外在动向等于:%s,您是【%s】' % (outTrend, outTrendType))
                            print("程序计算完毕,感谢使用!")
                        elif operator.eq('N', str(yn)) or operator.eq('n', str(yn)):
                            print("您选择重新输入:")
                    else:
                        print('请检查出生年月的格式!\n')
                else:
                    print('请检查出生年月的格式!\n')
            else:
                print('请检查出生年月的格式!\n')
    else:
        print('请输入合法的中文名!\n')
        self.insertBirthday()  # re-prompt for a valid Chinese name
def __eq__(self, y): return operator.eq(self.val, y)
def make_shard(self, img, bbox, mip=None, spec=None, progress=False):
    """
    Convert an image that represents a single complete shard
    into a shard file.

    img: a volumetric numpy array image
    bbox: the bbox it represents in voxel coordinates
    mip: if specified, use the sharding specification from
      this mip level, otherwise use the sharding spec from
      the current implicit mip level in config.
    spec: use the provided specification (overrides mip parameter)

    Returns: (filename, shard_file)
    """
    mip = mip if mip is not None else self.config.mip
    scale = self.meta.scale(mip)
    # Resolve the sharding spec: explicit argument wins, else read it from the scale.
    if spec is None:
        if 'sharding' in scale:
            spec = sharding.ShardingSpecification.from_dict(scale['sharding'])
        else:
            raise ValueError("mip {} does not have a sharding specification.".format(mip))
    bbox = Bbox.create(bbox)
    if bbox.subvoxel():
        raise ValueError("Bounding box is too small to make a shard. Got: {}".format(bbox))
    # Alignment Checks:
    # 1. Aligned to atomic chunks - required for grid point generation
    aligned_bbox = bbox.expand_to_chunk_size(
        self.meta.chunk_size(mip), offset=self.meta.voxel_offset(mip))
    if bbox != aligned_bbox:
        raise exceptions.AlignmentError(
            "Unable to create shard from a non-chunk aligned bounding box. Requested: {}, Aligned: {}"
            .format(bbox, aligned_bbox))
    # 2. Covers the dataset at least partially
    aligned_bbox = Bbox.clamp(aligned_bbox, self.meta.bounds(mip))
    if aligned_bbox.subvoxel():
        raise exceptions.OutOfBoundsError(
            "Shard completely outside dataset: Requested: {}, Dataset: {}".format(
                bbox, self.meta.bounds(mip)))
    grid_size = self.grid_size(mip)
    chunk_size = self.meta.chunk_size(mip)
    reader = sharding.ShardReader(self.meta, self.cache, spec)
    # 3. Gridpoints all within this one shard
    gpts = list(gridpoints(aligned_bbox, self.meta.bounds(mip), chunk_size))
    morton_codes = compressed_morton_code(gpts, grid_size)
    # Reduce over pairwise filename equality: the chain yields a truthy value only
    # if every gridpoint maps to the same shard file.
    # NOTE(review): this idiom would also report False if a filename itself were
    # falsy — assumes get_filename always returns a non-empty string.
    all_same_shard = bool(
        reduce(lambda a, b: operator.eq(a, b) and a,
               map(reader.get_filename, morton_codes)))
    if not all_same_shard:
        raise exceptions.AlignmentError(
            "The gridpoints for this image did not all correspond to the same shard. Got: {}"
            .format(bbox))
    # Encode each chunk of the image, keyed by its morton code.
    labels = {}
    pt_anchor = gpts[0] * chunk_size
    for pt_abs, morton_code in zip(gpts, morton_codes):
        cutout_bbx = Bbox(pt_abs * chunk_size, (pt_abs + 1) * chunk_size)
        # Neuroglancer expects border chunks not to extend beyond dataset bounds
        cutout_bbx.maxpt = cutout_bbx.maxpt.clip(None, self.meta.volume_size(mip))
        cutout_bbx -= pt_anchor
        chunk = img[cutout_bbx.to_slices()]
        labels[morton_code] = chunks.encode(chunk, self.meta.encoding(mip))
    shard_filename = reader.get_filename(first(labels.keys()))
    return (shard_filename, spec.synthesize_shard(labels, progress=progress))
def _deep_iter_eq(l1, l2): if len(l1) != len(l2): return _check_assert(False, l1, l2, "lengths") return _check_assert( operator.eq(sum(_deep_eq(v1, v2) for v1, v2 in zip(l1, l2)), len(l1)), l1, l2, "iterables")
def assertEq(*args): import operator if not all(map(lambda x: operator.eq(x, args[0]), args[1:])): raise AssertionError('all elements of [%s] are not equal' % ", ".join(map(str, args)))
def getToken(mystr):
    """Scan the next token from `mystr` starting at the global cursor `_p`.

    Sets the global `_value` (lexeme text) and `_syn` (token kind or 'errorN'),
    advancing `_p` past the token. Uses module-level DFA state globals
    (_mstate for strings, _dstate for numbers, _cstate for chars) and the
    tables `_key` (keywords) and `_abnormalChar`.
    NOTE(review): indentation reconstructed from collapsed source — in particular
    the for/else keyword-lookup structure should be confirmed against the original.
    """
    global _p, _value, _syn, _mstate, _dstate, _line, _cstate
    _value = ''
    ch = mystr[_p]
    _p += 1
    # Skip leading spaces.
    while ch == ' ':
        ch = mystr[_p]
        _p += 1
    if ch in string.ascii_letters or ch == '_':  # letter(letter|digit)*
        # Identifier / keyword; abnormal characters are consumed but flagged.
        while ch in string.ascii_letters or ch in string.digits or ch == '_' or ch in _abnormalChar:
            _value += ch
            ch = mystr[_p]
            _p += 1
        _p -= 1
        for abnormal in _abnormalChar:
            if abnormal in _value:
                _syn = 'error6'  # identifier contains an illegal character
                break
        else:
            _syn = 'ID'
            for s in _key:
                if operator.eq(s, _value) == True:
                    _syn = _value.upper()  # key word
                    break
            if _syn == 'ID':
                inSymbolTable(_value)
    elif ch == '\"':  # string
        # Tiny DFA: _mstate 0 = before opening quote, 1 = inside, 2 = closed.
        while ch in string.ascii_letters or ch in '\"% ':
            _value += ch
            if _mstate == 0:
                if ch == '\"':
                    _mstate = 1
            elif _mstate == 1:
                if ch == '\"':
                    _mstate = 2
            ch = mystr[_p]
            _p += 1
        if _mstate == 1:
            _syn = 'error2'  # unterminated string
            _mstate = 0
        elif _mstate == 2:
            _mstate = 0
            _syn = 'STRING'
        _p -= 1
    elif ch in string.digits:
        # Number DFA: _dstate 1 = leading zero, 2 = normal digits, 3 = saw '.', 5 = bad leading zero.
        while ch in string.digits or ch == '.' or ch in string.ascii_letters:
            _value += ch
            if _dstate == 0:
                if ch == '0':
                    _dstate = 1
                else:
                    _dstate = 2
            elif _dstate == 1:
                if ch == '.':
                    _dstate = 3
                else:
                    _dstate = 5
            elif _dstate == 2:
                if ch == '.':
                    _dstate = 3
            ch = mystr[_p]
            _p += 1
        for char in string.ascii_letters:
            if char in _value:
                _syn = 'error7'  # letters embedded in a number
                _dstate = 0
        if _syn != 'error7':
            if _dstate == 5:
                _syn = 'error3'  # number with an illegal leading zero
                _dstate = 0
            else:
                _dstate = 0
                if '.' not in _value:
                    _syn = 'DIGIT'  # digit+
                else:
                    if _value.count('.') == 1:
                        _syn = 'FRACTION'  # Floating point number
                    else:
                        _syn = 'error5'  # more than one decimal point
        _p -= 1
    elif ch == '\'':  # char
        # Char DFA: _cstate 1 = opened, 2 = escape seen, 3 = char consumed, 4 = closed.
        while ch in string.ascii_letters or ch in '@#$%&*\\\'\"':
            _value += ch
            if _cstate == 0:
                if ch == '\'':
                    _cstate = 1
            elif _cstate == 1:
                if ch == '\\':
                    _cstate = 2
                elif ch in string.ascii_letters or ch in '@#$%&*':
                    _cstate = 3
            elif _cstate == 2:
                if ch in 'nt':
                    _cstate = 3
            elif _cstate == 3:
                if ch == '\'':
                    _cstate = 4
            ch = mystr[_p]
            _p += 1
        _p -= 1
        if _cstate == 4:
            _syn = 'CHARACTER'
            _cstate = 0
        else:
            _syn = 'error4'  # malformed character literal
            _cstate = 0
    # One- and two-character operators and punctuation below.
    elif ch == '<':
        _value = ch
        ch = mystr[_p]
        if ch == '=':  # '<='
            _value += ch
            _p += 1
            _syn = '<='
        else:  # '<'
            _syn = '<'
    elif ch == '>':
        _value = ch
        ch = mystr[_p]
        if ch == '=':  # '>='
            _value += ch
            _p += 1
            _syn = '>='
        else:  # '>'
            _syn = '>'
    elif ch == '!':
        _value = ch
        ch = mystr[_p]
        if ch == '=':  # '!='
            _value += ch
            _p += 1
            _syn = '!='
        else:  # '!'
            _syn = 'NOT'
    elif ch == '+':
        _value = ch
        ch = mystr[_p]
        if ch == '+':  # '++'
            _value += ch
            _p += 1
            _syn = '++'
        else:  # '+'
            _syn = 'PLUS'
    elif ch == '-':
        _value = ch
        ch = mystr[_p]
        if ch == '-':  # '--'
            _value += ch
            _p += 1
            _syn = '--'
        else:  # '-'
            _syn = 'MINUS'
    elif ch == '=':
        _value = ch
        ch = mystr[_p]
        if ch == '=':  # '=='
            _value += ch
            _p += 1
            _syn = '=='
        else:  # '='
            _syn = 'ASSIGNOP'
    elif ch == '&':
        _value = ch
        ch = mystr[_p]
        if ch == '&':  # '&&'
            _value += ch
            _p += 1
            _syn = 'AND'
        else:  # '&'
            _syn = '&'
    elif ch == '|':
        _value = ch
        ch = mystr[_p]
        if ch == '|':  # '||'
            _value += ch
            _p += 1
            _syn = 'OR'
        else:  # '|'
            _syn = '|'
    elif ch == '*':  # '*'
        _value = ch
        _syn = 'STAR'
    elif ch == '/':  # '/'
        _value = ch
        _syn = 'DIV'
    elif ch == ';':  # ';'
        _value = ch
        _syn = 'SEMI'
    elif ch == '(':  # '('
        _value = ch
        _syn = 'LP'
    elif ch == ')':  # ')'
        _value = ch
        _syn = 'RP'
    elif ch == '{':  # '{'
        _value = ch
        _syn = 'LC'
    elif ch == '}':  # '}'
        _value = ch
        _syn = 'RC'
    elif ch == '[':  # '['
        _value = ch
        _syn = 'LB'
    elif ch == ']':  # ']'
        _value = ch
        _syn = 'RB'
    elif ch == ',':  # ','
        _value = ch
        _syn = 'COMMA'
    elif ch == '\n':
        _syn = 'error1'  # unexpected newline token
def lte(a, b): return operator.lt(a, b) or operator.eq(a, b)
def __eq__(self, other): return operator.eq(self.__num, other.__num) and operator.eq(self.__string, other.__string)
def svm_recursion_fixed_nu_proto(file_feature, label, nu_cl_for_svm, cl_list, num_cl, proto_file, nb_protos_cl, alph, pic_name):
    """Recursively split classes in half, train a binary SVM at each split, and
    collect prototype image names (near the support vectors) into proto_file.

    :param file_feature: sample feature matrix (rows ordered by class)
    :param label: per-sample class labels
    :param nu_cl_for_svm: per-class sample counts (same class order)
    :param cl_list: class ids covered by this call
    :param num_cl: number of classes in this call (typically 2, 5, 10, 50)
    :param proto_file: dict class -> list of chosen prototype image names (mutated)
    :param nb_protos_cl: target number of prototypes per class
    :param alph: how many nearest samples to take around each support vector
    :param pic_name: per-sample image names aligned with file_feature rows
    NOTE(review): indentation reconstructed from collapsed source.
    """
    # Leaf node only: top up the single class with samples until full.
    if num_cl == 1:
        step = 0  # fill however many prototypes are still missing
        cl_now = label[0]  # current class
        while len(proto_file[cl_now]) < nb_protos_cl and step < len(label):
            if pic_name[step] not in proto_file[cl_now]:
                # NOTE(review): .extend() on a string appends its characters —
                # .append() is used elsewhere; confirm which is intended.
                proto_file[cl_now].extend(pic_name[step])
            step = step + 1
        return proto_file
    # Build -1/+1 labels by splitting the class list into left/right halves.
    file_feature = np.float32(file_feature)
    label = np.int32(label)
    num_cl_left = int(np.ceil(num_cl / 2))
    num_cl_right = int(np.floor(num_cl / 2))
    # Sample counts per half (rows are grouped by class).
    num_file_left = np.sum(nu_cl_for_svm[cl] for cl in range(num_cl_left))
    num_file_right = np.sum(nu_cl_for_svm[cl] for cl in range(num_cl_left, num_cl_left + num_cl_right))
    label_01 = np.array([-1] * num_file_left + [1] * num_file_right)
    # Train the binary SVM on the half-vs-half labeling.
    svm = svm_config()
    label_01 = np.int32(label_01)
    svm_train(svm, file_feature, label_01)
    kk = svm.getSupportVectors()  # support vectors of this split
    # Pick prototypes near each support vector (KKT-boundary samples).
    for svc in kk:
        loction = 0
        loc_svc_list = []
        for lines in file_feature:
            # Locate row indices whose feature equals this support vector.
            if operator.eq(lines.tolist(), svc.tolist()):
                loc_svc_list.extend([loction])
            loction += 1
        for loc_svc in loc_svc_list:
            svc_true_label = label[loc_svc]  # true class of this support vector
            # Restrict to samples of that class.
            ind_file_now = np.where(label == svc_true_label)[0]
            file_now = file_feature[ind_file_now, :]
            # Rank the class's samples by L1 (Manhattan) distance to the support vector.
            dis = []
            for feat in file_now:
                dis.extend(np.linalg.norm(feat - svc, ord=1, keepdims=True))
            label_sort = np.argsort(dis)  # e.g. [2,4,3,1] -> [3,0,2,1] index order
            prototmp = label_sort[0:alph]  # take the alph closest samples
            ind_proto = ind_file_now[prototmp]  # map back to global sample indices
            for proto in ind_proto:
                if len(proto_file[svc_true_label]) < nb_protos_cl:
                    proto_file[svc_true_label].append(pic_name[proto])
                # else: break
    # Two-class base case: pad both classes to the target count and stop recursing.
    if num_cl == 2:
        step = 0
        class_2 = [cl_list[0], cl_list[1]]
        for leaf_class in class_2:  # both classes need to be filled
            while len(proto_file[leaf_class]) < nb_protos_cl and step < np.sum(label):
                if pic_name[step] not in proto_file[leaf_class]:
                    proto_file[leaf_class].append(pic_name[step])
                step = step + 1
        return proto_file
    # Recurse into each half with the corresponding slices of every array.
    file_feature_left = file_feature[0:num_file_left, :]
    file_feature_right = file_feature[num_file_left:, :]
    pic_name_left = pic_name[0:num_file_left]
    pic_name_right = pic_name[num_file_left:]
    label_left = label[0:num_file_left]
    label_right = label[num_file_left:]
    cl_list_left = cl_list[0:num_cl_left]
    cl_list_right = cl_list[num_cl_left:]
    nu_cl_for_svm_left = nu_cl_for_svm[0:num_cl_left]
    nu_cl_for_svm_right = nu_cl_for_svm[num_cl_left:]
    svm_recursion_fixed_nu_proto(file_feature_left, label_left, nu_cl_for_svm_left, cl_list_left, num_cl_left, proto_file, nb_protos_cl, alph, pic_name_left)
    svm_recursion_fixed_nu_proto(file_feature_right, label_right, nu_cl_for_svm_right, cl_list_right, num_cl_right, proto_file, nb_protos_cl, alph, pic_name_right)
def slot_signal_1(self): if len(self.file_list) == 0 or self.dir_name == '': self.ui.pushButton_down_firm.setEnabled(True) QMessageBox.warning(self, "下载固件前 读取固件错误", '必须先选择固件路径', QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes) return try: target_id = self.jlink.core_id() except: self.ui.pushButton_down_firm.setEnabled(False) self.ui.pushButton_connect_ap.setText('连接AP') QMessageBox.warning(self, "下载固件前 读取目标板ID错误", 'jlink已经断开连接', QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes) return if target_id == -1: self.ui.pushButton_down_firm.setEnabled(False) self.ui.pushButton_connect_ap.setText('连接AP') QMessageBox.warning(self, "下载固件前 读取目标板ID错误", '目标板已断开连接', QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes) return str_log = '读取目标板id:%d\n开始下载固件:%s...\n' % ( target_id, self.file_list[self.now_file_index]) self.ui.textBrowser_log.setText(str_log) try: self.jlink.flash_file( self.dir_name + '/' + self.file_list[self.now_file_index], 0X08000000) except Exception as ext: self.ui.pushButton_down_firm.setEnabled(False) self.ui.pushButton_connect_ap.setText('连接AP') QMessageBox.warning(self, "下载固件错误", '下载固件,\nExcept:' + str(ext), QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes) return str_log += '下载成功,校验...\n' self.ui.textBrowser_log.setText(str_log) firm_data = open( self.dir_name + '/' + self.file_list[self.now_file_index], 'rb').read() read_firm = self.jlink.code_memory_read(0X08000000, len(firm_data)) if operator.eq(firm_data, bytes(read_firm)) is True: str_log += '校验成功\n请拨动拨码开关重启设备\n' self.ui.textBrowser_log.setText(str_log) self.now_file_index += 1 self.ui.lineEdit_FirmWare_Path_Useing.setText( self.file_list[self.now_file_index].split('_')[2]) self.ui.lineEdit_FirmWare_Firm_Index.setText('已下载%d个' % self.now_file_index) self.jlink.reset() else: str_log += '校验失败,需要重新下载 ' self.ui.textBrowser_log.setText(str_log) QMessageBox.warning(self, "下载固件", '校验失败', QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes) self.ui.pushButton_down_firm.setEnabled(True)
#-*-coding=utf-8-* import sys print(bin(6)) print(len(bin(6))) # 5的二进制 多少位 ? #print(len(5)) print(callable(1)) print(chr(65)) # A if sys.version_info.major < 3: print(cmp(3, 4)) else: import operator print(operator.eq(3.0, 3.00000)) # False print(complex(1, 2) == complex("1+2j")) #True
def check_result(self, result): return eq(self.expect, result)
def str_cmp(): sStr1 = '1345' sStr2 = '13bc' n = 5 print(op.eq(sStr1[0:n], sStr2[0:n]))
if not label: label = name return """<a href="/desk#!Form/%(doctype)s/%(name)s">%(label)s</a>""" % locals( ) operator_map = { # startswith "^": lambda (a, b): (a or "").startswith(b), # in or not in a list "in": lambda (a, b): operator.contains(b, a), "not in": lambda (a, b): not operator.contains(b, a), # comparison operators "=": lambda (a, b): operator.eq(a, b), "!=": lambda (a, b): operator.ne(a, b), ">": lambda (a, b): operator.gt(a, b), "<": lambda (a, b): operator.lt(a, b), ">=": lambda (a, b): operator.ge(a, b), "<=": lambda (a, b): operator.le(a, b), "not None": lambda (a, b): a and True or False, "None": lambda (a, b): (not a) and True or False } def compare(val1, condition, val2): ret = False if condition in operator_map: ret = operator_map[condition]((val1, val2))
def __ne__(self, other): if other is None: return self.exists() return Bool.must_not(operator.eq(self, other))