def raid_std_validation(raid_value=None):
    """Validate a requested RAID configuration against per-level standards.

    :param raid_value: dict with key 'raid_level' and optionally
        'upd_slots' (list of user-picked slots), 'pd_required' (disk count),
        'span_length' and 'span_depth'.
    :return: dict {'Status': 'Success'|'Failed', 'Message': <detail>}
    """
    logger.info("Interface raid_std_validation enter")
    # Per-RAID-level standard: allowed physical-disk slot range, required
    # span length/depth, and the comparison used for the disk-count check.
    raid_std = {
        "RAID 0": {'pd_slots': range(1, 2), 'span_length': 1, 'checks': operator.ge, 'span_depth': 1},
        "RAID 1": {'pd_slots': range(1, 3), 'span_length': 2, 'checks': operator.eq, 'span_depth': 1},
        "RAID 5": {'pd_slots': range(1, 4), 'span_length': 3, 'checks': operator.ge, 'span_depth': 1},
        "RAID 6": {'pd_slots': range(1, 5), 'span_length': 4, 'checks': operator.ge, 'span_depth': 1},
        "RAID 10": {'pd_slots': range(1, 5), 'span_length': 2, 'checks': operator.eq, 'span_depth': 2},
        "RAID 50": {'pd_slots': range(1, 7), 'span_length': 3, 'checks': operator.ge, 'span_depth': 2},
        "RAID 60": {'pd_slots': range(1, 9), 'span_length': 4, 'checks': operator.ge, 'span_depth': 2}
    }
    status, message = "Success", "Invalid RAID level."
    raid = raid_std.get(raid_value['raid_level'])
    if raid is not None:
        if raid_value.get('upd_slots') is not None:
            # The user-picked slot list must match the requested disk count.
            if len(raid_value.get('upd_slots')) != raid_value.get('pd_required'):
                status = "Failed"
                message = "Invalid physical disk slots or span length or span depth details."
                return {'Status': status, 'Message': message}
        # BUGFIX: was `raid_value.get('pd_required') or raid_value.get('span_length') is not None`,
        # which, by operator precedence, skipped the span_length presence check
        # whenever pd_required was truthy (and entered on a missing pd_required).
        if raid_value.get('pd_required') is not None and raid_value.get('span_length') is not None:
            pd_slots = raid_value.get('pd_required')
            span_length = raid_value.get('span_length')
            if raid.get('span_depth') != raid_value.get('span_depth'):
                return {'Status': 'Failed', 'Message': 'Invalid span depth.'}
            # BUGFIX: the two success messages were swapped — the first branch
            # validates the physical-disk count, the second the span length.
            # NOTE(review): the elif means span length is not checked when the
            # disk-count check passes — confirm whether both should be required.
            if raid.get('checks')(pd_slots, len(raid.get('pd_slots'))):
                status = "Success"
                message = "physical disk slots validation is successful."
            elif raid.get('checks')(span_length, raid.get('span_length')):
                status = "Success"
                message = "span length validation is successful."
            else:
                status = "Failed"
                message = "Invalid physical disk slots or span length or span depth details."
    logger.info("Interface raid_std_validation exit")
    return {"Status": status, "Message": message}
def test_equality(self):
    """MenuItem equality semantics: items are equal iff their fields match,
    and never equal to objects of other types (e.g. int)."""
    item1 = menus.MenuItem(template_name='menu.html', activepattern='/item1', href='/item1/', icon='fa-item1', display="Item1", index=0)
    # Same field values as item1, but a distinct instance.
    item1dup = menus.MenuItem(template_name='menu.html', activepattern='/item1', href='/item1/', icon='fa-item1', display="Item1", index=0)
    item2 = menus.MenuItem(template_name='menu.html', activepattern='/item2', href='/item2/', icon='fa-item2', display="Item2", index=0)
    # Reflexive and duplicate equality, via both == and operator.eq.
    self.assertTrue(item1 == item1)
    self.assertTrue(operator.eq(item1, item1))
    self.assertTrue(item1 == item1dup)
    self.assertTrue(operator.eq(item1, item1dup))
    # Equality is symmetric.
    self.assertTrue(item1dup == item1)
    self.assertTrue(operator.eq(item1dup, item1))
    # Items with different fields compare unequal.
    self.assertTrue(item1 != item2)
    self.assertTrue(operator.ne(item1, item2))
    self.assertFalse(operator.ne(item1, item1dup))
    # Cross-type comparison is always unequal, in both directions.
    self.assertFalse(item1 == 1)
    self.assertFalse(1 == item2)
    self.assertTrue(1 != item2)
    self.assertTrue(item2 != 1)
def validate_search_key_filed(case_type, response, expected_response, validation_query):
    """Validate a "search" test case: compare the response (minus its 'data'
    payload) with expected_response and, when a validation query is given,
    compare the jsonpath-extracted key fields against the database rows.

    :param case_type: case name; when it contains 'search', its bracketed
        tail (e.g. "[id,name]") names the field(s) to extract via jsonpath.
    :param response: response object exposing .responsejson (a dict).
    :param expected_response: expected response dict as a Python literal string.
    :param validation_query: optional SQL whose rows are compared with the
        extracted field tuples.
    :raises AssertionError: on any mismatch.
    """
    if case_type.find("search") >= 0:
        # Field spec is the bracketed tail of the case name.
        search_filed = case_type[case_type.find("["):]
        search_filed_combine = "$.." + search_filed
        res_detail = response.responsejson
        if jsonpath.jsonpath(res_detail, search_filed_combine):
            key_filed_value = jsonpath.jsonpath(res_detail, search_filed_combine)
        else:
            key_filed_value = []
        # jsonpath returns a flat list; regroup it into one tuple per record,
        # `temp` being the number of comma-separated fields requested.
        validate_key_filed_value = []
        temp = search_filed_combine.count(",") + 1
        for index in range(int(len(key_filed_value) / temp)):
            hh = tuple(key_filed_value[index * temp:(index + 1) * temp])
            validate_key_filed_value.append(hh)
        # Compare everything except the bulk 'data' payload.
        res_detail.pop("data")
        # NOTE(review): eval() executes arbitrary code from expected_response —
        # tolerable only for trusted test fixtures; ast.literal_eval is safer.
        if operator.ne(res_detail, eval(expected_response)):
            raise AssertionError(
                "validate_get_feature validation for message is fail, the expected result is %s, but the actual result is "
                "%s" % (expected_response, str(res_detail)))
        if validation_query:
            data_result_list_database = db.fetch_all(validation_query)
            if operator.ne(validate_key_filed_value, data_result_list_database):
                raise AssertionError("validate_get_feature validation for data is fail, the expected result is %s, but the actual result is "
                                     "%s" % (data_result_list_database, str(validate_key_filed_value)))
def test_richcompare(self): self.assertIs(complex.__eq__(1 + 1j, 1 << 10000), False) # in py3 this is returns NotImplemented so comment this out # self.assertRaises(TypeError, complex.__lt__, 1+1j, None) self.assertIs(complex.__eq__(1 + 1j, 1 + 1j), True) self.assertIs(complex.__eq__(1 + 1j, 2 + 2j), False) self.assertIs(complex.__ne__(1 + 1j, 1 + 1j), False) self.assertIs(complex.__ne__(1 + 1j, 2 + 2j), True) # self.assertFalse(complex.__eq__(1j, None), False) // in py3 these are NotIMplemented - so no longer supported # self.assertFalse(complex.__eq__(1j, NotImplemented), False) for i in range(1, 100): f = i / 100.0 self.assertIs(complex.__eq__(f + 0j, f), True) self.assertIs(complex.__ne__(f + 0j, f), False) self.assertIs(complex.__eq__(complex(f, f), f), False) self.assertIs(complex.__ne__(complex(f, f), f), True) self.assertRaises(TypeError, complex.__lt__, 1 + 1j, 2 + 2j) self.assertRaises(TypeError, complex.__le__, 1 + 1j, 2 + 2j) self.assertRaises(TypeError, complex.__gt__, 1 + 1j, 2 + 2j) self.assertRaises(TypeError, complex.__ge__, 1 + 1j, 2 + 2j) self.assertRaises(TypeError, operator.lt, 1 + 1j, 2 + 2j) self.assertRaises(TypeError, operator.le, 1 + 1j, 2 + 2j) self.assertRaises(TypeError, operator.gt, 1 + 1j, 2 + 2j) self.assertRaises(TypeError, operator.ge, 1 + 1j, 2 + 2j) self.assertIs(operator.eq(1 + 1j, 1 + 1j), True) self.assertIs(operator.eq(1 + 1j, 2 + 2j), False) self.assertIs(operator.ne(1 + 1j, 1 + 1j), False) self.assertIs(operator.ne(1 + 1j, 2 + 2j), True)
def test_richcompare(self):
    """complex equality/inequality works (incl. vs int/float); ordering dunders
    return NotImplemented and the operator.* forms raise TypeError."""
    self.assertIs(complex.__eq__(1+1j, 1<<10000), False)
    # Dunders signal unsupported comparison with NotImplemented, not a raise.
    self.assertIs(complex.__lt__(1+1j, None), NotImplemented)
    self.assertIs(complex.__eq__(1+1j, 1+1j), True)
    self.assertIs(complex.__eq__(1+1j, 2+2j), False)
    self.assertIs(complex.__ne__(1+1j, 1+1j), False)
    self.assertIs(complex.__ne__(1+1j, 2+2j), True)
    # Zero-imaginary complex equals the matching float.
    for i in range(1, 100):
        f = i / 100.0
        self.assertIs(complex.__eq__(f+0j, f), True)
        self.assertIs(complex.__ne__(f+0j, f), False)
        self.assertIs(complex.__eq__(complex(f, f), f), False)
        self.assertIs(complex.__ne__(complex(f, f), f), True)
    self.assertIs(complex.__lt__(1+1j, 2+2j), NotImplemented)
    self.assertIs(complex.__le__(1+1j, 2+2j), NotImplemented)
    self.assertIs(complex.__gt__(1+1j, 2+2j), NotImplemented)
    self.assertIs(complex.__ge__(1+1j, 2+2j), NotImplemented)
    # The full comparison protocol converts NotImplemented into TypeError.
    self.assertRaises(TypeError, operator.lt, 1+1j, 2+2j)
    self.assertRaises(TypeError, operator.le, 1+1j, 2+2j)
    self.assertRaises(TypeError, operator.gt, 1+1j, 2+2j)
    self.assertRaises(TypeError, operator.ge, 1+1j, 2+2j)
    self.assertIs(operator.eq(1+1j, 1+1j), True)
    self.assertIs(operator.eq(1+1j, 2+2j), False)
    self.assertIs(operator.ne(1+1j, 1+1j), False)
    self.assertIs(operator.ne(1+1j, 2+2j), True)
def set_condition(self, matrix, operator=None, value=None):
    """Mark the cells of this view whose matrix value satisfies a condition.

    Fills self.mark with booleans: a 2-D grid for a DataFrame input, a
    one-column grid for a Series input.

    :param matrix: pd.DataFrame (indexed by self.info['rows']/['cols']) or
        pd.Series (indexed by self.info['rows']) holding the values to test.
    :param operator: one of 'LT', 'LE', 'EQ', 'INEQ'; when None the condition
        defaults to "cell != 0". (Name kept for interface compatibility even
        though it shadows the conventional module name.)
    :param value: right-hand side for the comparison (unused when operator is None).
    """
    self.hasCondition = True
    mappings = {
        'LT': op.lt,
        'LE': op.le,
        'EQ': op.eq,
        'INEQ': op.ne
    }  # not complete list
    # Resolve the predicate once instead of re-testing `operator` per cell.
    if operator is None:
        pred = lambda cell: op.ne(cell, 0)
    else:
        cmp_fun = mappings[operator]
        pred = lambda cell: cmp_fun(cell, value)
    # isinstance instead of `type(...) ==`: idiomatic, and accepts subclasses.
    if isinstance(matrix, pd.DataFrame):
        rows, cols = self.info['rows'], self.info['cols']
        self.mark = [[pred(matrix.loc[rows[i]][cols[j]])
                      for j in range(self.info['width'])]
                     for i in range(self.info['height'])]
    if isinstance(matrix, pd.Series):
        rows = self.info['rows']
        self.mark = [[pred(matrix.loc[rows[i]])]
                     for i in range(self.info['height'])]
def _check_shape(params_map):
    """
    Validate the shape parameters for the layernorm-grad kernel.

    Parameters
    ----------
    params_map: dict
        {"shape_dy": ..., "shape_x": ..., "shape_var": ...,
         "shape_mean": ..., "shape_gamma": ..., "dtype": ..., "kernel_name": ...}

    Returns
    -------
    None

    Raises
    ------
    RuntimeError when dy/x or variance/mean shapes disagree.
    """
    # dy must match x, and variance must match mean, element for element.
    if tuple(params_map.get("shape_dy")) != tuple(params_map.get("shape_x")):
        raise RuntimeError("dy and x must have the same shape")
    if tuple(params_map.get("shape_var")) != tuple(params_map.get("shape_mean")):
        raise RuntimeError("variance and mean must have the same shape")
    shape_x = params_map.get("shape_x")
    shape_mean = params_map.get("shape_mean")
    shape_gamma = params_map.get("shape_gamma")
    # Generic per-shape validation, then the mean/gamma compatibility checks.
    check_shape(shape_x, param_name="input_x")
    check_shape(shape_mean, param_name="input_mean")
    check_shape(shape_gamma, param_name="input_gamma")
    _check_shape_mean(shape_x, shape_mean)
    _check_shape_gamma(shape_x, shape_gamma)
def test_richcompare(self):
    """complex equality/inequality (incl. vs int/float); ordering dunders
    return NotImplemented while operator.* ordering raises TypeError."""
    self.assertIs(complex.__eq__(1 + 1j, 1 << 10000), False)
    # Unsupported comparisons are reported as NotImplemented by the dunder.
    self.assertIs(complex.__lt__(1 + 1j, None), NotImplemented)
    self.assertIs(complex.__eq__(1 + 1j, 1 + 1j), True)
    self.assertIs(complex.__eq__(1 + 1j, 2 + 2j), False)
    self.assertIs(complex.__ne__(1 + 1j, 1 + 1j), False)
    self.assertIs(complex.__ne__(1 + 1j, 2 + 2j), True)
    # Zero-imaginary complex equals the matching float.
    for i in range(1, 100):
        f = i / 100.0
        self.assertIs(complex.__eq__(f + 0j, f), True)
        self.assertIs(complex.__ne__(f + 0j, f), False)
        self.assertIs(complex.__eq__(complex(f, f), f), False)
        self.assertIs(complex.__ne__(complex(f, f), f), True)
    self.assertIs(complex.__lt__(1 + 1j, 2 + 2j), NotImplemented)
    self.assertIs(complex.__le__(1 + 1j, 2 + 2j), NotImplemented)
    self.assertIs(complex.__gt__(1 + 1j, 2 + 2j), NotImplemented)
    self.assertIs(complex.__ge__(1 + 1j, 2 + 2j), NotImplemented)
    # The operator module turns NotImplemented-on-both-sides into TypeError.
    self.assertRaises(TypeError, operator.lt, 1 + 1j, 2 + 2j)
    self.assertRaises(TypeError, operator.le, 1 + 1j, 2 + 2j)
    self.assertRaises(TypeError, operator.gt, 1 + 1j, 2 + 2j)
    self.assertRaises(TypeError, operator.ge, 1 + 1j, 2 + 2j)
    self.assertIs(operator.eq(1 + 1j, 1 + 1j), True)
    self.assertIs(operator.eq(1 + 1j, 2 + 2j), False)
    self.assertIs(operator.ne(1 + 1j, 1 + 1j), False)
    self.assertIs(operator.ne(1 + 1j, 2 + 2j), True)
def testNotEqual(self):
    """Test !=operator"""
    # An event dated one day earlier must compare unequal to self.e,
    # while comparing an event with itself must not report inequality.
    _dt = self.eDatetime - datetime.timedelta(1)
    _other = eventCalBase.event(2, 'event', _dt)
    self.failUnless(operator.ne(self.e, _other),
                    'a1 != a2 failed. a1=%s, a2=%s' % (self.e, _other))
    self.failIf(operator.ne(_other, _other),
                'a1 != a2 failed. a1=%s, a2=%s' % (_other, _other))
def remove_synonyms(self, content, grade):
    """Build the highlight word list for `content`, collapsing synonym groups.

    Words from the grade-appropriate highlight list that occur in the text
    are split into synonym-table members (of which only one representative
    per group is kept) and non-members; their union is returned.

    :param content: text to scan (words are matched when preceded by a space
        and not followed by a letter).
    :param grade: selects the senior or junior dictionaries; any other grade
        returns [].
    :return: list of highlight words with synonym duplicates removed.
    """
    self.senior_highlight_list, self.senior_synonyms, self.junior_highlight_list, self.junior_synonyms = self.load_dictionary()
    # The senior and junior paths were two verbatim copies of the same
    # algorithm; both now delegate to one shared helper.
    if grade in self.senior_set:
        return self._dedupe_highlight_words(content, self.senior_highlight_list, self.senior_synonyms)
    elif grade in self.junior_set:
        return self._dedupe_highlight_words(content, self.junior_highlight_list, self.junior_synonyms)
    else:
        return []

def _dedupe_highlight_words(self, content, highlight_list, synonyms):
    """Shared worker: find highlight words present in content and keep a
    single representative per synonym group."""
    # Highlight words that actually occur in the text.
    highscore_words = [word for word in highlight_list
                       if re.findall(r'(?<= )' + word + r'(?![a-zA-Z])', content)]
    syn_words = []   # found words that belong to some synonym group
    remove_syn = []  # one representative kept per synonym group
    for syn in synonyms:
        for phrase in highscore_words:
            if phrase in syn:
                syn_words.append(phrase)
        # Handling duplicates in the synonym table: the first accumulated
        # member of this group becomes its representative.
        temp = [word for word in syn_words if word in syn]
        if temp:
            remove_syn.append(temp[0])
    remove_syn = list(set(remove_syn))
    # Words outside every synonym group are kept as-is.
    unsyn_words = [w for w in highscore_words if w not in syn_words]
    return unsyn_words + remove_syn
def test_richcompare(self):
    """Complex numbers support only (in)equality; every ordering raises TypeError."""
    for ordering in (operator.lt, operator.le, operator.gt, operator.ge):
        self.assertRaises(TypeError, ordering, 1 + 1j, 2 + 2j)
    self.assertIs(operator.eq(1 + 1j, 1 + 1j), True)
    self.assertIs(operator.eq(1 + 1j, 2 + 2j), False)
    self.assertIs(operator.ne(1 + 1j, 1 + 1j), False)
    self.assertIs(operator.ne(1 + 1j, 2 + 2j), True)
def _pred(filter):
    """
    Create a SqlAlchemy Binary Expression given a single search criteria.

    :param filter: dict with keys 'name', 'op' and (except for the null
        tests) 'val'. 'name' is a variable attribute, or 'response'.
        'op' is one of the comparison aliases below, 'like', 'in',
        'not_in', 'is_null' or 'is_not_null'. 'val' is the comparison
        value (wildcards '%' must be embedded for 'like').
    :return: a SqlAlchemy BinaryExpression, or an api_error response for
        an unknown attribute name or operator.
    """
    # 'val' may be absent (is_null / is_not_null).
    name, op, val = filter['name'], filter['op'], filter.get('val')
    op = op.lower().strip()
    if name in variable_attrs:
        column = getattr(Variable, name)
    elif name == 'response':
        column = Response.label
    else:
        return api_error(400, "Invalid name for search.")
    # Binary comparison aliases share one dispatch table.
    comparisons = {
        'eq': operator.eq, '==': operator.eq,
        'neq': operator.ne, 'ne': operator.ne, '!=': operator.ne,
        'gt': operator.gt, '>': operator.gt,
        'gte': operator.ge, 'ge': operator.ge, '>=': operator.ge,
        'lt': operator.lt, '<': operator.lt,
        'lte': operator.le, 'le': operator.le, '<=': operator.le,
    }
    if op == 'like':
        return column.like(val)
    if op in comparisons:
        return comparisons[op](column, val)
    if op == 'in':
        return column.in_(val)
    if op == 'not_in':
        return ~column.in_(val)
    if op == 'is_null':
        return operator.eq(column, None)
    if op == 'is_not_null':
        return operator.ne(column, None)
    return api_error(400, "Unrecognized operator")
def test_layout_comparison_operators(self):
    """Independently built Cl(3) layouts compare equal; Cl(4) and None do not."""
    first3, _ = Cl(3)
    second3, _ = Cl(3)
    four, _ = Cl(4)
    assert operator.eq(first3, second3) is True
    assert operator.eq(first3, four) is False
    assert operator.eq(first3, None) is False
    assert operator.ne(first3, second3) is False
    assert operator.ne(first3, four) is True
    assert operator.ne(first3, None) is True
def test_layout_comparison_operators(self, g3, g4):
    """Fixture layouts compare equal only across equal algebras; None never matches."""
    base = g3
    duplicate, _ = Cl(3)  # need a new copy here
    other = g4
    assert operator.eq(base, duplicate) is True
    assert operator.eq(base, other) is False
    assert operator.eq(base, None) is False
    assert operator.ne(base, duplicate) is False
    assert operator.ne(base, other) is True
    assert operator.ne(base, None) is True
def localScore(i, j, tmpCheckList, sequenceA, sequenceB, maxIndexSet):
    # Reconstructs locally-optimal alignments by walking traceback codes in
    # tmpCheckList (1 = diagonal/match, 2 = gap in sequence B, 3 = gap in
    # sequence A).  When a cell offers several optimal moves, the alternatives
    # are pushed onto the shared stacks (stringSet / temporary_List /
    # maxIndexSet) and revisited recursively so every co-optimal alignment is
    # collected into final_1st/final_2nd.
    # NOTE(review): relies on names from the enclosing scope (stringSet,
    # temporary_List, final_1st, final_2nd, tempList, deleteOperations, self)
    # — confirm against the enclosing module/class.
    a = i
    b = j
    # Keep processing while there are branch points left on the stack.
    while op.ne(np.size(stringSet), 0):
        if (np.size(stringSet) > 0):
            if op.gt(np.size(tmpCheckList), 1):
                # More than one optimal move from this cell: remember the
                # remaining alternatives before following the first one.
                maxIndexSet[0:0] = [(i, j)]
                temporary_List[0:0] = [tmpCheckList[1:]]
                stringSet[0:0] = [(sequenceA, sequenceB)]
            if op.gt(np.size(tmpCheckList), 0):
                # we call this conditional to check if there are more than 1 possibility of having an optimal sequen
                if op.eq(tmpCheckList[0 * 1], 1):
                    # Diagonal move: consume one character from each sequence.
                    sequenceA = op.add(self.sequence_1[a - 1], sequenceA)
                    sequenceB = op.add(self.sequence_2[b - 1], sequenceB)
                    a = op.sub(a, 1)
                    b = op.sub(b, 1)
                    tmpCheckList.pop(0)
                elif op.eq(tmpCheckList[0 * 1], 2):
                    # Up move: emit a gap in sequence B.
                    sequenceA = self.sequence_1[a - 1] + sequenceA
                    sequenceB = "_" + sequenceB
                    a = op.sub(a, 1)
                    tmpCheckList.pop(0)
                elif op.eq(tmpCheckList[0 * 1], 3):
                    # Left move: emit a gap in sequence A.
                    sequenceA = "_" + sequenceA
                    sequenceB = self.sequence_2[b - 1] + sequenceB
                    b = op.sub(b, 1)
                    tmpCheckList.pop(0)
                localScore(a, b, tempList(a, b, 0), sequenceA, sequenceB, maxIndexSet)
                # here we call recursively call the localScore method again keeping in mind, there could have been more than 1 possibility
            else:
                # Score 0 marks the start of a local alignment: record it.
                if self.DP_Matrix[a][b] == 0:
                    final_1st.append(
                        sequenceA
                    )  # we finally add the given sequences to the final list for output later on
                    final_2nd.append(sequenceB)
                if op.ne(np.size(stringSet), 0):
                    # Pop the most recent branch point and explore it.
                    tempstringa = stringSet[0][0]
                    tempstringb = stringSet[0][1]
                    tempi = maxIndexSet[0][0]
                    tempj = maxIndexSet[0][1]
                    tempcheckarr = temporary_List[0]
                    deleteOperations(stringSet, maxIndexSet, temporary_List)
                    localScore(tempi, tempj, tempcheckarr, tempstringa, tempstringb, maxIndexSet)
                else:
                    break
def __init__(self, args):
    """Parse command-line args, open the CNR AVP input (plain or gzip) and
    the CHX AVP output file, and initialise the dump/compare state."""
    if len(args) == 0:
        error("No parameters! Please use -h")
        sys.exit()
    else:
        signal.signal(signal.SIGINT, self.Sigint_handler)
        self.Parse_input(args)
        self.Set_logging()  #Set logging
        self._path = os.path.abspath(".")
        # The input AVP file is mandatory; the output name defaults to
        # "<input-stem>_chx.avp" when not supplied.
        if ne(self.option.cnr_avp, "None"):
            self.cnr_avp_file_name = os.path.join(self._path, self.option.cnr_avp)
        else:
            error_exit("You must input a cnr avp file")
        if ne(self.option.chx_avp, "None"):
            self.chx_avp_file_name = os.path.join(self._path, self.option.chx_avp)
        else:
            warning(
                "You don't set the chx avp file name, so change it to %s_chx.avp"
                % (self.cnr_avp_file_name.split(".")[0]))
            self.chx_avp_file_name = "%s_chx.avp" % (
                self.cnr_avp_file_name.split(".")[0])
        # Open by extension: .avp/.ic as text, .gz through gzip.
        # NOTE(review): gzip.open defaults to binary mode while open() is text
        # mode here — confirm downstream readers accept both.
        if eq(self.cnr_avp_file_name.split(".")[-1], "avp") or eq(
                self.cnr_avp_file_name.split(".")[-1], "ic"):
            self.cnr_avp = open(self.cnr_avp_file_name, "r")
        elif eq(self.cnr_avp_file_name.split(".")[-1], "gz"):
            self.cnr_avp = gzip.open(self.cnr_avp_file_name)
        else:
            error_exit("Wrong input avp format!")
        # Bochs dumps use the address as-is; otherwise the initial snapshot
        # is rebased 1 MB below the major dump address.
        if self.option.bochs:
            self.initial_addr = int(self.option.major_dump_addr, 16)
        else:
            self.initial_addr = int(self.option.major_dump_addr, 16) - 0x100000
        self.results_addr = int(self.option.major_dump_addr, 16)
        # Parser/compare state containers.
        self.initial_flag = 0
        self.results_flag = 0
        self.program_flag = 0
        self.initial = {}
        self.results = {}
        self.initial_dump = {}
        self.results_dump = {}
        self.initial_dump_chx = {}
        self.results_dump_chx = {}
        self.tbdm = []
        self.chx_avp = open(self.chx_avp_file_name, "w")
        if not self.chx_avp:
            error_exit("Open %s failed" % (self.chx_avp_file_name))
def ne(A, B):
    """ A ≠ B

    Integers compare directly and characters compare by code point, each
    returning 1 (unequal) or 0 (equal). Anything else falls back to fuzzy
    equality, with incomparable operands counted as unequal.
    """
    if isinstance(A, int) and isinstance(B, int):
        return 1 if A != B else 0
    if isinstance(A, str) and isinstance(B, str):
        return 1 if ord(A) != ord(B) else 0
    try:
        return int(not fuzzyEquals(A, B))
    except TypeError:
        # Incomparable values are treated as unequal.
        return 1
def dynamic_filters_communications(filters):
    """Build a filter dynamically for communications table from query paramaters

    :param dict filters: Dictionary of arg-value pairs
    :returns: a list of filters which can be passed into SQL alchemy filter
        queries.
    :rtype: list
    """
    expressions = []
    for arg, value in filters.items():
        if arg == 'last_id':
            expressions.append(operator.gt(Communications.id, value))
        elif arg == 'receiver':
            expressions.append(operator.eq(Communications.receiver, value))
        elif arg == 'sender':
            expressions.append(operator.eq(Communications.sender, value))
        elif arg == 'ignore_sender':
            expressions.append(operator.ne(Communications.sender, value))
        elif arg == 'max':
            # Pin the filter to the newest row, or to -1 when the table is empty.
            newest = Communications.query.order_by(
                Communications.id.desc()).limit(1).first()
            newest_id = -1 if newest is None else newest.id
            expressions.append(operator.eq(Communications.id, newest_id))
        # Unknown arguments are silently ignored.
    return expressions
def evaluate(cond):  # Method to evaluate the conditions
    """Evaluate one domain condition tuple (field, op, value) against the
    enclosing `model`; a bare boolean passes through unchanged."""
    if isinstance(cond,bool):
        return cond
    left, oper, right = cond
    if not model or not left in model.mgroup.fields: #check that the field exist
        return False
    # Normalise operator aliases through the widget's OPERAND_MAPPER.
    oper = self.OPERAND_MAPPER.get(oper.lower(), oper)
    if oper == '=':
        res = operator.eq(model[left].get(model),right)
    elif oper == '!=':
        res = operator.ne(model[left].get(model),right)
    elif oper == '<':
        res = operator.lt(model[left].get(model),right)
    elif oper == '>':
        res = operator.gt(model[left].get(model),right)
    elif oper == '<=':
        res = operator.le(model[left].get(model),right)
    elif oper == '>=':
        res = operator.ge(model[left].get(model),right)
    elif oper == 'in':
        res = operator.contains(right, model[left].get(model))
    elif oper == 'not in':
        res = operator.contains(right, model[left].get(model))
        res = operator.not_(res)
    # NOTE(review): an unrecognised operator leaves `res` unbound and raises
    # UnboundLocalError on the return — confirm callers only pass mapped ops.
    return res
def do_date(datenode) :
    """Print a zero-padded sortable YYYYMMDD date for a GEDCOM date node,
    prefixing '*' when the date is impossible or in the future."""
    (day,month,year) = extractdate(datenode)
    # Days in the month; February honours leap years (Julian calendar: every
    # 4th year; Gregorian: century years only when divisible by 400).
    if le(month,0) or gt(month,12) :
        daysinmonth = 0
    elif eq(month,9) or eq(month,4) or eq(month,6) or eq(month,11) :
        daysinmonth = 30
    elif eq(month,2) :
        if eq(mod(year,4),0) and (julian or (ne(mod(year,100),0) or eq(mod(year,400),0))) :
            daysinmonth = 29
        else :
            daysinmonth = 28
    else :
        daysinmonth=31
    # "Future" is judged against the globals toyear/tomonth/today.
    future = 0
    if gt(year,toyear) :
        future = 1
    elif eq(year,toyear) :
        if gt(month,tomonth) :
            future=1
        elif eq(month,tomonth) and gt(day,today) :
            future=1
    # Flag impossible day-of-month values as well as future dates.
    if gt(day,daysinmonth) or future :
        out("*")
    if lt(year,0) :
        cols(d(year),6)
    else :
        # Zero-pad the year to 4 digits, month and day to 2.
        if lt(year,10) :
            out("0")
        if lt(year,100) :
            out("0")
        if lt(year,1000) :
            out("0")
        out(d(year))
        if lt(month,10) :
            out("0")
        out(d(month))
        if lt(day,10) :
            out ("0")
        out(d(day)+" ")
def __init__(self):
    """Initialise the Excel-formula → Python evaluation table used by eval()."""
    self.__author__ = __author__
    self.__version__ = __version__
    # Mapping from Excel function/operator tokens to callables; each callable
    # receives the already-evaluated argument list produced by the parser.
    # Comparison results are coerced to float64 to mimic Excel's numeric bools.
    self.fun_database = {
        'IF': lambda args: [args[0] * args[1] + (abs(args[0] - 1) * args[2])][0],
        'AVERAGE': lambda args: np.average(args[0]),
        'STDEV.P': lambda args: np.std(args[0]),
        'TRANSPOSE': lambda args: np.transpose(args[0]),
        'ABS': lambda args: np.abs(args[0]),
        'MMULT': lambda args: np.dot(*args),
        'IFERROR': lambda args: self.pyxl_error(*args),
        'SUM': lambda args: np.sum(args[0]),
        'COUNT': lambda args: np.size(args[0]),
        'SQRT': lambda args: np.sqrt(args[0]),
        '^': lambda args: np.power(*args),
        '<': lambda args: np.float64(op.lt(*args)),
        '>': lambda args: np.float64(op.gt(*args)),
        '<=': lambda args: np.float64(op.le(*args)),
        '>=': lambda args: np.float64(op.ge(*args)),
        '<>': lambda args: np.float64(op.ne(*args)),
        '=': lambda args: np.float64(op.eq(*args)),
        '+': lambda args: np.add(*args),
        '-': lambda args: np.subtract(*args),
        '/': lambda args: np.divide(*args),
        '*': lambda args: np.multiply(*args),
    }
def mk_test(self, test_str):
    """Return the comparison callable named by `test_str`.

    Supports the usual binary comparisons, membership ('in' / '!in') and a
    key-existence test ('?='). Raises KeyError for unknown test names.
    """
    def not_equal(*args):
        return op.ne(*args)
    not_equal.onerror = True  # flag consumed by the caller on comparison errors

    def existance(dp, k, v):
        # Does key `k`'s presence in datapoint `dp` match the expectation `v`?
        if dp is None:
            dp = {}
        return (k in dp) == v
    existance.is_datapoint_test = True

    def contains(a, b):
        return a in b

    def not_contains(a, b):
        return not contains(a, b)

    return {
        '=': op.eq,
        '!=': not_equal,
        '<': op.lt,
        '<=': op.le,
        '>': op.gt,
        '>=': op.ge,
        "?=": existance,
        "in": contains,
        "!in": not_contains,
    }[test_str]
def import_csv(collection, path):
    """ import csv file to MongoDB """
    # Inserts every row of the CSV at `path` into `collection`, then re-reads
    # the file to count empty cells as a rough data-quality metric.  Returns
    # (inserted_count, empty_cell_count); returns None when the file is
    # missing or the bulk insert failed.
    with TrackEntryExit("import_csv"):
        try:
            with open(path, "r") as csvfile:
                try:
                    csv_dct = csv.DictReader(csvfile, delimiter=",")
                    ins_csv_dct = collection.insert_many(csv_dct)
                except mer.BulkWriteError as err:
                    LOGGER.info("insertion error: %s", err.details)
            # Second pass: count empty values per row.
            with open(path, "r") as csvfile1:
                csv_dct1 = csv.DictReader(csvfile1, delimiter=",")
                csv_lst = list(csv_dct1)
            csv_err = 0
            for item in csv_lst:
                tmp_lst = list(item.values())
                no_err = [tmp_lst[i] for i in range(len(item))].count("")
                if op.ne(no_err, 0):
                    csv_err = op.iadd(csv_err, no_err)
                    LOGGER.info("inserted file has missing values")
            # NOTE(review): if insert_many raised above, ins_csv_dct is unbound
            # here, so the UnboundLocalError below doubles as the bulk-failure
            # exit path — confirm this is intentional.
            return len(ins_csv_dct.inserted_ids), csv_err
        except (FileNotFoundError, UnboundLocalError) as err:
            LOGGER.info("path-indicated file not found")
            LOGGER.info(err)
def neighbor(solution): """Generates a new random solution based on a previous one by adding spaces. There's a 50% probability of adding space in one of the parts of the solution. """ min_len = min(map(len, solution)) min_sol = list(filter(lambda x: len(x) == min_len, solution)) if operator.ne(len(solution), len(min_sol)): index = solution.index(min_sol[0]) i = random.randint(0, len(solution[index])) solution[index] = solution[index][:i] + "-" + solution[index][i:] else: index = random.randint(0, len(solution) - 1) if index: occur = [m.start() for m in re.finditer('-', solution[index])] i = random.choice(occur) sol_list = list(solution[index]) if i == 0: sol_list[i] = sol_list[i + 1] sol_list[i + 1] = '-' elif i == (len(solution[index]) - 1): sol_list[i] = sol_list[i - 1] sol_list[i - 1] = '-' else: if random.random() >= 0.5: sol_list[i] = sol_list[i + 1] sol_list[i + 1] = '-' else: sol_list[i] = sol_list[i - 1] sol_list[i - 1] = '-' solution[index] = "".join(sol_list) return solution
def __ne__(
    self, other: i_probability_distribution.IProbabilityDistribution
) -> "ProbabilityDistribution":
    """Distribution-valued inequality: the distribution of the indicator
    variable 1{self != other} over all outcome pairs."""
    def indicator(a, b):
        # 1 when the pair of outcomes differs, 0 when it matches.
        return 0 if a == b else 1
    combined = self._combine_distributions(indicator, other)
    return ProbabilityDistribution(combined)
def evaluate(self):
    """Recursively evaluate this binary-expression node.

    Evaluates both children, then combines the results according to
    self.operation.  Returns None for an unknown operation.
    """
    result = None
    left = self.left.evaluate()
    right = self.right.evaluate()
    if self.operation == '+':
        result = operator.add(left, right)
    elif self.operation == '-':
        result = operator.sub(left, right)
    elif self.operation == '*':
        result = operator.mul(left, right)
    elif self.operation == '/':
        # BUGFIX: operator.div exists only on Python 2 (classic division);
        # fall back to truediv on Python 3 while keeping py2 semantics.
        result = getattr(operator, 'div', operator.truediv)(left, right)
    elif self.operation == '^':
        result = operator.pow(left, right)
    elif self.operation == 'and':
        result = left and right
    elif self.operation == 'or':
        result = left or right
    elif self.operation == '<':
        result = operator.lt(left, right)
    elif self.operation == '<=':
        result = operator.le(left, right)
    elif self.operation == '==':
        result = operator.eq(left, right)
    elif self.operation == '!=':
        result = operator.ne(left, right)
    elif self.operation == '>':
        result = operator.gt(left, right)
    elif self.operation == '>=':
        result = operator.ge(left, right)
    elif self.operation == 'in':
        result = (left in right)
    return result
def column_cost(column):
    """Calculates the cost of the column using the sum-of-pairs(SP).

    Pair scoring: match +1, mismatch -1, residue-vs-gap -2, and gap-vs-gap 0
    (two gaps are not penalised again).
    """
    GAP = '-'
    score = 0
    for first, second in combinations(column, 2):
        if first == second:
            if first != GAP:
                score += 1      # residue match
            # gap/gap pair contributes nothing
        elif first != GAP and second != GAP:
            score -= 1          # residue mismatch
        else:
            score -= 2          # residue against a gap
    return score
def check():
    """Cross-check data directory 0 against directory 1: every entry of a
    dir-0 file must exist (and match) in the same-named dir-1 file; missing
    entries are auto-filled and the dir-1 file re-saved."""
    # Iterate data directory 0 and verify same-named files in directory 1.
    filenames = fileIndex._getTxtFiles(dataDir[0])
    for filename in filenames:
        data1 = txtfile.loadDict(os.path.join(dataDir[0], filename))
        file2 = os.path.join(dataDir[1], filename)
        if os.path.exists(file2):
            data2 = txtfile.loadDict(file2)
            changed2 = False
            # Only need to verify that data1's entries exist in data2.
            for (key, value) in data1.items():
                # NOTE(review): strips a 2-character key prefix — confirm the
                # dir-0 key naming scheme.
                key = key[2:]
                if not data2.__contains__(key):
                    print(file2, "缺少数据", key)
                    # Auto-fill the missing entry from the dir-0 value.
                    newValue = list(map(unformatValue, value))
                    data2[key] = newValue
                    changed2 = True
                    print("自动补充数据", key, newValue)
                else:
                    # Entry exists: report (but do not fix) value mismatches.
                    value2 = list(map(formatValue, data2[key]))
                    if operator.ne(value, value2):
                        print(filename, value, value2, key)
            if changed2:
                txtfile.saveDict(file2, data2)
def startBZKmonitor(self): children = self._ZK.get_children("/%s/%s" % (self._Type, self._VName)) #list(map(lambda child: self._ZK.get(child_path), children)) for child in children: child_path = "/%s/%s/%s" % (self._Type, self._VName, child) data, stat = self._ZK.get(child_path) # if eval(data.decode("utf-8"))['status'] == str(0): kbid = eval(data.decode("utf-8"))['Target'] if operator.ne(kbid, 'Null'): if time.time() > float( eval( data.decode("utf-8"))['update_time']) + TIMERHOURS: onebbox_address = "{\"Target\":\"Null\",\"Add\":\"%s\",\"status\":\"0\",\"update_time\":\"%f\"}" % ( eval(data.decode("utf-8"))['Add'], time.time()) onebbox_address = onebbox_address.encode('utf-8') self._ZK.set(child_path, onebbox_address) logger.info( 'set B Box node %s is null and status 0 because of timeout a half hour.' % child_path) tmp_node = "/%s/%s/%s/%s" % (self._VName, CBOX['Bk'], kbid, child) if self._ZK.exists(tmp_node): transaction = self._ZK.transaction() transaction.delete(tmp_node) transaction.commit() logger.info('delete a VM/k/kb/Box node %s.' % tmp_node)
def is_valid_members(member_ids):
    """Resolve member_ids to existing user ids.

    :raises InvalidMemberException: when any requested id has no User row.
    :return: list of the matching user ids.
    """
    user_ids = User.objects.filter(id__in=member_ids).values_list('id', flat=True)
    # Any unknown id shows up as a length mismatch between request and DB.
    if len(user_ids) != len(member_ids):
        raise InvalidMemberException
    return list(user_ids)
def lvar_ignore_ne(x, y):
    """Inequality that treats any two logic variables (or any two Var
    subclasses) as indistinguishable, delegating everything else to ne()."""
    both_logic_vars = isvar(x) and isvar(y)
    both_var_classes = (
        isinstance(x, type) and isinstance(y, type)
        and issubclass(x, Var) and issubclass(y, Var)
    )
    if both_logic_vars or both_var_classes:
        return False
    return ne(x, y)
def parse(instruction):
    """Execute one register instruction of the form
    '<target> inc|dec <amount> if <reg> <cmp> <value>', updating the global
    `registers` map and the running maximum `bigmax`.
    """
    global bigmax
    raw = instruction.split()
    compare = raw[4]
    value = int(raw[6])
    # Registers spring into existence with value 0.
    if compare not in registers:
        registers[compare] = 0
    # Dispatch on the comparison token lazily: the original built a dict of
    # six already-evaluated booleans per instruction.
    ops = {
        '>': operator.gt,
        '<': operator.lt,
        '>=': operator.ge,
        '<=': operator.le,
        '==': operator.eq,
        '!=': operator.ne,
    }
    if ops[raw[5]](registers[compare], value):
        target = raw[0]
        if target not in registers:
            registers[target] = 0
        # 'inc' adds, anything else ('dec') subtracts — the two duplicated
        # branches collapse to a signed delta.
        delta = int(raw[2]) if raw[1] == "inc" else -int(raw[2])
        bigsum = registers[target] + delta
        registers[target] = bigsum
        if bigsum > bigmax:
            bigmax = bigsum
def check_selected_mem(self,name=""):
    """With no name, list every selected memory region. With a name, report
    that region and verify it overlaps neither another selected region nor
    any spare range (overlap aborts via Error_exit)."""
    if eq(name,""):
        # Report-only mode: dump all selected regions.
        for mem in self.selected:
            info("%s: start addr is 0x%x and size is 0x%x"%(mem["name"],mem["start"],mem["size"]))
    else:
        # Locate the named region (last match wins) and echo it.
        # NOTE(review): if `name` matches no entry, checking_mem is unbound
        # below — confirm callers always pass a valid region name.
        for mem in self.selected:
            if eq(mem["name"],name):
                debug("%s: start addr is 0x%x and size is 0x%x"%(mem["name"],mem["start"],mem["size"]))
                self.Comment("#### %s: start addr is 0x%x and size is 0x%x"%(mem["name"],mem["start"],mem["size"]))
                checking_mem = mem
        # Interval test against every other selected region: regions are
        # disjoint iff one ends at or before the other starts.
        for mem in self.selected:
            if ne(mem["name"],checking_mem["name"]):
                if checking_mem["start"] + checking_mem["size"] <= mem["start"]:
                    pass
                elif checking_mem["start"] >= mem["start"] + mem["size"]:
                    pass
                else:
                    error("%s: start addr is 0x%x and size is 0x%x"%(checking_mem["name"],checking_mem["start"],checking_mem["size"]))
                    error("%s: start addr is 0x%x and size is 0x%x"%(mem["name"],mem["start"],mem["size"]))
                    self.Error_exit("selected mem %s and selected mem %s overlap!"%(checking_mem["name"],mem["name"]))
            else:
                pass
        # Same disjointness test against the spare ranges.
        for mem in self.spare_range:
            if checking_mem["start"] + checking_mem["size"] <= mem["start"]:
                pass
            elif checking_mem["start"] >= mem["start"] + mem["size"]:
                pass
            else:
                error("%s: start addr is 0x%x and size is 0x%x"%(checking_mem["name"],checking_mem["start"],checking_mem["size"]))
                error("spare mem: start addr is 0x%x and size is 0x%x"%(mem["start"],mem["size"]))
                self.Error_exit("selected mem %s and spare mem overlap!"%(checking_mem["name"]))
def get_interface_request_data(self, interface_name, assert_name):
    '''
    获取接口请求数据 (fetch the request payload for an interface case)
    :param interface_name: 接口名称 (interface name, top-level YAML key)
    :param assert_name: 断言名称 (assertion/case name under the interface)
    :return: None when the case defines no payload; the payload object for
        POST requests; otherwise the payload serialized with json.dumps.
    '''
    # Deep-copy so placeholder replacement never mutates the shared YAML tree.
    old_data = copy.deepcopy(self.yaml_data)
    request_data = old_data[interface_name][assert_name][
        YAML_CONFIG_KEY.INTERFACE_REQUEST_DATA]
    if request_data is None:  # was `== None`; identity check is the idiom
        return request_data
    # Expand any embedded jsonpath/cache expressions in place.
    self.recursive_replace_json_expr(replace_value=request_data)
    # Non-POST requests are sent with a JSON string body.
    if self.get_interface_case_req_method(interface_name, assert_name) != 'post':
        request_data = json.dumps(request_data)
    return request_data
def combination_req_data(self, interface_name=None, assert_name=None, host_key=None, method=None):
    """Assemble everything needed to issue one interface request.

    :param interface_name: interface name (top-level YAML key)
    :param assert_name: assertion/case name under the interface
    :param host_key: host selector used to build the URL and headers
    :param method: optional explicit HTTP method; when provided it overrides
        the method configured for the case.
    :return: dict keyed by YAML_CONFIG_KEY with url, payload, headers,
        description, setup steps, method and jsonpath expression.
    """
    url = self.get_interface_url(interface_name=interface_name, host_key=host_key)
    data = self.get_interface_request_data(interface_name=interface_name,
                                           assert_name=assert_name)
    headers = self.get_interface_req_headers(interface_name=interface_name,
                                             assert_name=assert_name,
                                             host_key=host_key)
    des = self.get_interface_des(interface_name=interface_name,
                                 assert_name=assert_name)
    setup = self.get_interface_setup_list(interface_name=interface_name,
                                          assert_name=assert_name)
    req_method = self.get_interface_case_req_method(interface_name, assert_name)
    # BUGFIX: the original condition was inverted
    # (`req_method if operator.ne(method, None) else method`), which discarded
    # an explicit `method` argument and returned None when no override was
    # given.  The explicit argument must win only when actually provided.
    if method is not None:
        req_method = method
    json_expr = self.get_interface_json_path(interface_name, assert_name)
    return {
        YAML_CONFIG_KEY.INTERFACE_URL: url,
        YAML_CONFIG_KEY.INTERFACE_ASSERT_DATA: data,
        YAML_CONFIG_KEY.INTERFACE_REQUEST_HEADERS: headers,
        YAML_CONFIG_KEY.INTERFACE_CASE_DES: des,
        YAML_CONFIG_KEY.INTERFACE_ASSERT_DATA_SETUP: setup,
        YAML_CONFIG_KEY.INTERFACE_CACHE_METHOD: req_method,
        YAML_CONFIG_KEY.INTERFACE_JSON_PATH: json_expr
    }
def evaluate(cond):
    # Method to evaluate the conditions.
    # `cond` is either a bare boolean or a (field, operator, value) triple
    # that is matched against `model` (a closure variable from the
    # enclosing scope).
    if isinstance(cond, bool):
        return cond
    left, oper, right = cond
    if not model or not left in model.mgroup.fields: #check that the field exist
        return False
    # Normalize alternative operator spellings via the instance's table,
    # falling back to the token itself when unmapped.
    oper = self.OPERAND_MAPPER.get(oper.lower(), oper)
    if oper == '=':
        res = operator.eq(model[left].get(model), right)
    elif oper == '!=':
        res = operator.ne(model[left].get(model), right)
    elif oper == '<':
        res = operator.lt(model[left].get(model), right)
    elif oper == '>':
        res = operator.gt(model[left].get(model), right)
    elif oper == '<=':
        res = operator.le(model[left].get(model), right)
    elif oper == '>=':
        res = operator.ge(model[left].get(model), right)
    elif oper == 'in':
        # operator.contains(container, item) tests `item in container`.
        res = operator.contains(right, model[left].get(model))
    elif oper == 'not in':
        # `not in` is computed as the negation of `in`.
        res = operator.contains(right, model[left].get(model))
        res = operator.not_(res)
    # NOTE(review): an unrecognized operator leaves `res` unbound, so the
    # return below raises UnboundLocalError -- confirm whether that is the
    # intended failure mode.
    return res
def specialcases(x):
    # Fixture that exercises every rich-comparison entry point of the
    # operator module (both the public names and the __dunder__ aliases).
    # Results are deliberately discarded: only the emitted operations
    # matter to whatever compiles/translates this function.
    operator.lt(x,3)
    operator.le(x,3)
    operator.eq(x,3)
    operator.ne(x,3)
    operator.gt(x,3)
    operator.ge(x,3)
    # NOTE(review): `is_operator` is a project-local helper (not part of the
    # stdlib operator module) -- presumably an identity test; confirm.
    is_operator(x,3)
    operator.__lt__(x,3)
    operator.__le__(x,3)
    operator.__eq__(x,3)
    operator.__ne__(x,3)
    operator.__gt__(x,3)
    operator.__ge__(x,3)
    # the following ones are constant-folded
    operator.eq(2,3)
    operator.__gt__(2,3)
def __init__(self,args):
    # Parse command-line args, resolve the input (cnr) and output (chx) AVP
    # file names, open both files, and initialize the parsing state.
    if len(args) == 0:
        error("No parameters! Please use -h")
        sys.exit()
    else:
        # Install Ctrl-C handler before any long-running work.
        signal.signal(signal.SIGINT,self.Sigint_handler)
        self.Parse_input(args)
        self.Set_logging() #Set logging
        self._path = os.path.abspath(".")
        # NOTE(review): options default to the literal string "None" rather
        # than the None object -- hence the string comparisons here.
        if ne(self.option.cnr_avp, "None"):
            self.cnr_avp_file_name = os.path.join(self._path, self.option.cnr_avp)
        else:
            error_exit("You must input a cnr avp file")
        if ne(self.option.chx_avp, "None"):
            self.chx_avp_file_name = os.path.join(self._path, self.option.chx_avp)
        else:
            # Default output name: derive "<input stem>_chx.avp".
            warning("You don't set the chx avp file name, so change it to %s_chx.avp"%(self.cnr_avp_file_name.split(".")[0]))
            self.chx_avp_file_name = "%s_chx.avp"%(self.cnr_avp_file_name.split(".")[0])
        # Open the input according to its extension: plain text for
        # .avp/.ic, gzip-compressed for .gz, anything else is fatal.
        if eq(self.cnr_avp_file_name.split(".")[-1],"avp") or eq(self.cnr_avp_file_name.split(".")[-1],"ic"):
            self.cnr_avp = open(self.cnr_avp_file_name,"r")
        elif eq(self.cnr_avp_file_name.split(".")[-1],"gz"):
            self.cnr_avp = gzip.open(self.cnr_avp_file_name)
        else:
            error_exit("Wrong input avp format!")
        # Bochs mode dumps start at the given address; otherwise the
        # initial image sits 0x100000 below the major dump address.
        if self.option.bochs:
            self.initial_addr = int(self.option.major_dump_addr,16)
        else:
            self.initial_addr = int(self.option.major_dump_addr,16)-0x100000
        self.results_addr = int(self.option.major_dump_addr,16)
        # Parsing state flags and collected sections.
        self.initial_flag = 0
        self.results_flag = 0
        self.program_flag = 0
        self.initial = {}
        self.results = {}
        self.initial_dump = {}
        self.results_dump = {}
        self.initial_dump_chx = {}
        self.results_dump_chx = {}
        self.tbdm = []
        self.chx_avp = open(self.chx_avp_file_name,"w")
        # NOTE(review): open() raises on failure rather than returning a
        # falsy value, so this guard is effectively dead -- confirm.
        if not self.chx_avp:
            error_exit("Open %s failed"%(self.chx_avp_file_name))
def setCondition(self, matrix, operator=None, value=None):
    """Build the boolean `self.mark` grid from a pandas DataFrame or Series.

    Each cell is marked True when the corresponding matrix value satisfies
    the condition.

    :param matrix: pd.DataFrame (height x width, indexed by self.info's
        'rows'/'cols') or pd.Series (indexed by 'rows')
    :param operator: one of 'LT', 'LE', 'EQ', 'INEQ' (string key), or None
        meaning "value != 0"
    :param value: right-hand operand used when `operator` is given
    """
    self.hasCondition = True
    mappings = {'LT': op.lt, 'LE': op.le, 'EQ': op.eq, 'INEQ': op.ne}  # not complete list
    if type(matrix) == pd.DataFrame:
        self.mark = [[False for j in range(self.info['width'])]
                     for i in range(self.info['height'])]
        for i in range(self.info['height']):
            for j in range(self.info['height'] and self.info['width']):
                # Hoist the double .loc lookup out of the branch.
                cell = matrix.loc[self.info['rows'][i]][self.info['cols'][j]]
                # FIX: compare with None via `is`, not `==` (PEP 8 / E711).
                if operator is None:
                    self.mark[i][j] = op.ne(cell, 0)
                else:
                    self.mark[i][j] = mappings[operator](cell, value)
    if type(matrix) == pd.Series:
        self.mark = [[False] for i in range(self.info['height'])]
        for i in range(self.info['height']):
            cell = matrix.loc[self.info['rows'][i]]
            if operator is None:
                self.mark[i][0] = op.ne(cell, 0)
            else:
                self.mark[i][0] = mappings[operator](cell, value)
def test_equality(self):
    """MenuItem equality contract: reflexive, equal across field-identical
    duplicates (symmetrically), unequal across differing fields, and never
    equal to a non-MenuItem value."""
    def build(tag):
        # Two calls with the same tag must produce equal-but-distinct items.
        return menus.MenuItem(
            template_name='menu.html',
            activepattern='/%s' % tag,
            href='/%s/' % tag,
            icon='fa-%s' % tag,
            display=tag.capitalize(),
            index=0
        )

    item1 = build('item1')
    item1dup = build('item1')
    item2 = build('item2')
    self.assertTrue(item1 == item1)
    self.assertTrue(operator.eq(item1, item1))
    self.assertTrue(item1 == item1dup)
    self.assertTrue(operator.eq(item1, item1dup))
    self.assertTrue(item1dup == item1)
    self.assertTrue(operator.eq(item1dup, item1))
    self.assertTrue(item1 != item2)
    self.assertTrue(operator.ne(item1, item2))
    self.assertFalse(operator.ne(item1, item1dup))
    self.assertFalse(item1 == 1)
    self.assertFalse(1 == item2)
    self.assertTrue(1 != item2)
    self.assertTrue(item2 != 1)
def test_ne(self): self.failUnless(operator.ne(1, 0)) self.failUnless(operator.ne(1, 0.0)) self.failIf(operator.ne(1, 1)) self.failIf(operator.ne(1, 1.0)) self.failUnless(operator.ne(1, 2)) self.failUnless(operator.ne(1, 2.0))
def validate_result(schema_instance_paths, found_mappings, norm, print_names=False, **kwargs):
    """
    Compare a computed column mapping against the expected schema mapping
    and report per-column results plus a summary line.

    :param schema_instance_paths: list[str | io.IOBase] -- exactly two schema sources
    :param found_mappings: list[int] -- found target column per 0-based
        source column; None marks an unmapped column
    :param norm: numeric norm echoed in the summary line
    :param print_names: when True, print the two schema basenames first
    :keyword output: stream all reports are written to (default sys.stdout)
    :keyword column_offset: added to 0-based column indexes (default 1)
    :keyword number_format: format spec applied to `norm`
    :return: (successful_count, invalid_count, impossible_count, missing_count)
    """
    assert len(schema_instance_paths) == 2
    out = kwargs.get('output', sys.stdout)
    schema_desc = tuple(map(read_schema_descriptor, schema_instance_paths))
    rschema_desc = tuple(map(utilities.rdict, schema_desc))
    if print_names:
        print(*map(os.path.basename, schema_instance_paths), sep=' => ', file=out)
    # build column mapping dictionary
    offset = kwargs.get('column_offset', 1)
    found_mappings = {k + offset: v + offset
                      for k, v in enumerate(found_mappings) if v is not None}
    invalid_count = 0
    impossible_count = 0
    # find mismatches
    for found_mapping in found_mappings.items():
        original_mapping = tuple(map(dict.__getitem__, schema_desc, found_mapping))
        expected = rschema_desc[1].get(original_mapping[0])
        if expected is None:
            # The source column has no counterpart at all in schema 2.
            impossible_count += 1
        else:
            # operator.ne yields a bool; True adds 1 to the invalid tally.
            invalid_count += operator.ne(*original_mapping)
            print('found {2} => {3}, expected {2} => {0} -- {1}'.format(
                expected,
                'ok' if found_mapping[1] == expected else 'MISMATCH!',
                *found_mapping), file=out)
    # find missing matches
    missing_count = 0
    for k in rschema_desc[0].keys() | rschema_desc[1].keys():
        v = rschema_desc[1].get(k)
        k = rschema_desc[0].get(k)
        if k is not None and v is not None and k not in found_mappings:
            # BUG FIX: this report went to sys.stdout while every other
            # message honors the `output` kwarg -- direct it to `out` too.
            print('expected {} => {} -- MISSED!'.format(k, v), file=out)
            missing_count += 1
    successful_count = len(found_mappings) - invalid_count - impossible_count
    print(
        '{} successful, {} invalid, {} impossible, and {} missing matches, '
        'norm = {:{}}'.format(
            successful_count, invalid_count, impossible_count, missing_count,
            norm, kwargs.get('number_format', '')),
        end='\n\n', file=out)
    return successful_count, invalid_count, impossible_count, missing_count
def get_comparision_verdict_with_text(comp_operator, actual_value, expected_value,
                                      expected_min_value, expected_max_value,
                                      property_name):
    """
    comp_operator: Enum comp_operator values: for comparision
    property_name: string used for verdict text message.
    Return dictionary with verdict as bool and verdict text
    """
    # Table of simple two-operand comparisons: predicate + phrase fragment.
    simple_ops = {
        'LESS_THAN': (operator.lt, ' less than '),
        'LESS_THAN_OR_EQUAL': (operator.le, ' less than or equal to '),
        'GREATER_THAN': (operator.gt, ' greater than '),
        'GREATER_THAN_OR_EQUAL': (operator.ge, ' greater than or equal to '),
        'EQUAL': (operator.eq, ' equal to '),
        'NOT_EQUAL': (operator.ne, ' not equal to '),
    }
    if comp_operator in simple_ops:
        predicate, phrase = simple_ops[comp_operator]
        verdict = predicate(actual_value, expected_value)
        expected_filter_string = phrase + str(expected_value)
    elif comp_operator == 'BETWEEN':
        # Inclusive range check on both ends.
        verdict = operator.le(expected_min_value, actual_value) and \
            operator.le(actual_value, expected_max_value)
        expected_filter_string = ' between ' + str(expected_min_value) + \
            ' and ' + str(expected_max_value) + ', inclusive'
    else:
        raise Exception("Unsupported comparision operator {0}".format(comp_operator))
    pass_fail_text = ' matches ' if verdict is True else ' does not match '
    verdict_text = 'Actual ' + property_name + pass_fail_text + 'expected ' + \
        property_name + '. Actual count: ' + str(actual_value) + \
        '; expected count:' + expected_filter_string + '.'
    data = {}
    data[ProviderConst.VERDICT] = verdict
    data[ProviderConst.VERDICT_TEXT] = verdict_text
    return data
def mk_test(self, test_str):
    """Translate a comparison token into a two-argument predicate.

    '!=' carries an `onerror` marker attribute; '?=' is a key-existence
    test taking (datapoint, key, expected_presence) and is tagged with
    `is_datapoint_test`.
    """
    def not_equal(*args):
        return op.ne(*args)
    not_equal.onerror = True

    def key_presence(dp, k, v):
        return (k in dp) == v
    key_presence.is_datapoint_test = True

    dispatch = {
        '=': op.eq,
        '!=': not_equal,
        '<': op.lt,
        '<=': op.le,
        '>': op.gt,
        '>=': op.ge,
        "?=": key_presence,
    }
    return dispatch[test_str]
def test_ne(self): class C(object): def __ne__(self, other): raise SyntaxError self.assertRaises(TypeError, operator.ne) self.assertRaises(SyntaxError, operator.ne, C(), C()) self.assertTrue(operator.ne(1, 0)) self.assertTrue(operator.ne(1, 0.0)) self.assertFalse(operator.ne(1, 1)) self.assertFalse(operator.ne(1, 1.0)) self.assertTrue(operator.ne(1, 2)) self.assertTrue(operator.ne(1, 2.0))
def test_ne(self): class C(object): def __ne__(self, other): raise SyntaxError self.failUnlessRaises(TypeError, operator.ne) self.failUnlessRaises(SyntaxError, operator.ne, C(), C()) self.failUnless(operator.ne(1, 0)) self.failUnless(operator.ne(1, 0.0)) self.failIf(operator.ne(1, 1)) self.failIf(operator.ne(1, 1.0)) self.failUnless(operator.ne(1, 2)) self.failUnless(operator.ne(1, 2.0))
def parse_ic_file(ic_file):
    # Parse an .ic trace file into a list of {"addr", "data", "instr"}
    # records, collecting only the instructions between the "TRACER REPLAY"
    # marker and the "SHUTDOWN: tracer checkpoint" marker. Side effects:
    # updates the module globals `reload_num` (count of "TRACER DUMP"
    # records seen) and `smm` (set when an SMI is present).
    global reload_num
    global smm
    ic_initial_end = 0      # becomes 1 once "TRACER REPLAY" is seen
    ic_result_start = 0     # becomes 1 once the shutdown checkpoint is seen
    ic_file_instr = []
    index = 0
    reload_num = 0
    #info(ic_file)
    with open(ic_file,"r") as fd:
        while True:
            line = fd.readline()
            if line:
                line = line.strip()
                if re.search(r'TRACER REPLAY',line):
                    ic_initial_end = 1
                if re.search(r'SHUTDOWN: tracer checkpoint',line):
                    ic_result_start = 1
                if re.search(r'\"SMI\"',line):
                    smm = 1
                    info("This vector include SMM, don't check")
                # Only lines inside the replay window are collected.
                if (ic_result_start == 0) and (ic_initial_end == 1):
                    # Instruction record: I:0x<addr>:0x<data>:"<instr>";
                    m = re.search(r'I:0x(\w+):0x(\w+):\"(.*)\";',line)
                    if m :
                        data = m.group(2)
                        addr = m.group(1)
                        instr = m.group(3)
                        if ne("TRACER DUMP",instr):
                            ic_file_instr.append({"addr":addr,"data":data,"instr":instr})
                            #info(ic_file_instr[index])
                            index +=1
                        else:
                            # "TRACER DUMP" pseudo-instructions count as reloads.
                            reload_num += 1
                    # Data record: D:0x<addr>:0x<data>; extends the data of
                    # the most recently collected instruction (prepended).
                    m = re.search(r'D:0x\w+:0x(\w+);',line)
                    if m:
                        ic_file_instr[-1]["data"] = m.group(1) + ic_file_instr[-1]["data"]
                        #info("data is %s"%(line))
            else:
                break
    return ic_file_instr
def hamming(str1, str2):
    """
    Compute the Hamming distance between two strings.

    The Hamming distance (see :evobib:`Hamming1950`) is defined as the
    number of positions at which the two vectors differ. Strings of
    unequal length are padded with ``None``, so every surplus character
    counts as one difference.

    Parameters
    ----------
    str1 : str
        str to be compared to str2
    str2 : str
        str to be compared to str1

    Returns
    -------
    _ : int
        the hamming distance
    """
    pairs = zip_longest(str1, str2, fillvalue=None)
    return sum(1 for left, right in pairs if left != right)
def get(self, filters, limit=0): """pass filters as: {"key": "val", "key": ["!=", "val"], "key": ["in", "val"], "key": ["not in", "val"], "key": "^val"}""" # map reverse operations to set add = False import operator ops_map = { "!=": lambda (a, b): operator.ne(a, b), "in": lambda (a, b): operator.contains(b, a), "not in": lambda (a, b): not operator.contains(b, a) } out = [] for doc in self: d = isinstance(getattr(doc, "fields", None), dict) and doc.fields or doc add = True for f in filters: fval = filters[f] if isinstance(fval, list): if fval[0] in ops_map and not ops_map[fval[0]]((d.get(f), fval[1])): add = False break elif isinstance(fval, basestring) and fval.startswith("^"): if not (d.get(f) or "").startswith(fval[1:]): add = False break elif d.get(f)!=fval: add = False break if add: out.append(doc) if limit and (len(out)-1)==limit: break return DocList(out)
def test_tcp_opts_change(monkeypatch):
    # KNOWN_TCP_OPTS is derived from the platform/kernel string when
    # amqp.platform is imported, so each scenario patches the platform and
    # reloads the module before reading the constant.
    # Scenario 1: old Linux kernel (< 2.6.37).
    monkeypatch_platform(monkeypatch, 'linux', '2.6.36-1-amd64')
    import amqp.platform
    reload_module(amqp.platform)
    old_linux = amqp.platform.KNOWN_TCP_OPTS
    # Scenario 2: newer Linux kernel.
    monkeypatch_platform(monkeypatch, 'linux', '2.6.37-0-41-generic')
    reload_module(amqp.platform)
    new_linux = amqp.platform.KNOWN_TCP_OPTS
    # Scenario 3: native Windows.
    monkeypatch_platform(monkeypatch, 'win32', '7')
    reload_module(amqp.platform)
    win = amqp.platform.KNOWN_TCP_OPTS
    # Scenario 4: Linux kernel string ending in "-Microsoft" -- presumably
    # WSL ("bash on Windows"); confirm against amqp.platform's detection.
    monkeypatch_platform(monkeypatch, 'linux', '4.4.0-43-Microsoft')
    reload_module(amqp.platform)
    win_bash = amqp.platform.KNOWN_TCP_OPTS
    # All four option sets must be pairwise distinct, and sizes must be
    # ordered: win <= win_bash < old_linux < new_linux.
    li = [old_linux, new_linux, win, win_bash]
    assert all(operator.ne(*i) for i in itertools.combinations(li, 2))
    assert len(win) <= len(win_bash) < len(old_linux) < len(new_linux)
def execute_binary_operator(cls, val, x, y):
    """Execute binary operators

    Dispatch a numeric opcode to the matching function from the
    operator module and apply it to (x, y).

    Arguments:
        val {int} -- opcode selecting the operation
        x {int} -- left operand
        y {int} -- right operand

    Returns:
        int -- operation result (implicitly None for unhandled opcodes)
    """
    if val == 0:
        return operator.add(x,y)
    elif val == 1:
        return operator.sub(x,y)
    elif val == 2:
        return operator.mul(x,y)
    elif val == 3:
        # NOTE(review): operator.div exists only on Python 2; on Python 3
        # this raises AttributeError (truediv/floordiv are the py3 names).
        return operator.div(x,y)
    elif val == 4:
        return operator.lt(x,y)
    elif val == 5:
        return operator.gt(x,y)
    elif val == 6:
        return operator.le(x,y)
    elif val == 7:
        return operator.ge(x,y)
    elif val == 8:
        return operator.eq(x,y)
    elif val == 9:
        return operator.ne(x,y)
    # NOTE(review): opcodes 10 and 11 are unhandled and fall through to an
    # implicit None return -- confirm whether that gap is intended.
    elif val == 12:
        return operator.mod(x,y)
def __ne__(self, other):
    """Delegate inequality to the wrapped payloads.

    Equivalent to ``operator.ne(self.obj, other.obj)``; assumes `other`
    also exposes an ``obj`` attribute.
    """
    return self.obj != other.obj
""" logger = logging.getLogger('pyresttest.validators') # Binary comparison tests COMPARATORS = { 'count_eq': lambda x, y: safe_length(x) == y, 'lt': operator.lt, 'less_than': operator.lt, 'le': operator.lt, 'less_than_or_equal': operator.lt, 'eq': operator.eq, 'equals': operator.eq, 'str_eq': lambda x, y: operator.eq(str(x), str(y)), 'str_not_eq': lambda x, y: operator.ne(str(x), str(y)), 'ne': operator.ne, 'not_equals': operator.ne, 'ge': operator.ge, 'greater_than_or_equal': operator.ge, 'gt': operator.gt, 'greater_than': operator.gt, 'contains': lambda x, y: x and operator.contains(x, y), # is y in x 'contained_by': lambda x, y: y and operator.contains(y, x), # is x in y 'regex': lambda x, y: regex_compare(str(x), str(y)), 'type': lambda x, y: test_type(x, y) } COMPARATORS['length_eq'] = COMPARATORS['count_eq'] # Allow for testing basic types in comparators TYPES = {
def get_url_to_form(doctype, name, label=None):
    # Build a desk deep-link anchor for the given doctype/name; the label
    # defaults to the document name.
    if not label: label = name
    return """<a href="/desk#!Form/%(doctype)s/%(name)s">%(label)s</a>""" % locals()

# Map filter-condition tokens to predicates over a single (a, b) tuple.
# NOTE(review): Python 2 only -- tuple-unpacking lambda parameters do not
# exist in Python 3.
operator_map = {
    # startswith
    "^": lambda (a, b): (a or "").startswith(b),
    # in or not in a list
    "in": lambda (a, b): operator.contains(b, a),
    "not in": lambda (a, b): not operator.contains(b, a),
    # comparison operators
    "=": lambda (a, b): operator.eq(a, b),
    "!=": lambda (a, b): operator.ne(a, b),
    ">": lambda (a, b): operator.gt(a, b),
    "<": lambda (a, b): operator.lt(a, b),
    ">=": lambda (a, b): operator.ge(a, b),
    "<=": lambda (a, b): operator.le(a, b),
    # truthiness tests (second operand ignored)
    "not None": lambda (a, b): a and True or False,
    "None": lambda (a, b): (not a) and True or False
}

def compare(val1, condition, val2):
    # Evaluate `val1 <condition> val2` via operator_map; unknown condition
    # tokens evaluate to False.
    ret = False
    if condition in operator_map:
        ret = operator_map[condition]((val1, val2))
    return ret
def ne_usecase(x, y):
    # Exercise operator.ne explicitly (kept as an operator-module call on
    # purpose; this function exists to be compiled/traced).
    outcome = operator.ne(x, y)
    return outcome
if report_type == "Report Builder": return get_url(uri = "desk#Report/{0}/{1}".format(quoted(doctype), quoted(name))) else: return get_url(uri = "desk#query-report/{0}".format(quoted(name))) operator_map = { # startswith "^": lambda a, b: (a or "").startswith(b), # in or not in a list "in": lambda a, b: operator.contains(b, a), "not in": lambda a, b: not operator.contains(b, a), # comparison operators "=": lambda a, b: operator.eq(a, b), "!=": lambda a, b: operator.ne(a, b), ">": lambda a, b: operator.gt(a, b), "<": lambda a, b: operator.lt(a, b), ">=": lambda a, b: operator.ge(a, b), "<=": lambda a, b: operator.le(a, b), "not None": lambda a, b: a and True or False, "None": lambda a, b: (not a) and True or False } def evaluate_filters(doc, filters): '''Returns true if doc matches filters''' if isinstance(filters, dict): for key, value in iteritems(filters): f = get_filter(None, {key:value}) if not compare(doc.get(f.fieldname), f.operator, f.value): return False
def aggregate_data(self, request, queryset):
    # Django admin action: export the per-group averages of website
    # evaluation ratings as a CSV download.
    # NOTE(review): Python 2 code (print statements); comments only added,
    # logic untouched. Statement nesting reconstructed from a collapsed
    # source -- verify against the original file.
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=aggregated_data.csv'
    writer = csv.writer(response, csv.excel)
    response.write(u'\ufeff'.encode('utf8')) # BOM (optional...Excel needs it to open UTF-8 file properly)"""
    result_dict = {}
    # Ordinal encodings used to average the categorical ratings.
    dict_time_constraint= {"Independent": 0, "Soft": 1, "Hard": 2}
    dict_answer_validity = {"Short": 0, "Medium": 1, "Long": 2}
    dict_generality_applicability = {"Low": 0, "Medium": 1, "High": 2}
    dict_location_constraint = {"Low": 0, "High": 1}
    dict_degree_knowledge = dict_location_constraint.copy()
    dict_boolean = {False: 0, True: 1}
    #dict_mobility_sociality = dict_boolean.copy()
    dict_costs_parameters = {"Free": 0, "Partially Free": 1, "Fee Based": 2}
    dict_info_provider = dict_mobility_sociality = dict_boolean.copy()
    # Distinct post URLs that have at least one evaluation.
    posts_evaluated = list(set([p.post_url for p in WebsiteEvaluation.objects.all()]))
    #print posts_evaluated
    print dict_info_provider
    list_test=[p.evaluation_axis.all() for p in posts_evaluated]
    print list_test
    for i in list_test:
        # Key each group by its first evaluation-axis object; each entry is
        # the full list of rating rows (20 fields per row).
        result_dict[i[0]]=[[x.post_url.website.name,x.post_url.website.category,x.post_url,x.time_constraint,x.answer_validity,x.generality_applicability, x.location_constraint, x.degree_knowledge,x.costs_parameters,x.info_provider_layman,x.info_provider_operator, x.info_provider_expert,x.mobile_context,x.spatial_coordinates,x.ask_questions,x.suggestions, x.comment,x.personal_profile,x.others_information_need,x.contact_user] for x in i]
    website_list=[]
    category_list=[]
    url_list=[]
    result_list=[]
    #print result_dict
    for key in result_dict.keys():
        """self.total_time_constraint_count=0
        self.total_answer_validity_count=0
        self.total_generality_applicability_count=0
        self.total_location_constraint_count=0
        self.total_degree_knowledge_count=0
        self.total_costs_parameters_count=0
        self.total_info_provider_layman_count=0
        self.total_info_provider_operator_count=0
        self.total_info_provider_expert_count=0 
        self.total_mobile_context_count=0
        self.total_spatial_coordinates_count=0
        self.total_ask_questions_count=0
        self.total_suggestions_count=0
        self.total_comment_count=0
        self.total_personal_profile_count=0
        self.total_others_information_need_count=0
        self.total_contact_user_count=0"""
        # Reset all running totals for this group in one chained assignment.
        self.total_time_constraint_count=self.total_answer_validity_count=self.total_generality_applicability_count\
        =self.total_location_constraint_count=self.total_degree_knowledge_count=self.total_costs_parameters_count\
        =self.total_info_provider_layman_count=self.total_info_provider_operator_count=self.total_info_provider_expert_count\
        =self.total_mobile_context_count=self.total_spatial_coordinates_count=self.total_ask_questions_count=self.total_suggestions_count\
        =self.total_comment_count=self.total_personal_profile_count=self.total_others_information_need_count=self.total_contact_user_count=0
        print result_dict[key]
        for rating in result_dict[key]:
            self.website = rating[0]
            self.category = rating[1]
            self.url = rating[2]
            # Accumulate the ordinal encoding of every rating dimension.
            self.total_time_constraint_count+=dict_time_constraint[rating[3]]
            self.total_answer_validity_count+=dict_answer_validity[rating[4]]
            self.total_generality_applicability_count+=dict_generality_applicability[rating[5]]
            self.total_location_constraint_count+=dict_location_constraint[rating[6]]
            self.total_degree_knowledge_count+=dict_degree_knowledge[rating[7]]
            self.total_costs_parameters_count+=dict_costs_parameters[rating[8]]
            self.total_info_provider_layman_count+=dict_info_provider[rating[9]]
            self.total_info_provider_operator_count+=dict_info_provider[rating[10]]
            self.total_info_provider_expert_count+=dict_info_provider[rating[11]]
            self.total_mobile_context_count+=dict_mobility_sociality[rating[12]]
            self.total_spatial_coordinates_count+=dict_mobility_sociality[rating[13]]
            self.total_ask_questions_count+=dict_mobility_sociality[rating[14]]
            self.total_suggestions_count+=dict_mobility_sociality[rating[15]]
            self.total_comment_count+=dict_mobility_sociality[rating[16]]
            self.total_personal_profile_count+=dict_mobility_sociality[rating[17]]
            self.total_others_information_need_count+=dict_mobility_sociality[rating[18]]
            self.total_contact_user_count+=dict_mobility_sociality[rating[19]]
        total_calc = [self.total_time_constraint_count, self.total_answer_validity_count, self.total_generality_applicability_count,self.total_location_constraint_count, self.total_degree_knowledge_count,self.total_costs_parameters_count,self.total_info_provider_layman_count, self.total_info_provider_operator_count,self.total_info_provider_expert_count,self.total_mobile_context_count,self.total_spatial_coordinates_count, self.total_ask_questions_count, self.total_suggestions_count,self.total_comment_count, self.total_personal_profile_count, self.total_others_information_need_count,self.total_contact_user_count]
        # Average each total over the number of ratings in the group.
        avg = [a/float(len(result_dict[key])) for a in total_calc]
        result_list.append(avg)
        website_list.append(self.website)
        category_list.append(self.category)
        url_list.append(self.url)
    # CSV header row (20 columns, matching the data rows below).
    writer.writerow([
        smart_str(u"Website"),
        smart_str(u"Category"),
        smart_str(u"Url"),
        smart_str(u"Time Constraint"),
        smart_str(u"Answer Validity"),
        smart_str(u"Generality Of Applicability"),
        smart_str(u"Location Dependency"),
        smart_str(u"Knowledge Codification"),
        smart_str(u"Costs Category"),
        smart_str(u"Information Provider Layman"),
        smart_str(u"Information Provider Operator"),
        smart_str(u"Information Provider Expert"),
        smart_str(u"Mobile Context"),
        smart_str(u"Spatial Coordinates"),
        smart_str(u"Ask Questions"),
        smart_str(u"Give Suggestions"),
        smart_str(u"Rate or Comment"),
        smart_str(u"Create Personal Profile"),
        smart_str(u"Others Information Needs"),
        smart_str(u"Contact Other Users")
    ])
    i=0
    # Only emit data rows when all parallel lists line up.
    if(len(result_list)==len(url_list)==len(category_list)==len(website_list)):
        while operator.ne(i,len(result_list)) and operator.ne(i,len(url_list)) and operator.ne(i,len(category_list)) and \
            operator.ne(i,len(website_list)):
            """writer.writerow([ smart_str(url_list[i]), 
            smart_str(category_list[i]),
            smart_str(dict_time_constraint.keys()[dict_time_constraint.values().index(result_list[i][0])]),
            smart_str(dict_answer_validity.keys()[dict_answer_validity.values().index(result_list[i][1])]),
            smart_str(dict_generality_applicability.keys()[dict_generality_applicability.values().index(result_list[i][2])]),
            smart_str(dict_location_constraint.keys()[dict_location_constraint.values().index(result_list[i][3])]),
            smart_str(dict_degree_knowledge.keys()[dict_degree_knowledge.values().index(result_list[i][4])]) ])"""
            writer.writerow([
                smart_str(website_list[i]),
                smart_str(category_list[i]),
                smart_str(url_list[i]),
                smart_str(result_list[i][0]),
                smart_str(result_list[i][1]),
                smart_str(result_list[i][2]),
                smart_str(result_list[i][3]),
                smart_str(result_list[i][4]),
                smart_str(result_list[i][5]),
                smart_str(result_list[i][6]),
                smart_str(result_list[i][7]),
                smart_str(result_list[i][8]),
                smart_str(result_list[i][9]),
                smart_str(result_list[i][10]),
                smart_str(result_list[i][11]),
                smart_str(result_list[i][12]),
                smart_str(result_list[i][13]),
                smart_str(result_list[i][14]),
                smart_str(result_list[i][15]),
                smart_str(result_list[i][16]),
            ])
            i+=1
    else:
        pass
    return response
def cmp_ne(x,y): return _op.ne(x,y) @cutype("(a, a) -> Bool")