def ihexdump(mem, start_addr=0, compress=False):
    line_fmt = '{addr:08x} {0:8/ /23} {0:8/ /23} |{1:16//16}|'
    exs = [Ex(fmt='x', sz=1, endianess='='),
           Ex(fmt='c', sz=1, endianess='=')]

    it = Formatter(line_fmt, '*', *exs).lines(mem, start_addr, compress)
    if compress:
        yield from unique_justseen(it)
    else:
        yield from it
def detectValley(self, msg):
    # TODO: clean this function
    # TODO: check if the hegemony scores are not too old..

    # zTd, zDt, zS, zOrig, zAS, zPfx, path, zPro, zOr, z0, z1, z2, z3, z4, z5 = msg
    path = list(unique_justseen(msg[4]))
    origas = path[-1]
    if origas not in self.hegemony:
        return

    hege = self.hegemony[origas]
    # Use a list (not a lazy map) so it can be consumed by unique_justseen and
    # still be indexed/iterated again below.
    hegeAll = [round(hege[x], 2) for x in path[1:]]
    # hegeAll = [hege[x] for x in path[1:]]
    hege = list(unique_justseen(hegeAll))
    hegeDiff = np.diff(hege)
    signChange = len(list(itertools.groupby(hegeDiff, lambda x: x >= 0)))

    if signChange > 1:  # and not "27064" in path and not "27065" in path and not "27066" in path:
        # print "(pathMonitor) anomaly: %s (%s)" % (path[1:], hege)

        # Find suspicious transit AS
        prev = hegeAll[0]
        goingDown = False
        for i, d in enumerate(hegeAll[1:]):
            if goingDown and d > prev:
                # TODO compute anomalous score
                if self.saverQueue is not None:
                    self.saverQueue.put(("anomalouspath", [
                        msg[1], str(msg), origas, path[i + 1], str(hege), d
                    ]))
                # print "(pathMonitor) anomalous transit: %s" % path[i+1]
                # anomalousTransit[path[i+1]] += 1  # +1 because we ignore the peer AS in hegeAll

            if d >= prev:
                goingDown = False
            else:
                goingDown = True
            prev = d
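# A minimal, self-contained sketch (not part of the original class) of the
# sign-change count used above: grouping np.diff values by their sign with
# itertools.groupby yields one group per monotone run, so more than one group
# means the hegemony series goes down and then up again (a "valley").
import itertools
import numpy as np

def count_sign_changes(values):
    diffs = np.diff(values)
    return len(list(itertools.groupby(diffs, lambda x: x >= 0)))

assert count_sign_changes([0.9, 0.2, 0.8]) == 2   # decreases, then increases
assert count_sign_changes([0.9, 0.5, 0.2]) == 1   # monotonically decreasing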
def _obtain_routes(self, pass_through_waypoint: carla.Waypoint, total_length_m: float) \
        -> List[List[carla.Transform]]:
    part_length_m = total_length_m / 2
    forward_routes = self._topology.get_forward_routes(pass_through_waypoint, part_length_m)
    backward_routes = self._topology.get_backward_routes(pass_through_waypoint, part_length_m)
    routes = [
        backward_route + forward_route
        for backward_route in backward_routes
        for forward_route in forward_routes
    ]
    return [list(unique_justseen(r)) for r in routes]
def _resample_route(self, route: List[carla.Transform], step_m: float) -> List[carla.Transform]:
    assert len(route) > 1
    try:
        positions = [
            Transform.from_carla_transform(transform).position
            for transform in route
        ]
        positions = unique_justseen(positions)
        positions = resample_points(positions, step_m=step_m)
        route = positions_to_transforms(positions)
        return route
    except Exception:
        LOGGER.error(f'#route={len(route)} route={route}')
        raise
def remove_duplicate_character(list_string):
    result = []
    for s in list_string:
        if 'oo' in s:
            result.append(s)
        else:
            s = ''.join(list(unique_justseen(list(s))))
            # replace acronym word by correct word
            if s in acronym_words:
                s = [key for key, value in data.items() if s in value][0]
            if not s.isalnum():
                continue
            result.append(s)
    return result
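# A made-up usage sketch: `acronym_words` and `data` are globals in the
# original module; the stubs below only illustrate the intent of collapsing
# repeated characters and expanding known acronyms.
from more_itertools import unique_justseen  # used by remove_duplicate_character above

data = {'facebook': ['fb']}
acronym_words = {'fb'}
print(remove_duplicate_character(['coool', 'fb', 'heyyy', '!!!']))
# ['coool', 'facebook', 'hey']  (words containing 'oo' are kept as-is, '!!!' is dropped)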
def resample_points(positions: List[Union[Vector2, Vector3]], step_m=1) -> List[Vector2]:
    """Interpolates points so they are evenly spaced (step_m meters between consecutive points, 1 m by default)"""
    positions = [convert_to_vector2(p) for p in positions]
    points = np.array([p.as_numpy() for p in positions])
    assert len(list(unique_justseen(positions))) == len(positions)  # consecutive duplicates break the resampling code
    x, y = zip(*points)

    DEGREE_ONE_SO_WE_HAVE_POLYLINE = 1
    f, u = interpolate.splprep([x, y], s=0, k=DEGREE_ONE_SO_WE_HAVE_POLYLINE, per=0)

    distance = np.sqrt(np.sum(np.diff(points, axis=0) ** 2, axis=1)).sum()
    linspace = np.linspace(0, 1, int(distance / step_m))
    x, y = interpolate.splev(linspace, f)

    points_fitted = np.stack([x, y], axis=1)
    return [Vector2.from_numpy(p) for p in points_fitted]
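# A minimal, self-contained sketch of the same resampling idea without the
# Vector2/Vector3 wrappers (names here are illustrative, not from the original
# codebase): fit a degree-1 spline through the points and sample it at a
# roughly fixed arc-length step.
import numpy as np
from scipy import interpolate

def resample_xy(points_xy, step_m=1.0):
    points = np.asarray(points_xy, dtype=float)
    tck, _ = interpolate.splprep([points[:, 0], points[:, 1]], s=0, k=1)
    length = np.sqrt(np.sum(np.diff(points, axis=0) ** 2, axis=1)).sum()
    u = np.linspace(0, 1, max(int(length / step_m), 2))
    x, y = interpolate.splev(u, tck)
    return np.stack([x, y], axis=1)

# A 10 m straight segment resampled at 2 m spacing gives 5 points.
print(resample_xy([(0, 0), (4, 0), (10, 0)], step_m=2.0))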
def wrapper(*args, **kwargs):
    return unique_justseen(func(*args, **kwargs))
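# A hedged guess at the enclosing decorator this wrapper most likely belongs to
# (the outer function providing `func` is not part of the snippet); the name
# drop_consecutive_duplicates is made up for illustration.
import functools
from more_itertools import unique_justseen

def drop_consecutive_duplicates(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return unique_justseen(func(*args, **kwargs))
    return wrapper

@drop_consecutive_duplicates
def noisy_readings():
    return [1, 1, 2, 2, 2, 3, 1]

print(list(noisy_readings()))  # [1, 2, 3, 1]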
def test_custom_key(self):
    """ensure the custom key comparison works"""
    u = mi.unique_justseen('AABCcAD', str.lower)
    self.assertEqual(list('ABCAD'), list(u))
def test_justseen(self):
    """ensure only last item is remembered"""
    u = mi.unique_justseen('AAAABBBCCDABB')
    self.assertEqual(list('ABCDAB'), list(u))
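# A small standalone illustration of the behaviour these tests pin down:
# unique_justseen only collapses *consecutive* duplicates, whereas
# unique_everseen removes every repeated element globally.
from more_itertools import unique_justseen, unique_everseen

print(list(unique_justseen('AAAABBBCCDABB')))   # ['A', 'B', 'C', 'D', 'A', 'B']
print(list(unique_everseen('AAAABBBCCDABB')))   # ['A', 'B', 'C', 'D']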
def get_topics(iterable):
    for q in more_itertools.unique_justseen(
            (q for q in get_questions(iterable)),
            key=lambda x: x['qid']):
        topic = {k: q[k] for k in ('qid', 'question')}
        yield topic['question'], topic
def idisplay(spec, mem, endianess='=', start_addr=0, compress=False, extra_kargs={}):
    '''
    >>> b1 = bytes.fromhex('255044462d312e320d25e2e3cfd30d0a323234372030206f626a0d3c3c200d2f4c696e656172697a65642031200d')

    'db': display lines of 16 bytes from <mem> showing the address, then the
    bytes in hexadecimal and then the same bytes in ASCII. If the byte is not
    printable, a period is used.

    >>> display('db', b1)
    00000000 25 50 44 46 2d 31 2e 32-0d 25 e2 e3 cf d3 0d 0a |%PDF-1.2.%......|
    00000010 32 32 34 37 20 30 20 6f-62 6a 0d 3c 3c 20 0d 2f |2247 0 obj.<< ./|
    00000020 4c 69 6e 65 61 72 69 7a-65 64 20 31 20 0d       |Linearized 1 .  |

    'dc': display lines of 4 words (4 bytes each) from <mem> showing the
    address, then the 4 words in hexadecimal and then the same bytes in ASCII.
    If the byte is not printable, a period is used.

    Note: the endianess by default is the endianess of the host/machine, but it
    can be changed to big endian (>) or little endian (<).

    Note: if the input is not a multiple of 4, the last bytes will not be
    displayed in the hexadecimal part because they don't form a 4-byte word;
    however, they will be displayed in the ASCII part.

    >>> display('dc', b1)
    00000000 46445025 322e312d e3e2250d 0a0dd3cf |%PDF-1.2.%......|
    00000010 37343232 6f203020 3c0d6a62 2f0d203c |2247 0 obj.<< ./|
    00000020 656e694c 7a697261 31206465          |Linearized 1 .  |

    >>> display('dc', b1, endianess='>')
    00000000 25504446 2d312e32 0d25e2e3 cfd30d0a |%PDF-1.2.%......|
    00000010 32323437 2030206f 626a0d3c 3c200d2f |2247 0 obj.<< ./|
    00000020 4c696e65 6172697a 65642031          |Linearized 1 .  |

    'dd': display lines of 4 words (4 bytes each) from <mem> like in 'dc' but
    without the ASCII representation.

    >>> display('dd', b1)
    00000000 46445025 322e312d e3e2250d 0a0dd3cf
    00000010 37343232 6f203020 3c0d6a62 2f0d203c
    00000020 656e694c 7a697261 31206465

    'dD': display lines of 4 double precision floats (8 bytes each) from <mem>.

    >>> display('dD', b1)
    00000000 5.599436e-67 3.031161e-260 1.917431e+227 4.797675e-82
    00000020 4.619119e+281

    'df': like 'dD' but displays single precision floats (4 bytes each)

    >>> display('df', b1)
    00000000 1.256404e+04 1.013931e-08 -8.343268e+21 6.828740e-33
    00000010 1.074052e-05 4.957578e+28 8.631321e-03 1.283533e-10
    00000020 7.036660e+22 3.030313e+35 2.334013e-09

    'dq': display lines of 2 quads (8 bytes each) from <mem>

    >>> display('dq', b1)
    00000000 322e312d46445025 0a0dd3cfe3e2250d
    00000010 6f20302037343232 2f0d203c3c0d6a62
    00000020 7a697261656e694c

    'uu': disassemble the memory into instructions. Pieces of memory that
    couldn't be disassembled are displayed as raw bytes.
    >>> display('uu', b1)
    00000000 and eax, 0x2d464450
    00000005 xor dword ptr [rsi], ebp
    00000007 xor cl, byte ptr [rip - 0x301c1ddb]
    0000000d ror dword ptr [rip + 0x3432320a], cl
    00000013 .byte 0x37
    00000014 and byte ptr [rax], dh
    00000016 and byte ptr [rdi + 0x62], ch
    00000019 push 0xd
    0000001b cmp al, 0x3c
    0000001d and byte ptr [rip + 0x6e694c2f], cl
    00000023 .byte 0x65
    00000024 .byte 0x61
    00000025 jb 0x90
    00000027 jp 0x8e
    00000029 and byte ptr fs:[rcx], dh
    0000002c .byte 0x20
    0000002d .byte 0x0d

    References:
    https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/d--da--db--dc--dd--dd--df--dp--dq--du--dw--dw--dyb--dyd--display-memor
    https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/u--unassemble-
    '''
    if spec == 'db':
        line_fmt = '{addr:08x} {0:8/ /23}-{0:8/ /23} |{1:16//16}|'
        exs = [
            Ex(fmt='x', sz=1, endianess=endianess),
            Ex(fmt='c', sz=1, endianess=endianess)
        ]
    elif spec == 'dc':
        line_fmt = '{addr:08x} {0:4/ /35} |{1:16//16}|'
        exs = [
            Ex(fmt='x', sz=4, endianess=endianess),
            Ex(fmt='c', sz=1, endianess=endianess)
        ]
    elif spec == 'dd':
        line_fmt = '{addr:08x} {0:4/ /19}'
        exs = [Ex(fmt='x', sz=4, endianess=endianess)]
    elif spec == 'dD':
        line_fmt = '{addr:08x} {0:4/ }'
        exs = [Ex(fmt='f', sz=8, endianess=endianess)]
    elif spec == 'df':
        line_fmt = '{addr:08x} {0:4/ }'
        exs = [Ex(fmt='f', sz=4, endianess=endianess)]
    elif spec == 'dq':
        line_fmt = '{addr:08x} {0:2/ }'
        exs = [Ex(fmt='x', sz=8, endianess=endianess)]
    elif spec == 'uu':
        line_fmt = '{addr:08x} {0:1}'
        exs = [
            Ex(fmt='i', sz=0, endianess=endianess,
               extra_kargs={
                   'arch': extra_kargs.get('arch', 'x86'),
                   'mode': extra_kargs.get('mode', 64)
               })
        ]
    else:
        raise ValueError("Spec '%s' not supported." % spec)

    it = Formatter(line_fmt, '*', *exs).lines(mem, start_addr, compress)
    if compress:
        yield from unique_justseen(it)
    else:
        yield from it
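# The doctests above call `display`, which is not shown in this snippet. A
# minimal sketch of what it presumably does (an assumption, not the original
# implementation) is to print every line yielded by `idisplay`:
def display(spec, mem, **kwargs):
    for line in idisplay(spec, mem, **kwargs):
        print(line)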
def pathDerivativeDist():
    space = 1
    af = 4
    outDir = "../results/longStats_space%s_ipv%s/" % (space, af)
    dataDirectory = "/data/routeviews/archive.routeviews.org/route-views.linx/bgpdata/"
    filename = dataDirectory + "2016.06/UPDATES/updates.20160601.0000.bz2"
    centralityFile = outDir + "/%s%02d%02d_af%s.pickle" % (2016, 6, 15, af)
    asAggProb, asProb = pickle.load(open(centralityFile, "rb"))

    allDiff = []
    linDiff = []

    if filename.startswith("@bgpstream:"):
        p1 = Popen(["bgpreader", "-m", "-w", filename.rpartition(":")[2],
                    "-p", "routeviews", "-c", "route-views.linx", "-t", "updates"],
                   stdout=PIPE)
    else:
        p1 = Popen(["bgpdump", "-m", "-v", filename], stdout=PIPE, bufsize=-1)

    for line in p1.stdout:
        res = line.decode()[:-1].split('|', 15)  # p1.stdout yields bytes

        if res[5] == "0.0.0.0/0":
            continue

        if af != 0:
            if af == 4 and ":" in res[5]:
                continue
            elif af == 6 and "." in res[5]:
                continue

        if res[2] == "W":
            continue
        else:
            zTd, zDt, zS, zOrig, zAS, zPfx, sPath, zPro, zOr, z0, z1, z2, z3, z4, z5 = res
            path = list(unique_justseen(sPath.split(" ")))
            try:
                # hegeAll = map(lambda x: round(asAggProb[x],3), path[1:-1])
                hegeAll = [asAggProb[x] for x in path]  # list, so it can be reused below
                hegeDiff = np.diff(hegeAll)
                allDiff.extend(list(hegeDiff))

                N = 11
                dim = np.linspace(0, 1, N)
                interpData = np.interp(dim, np.linspace(0, 1, len(hegeAll)), hegeAll)
                linDiff.append(interpData)
            except Exception:
                # print path
                # print "New AS"
                continue

    plt.figure()
    ecdf(allDiff)
    plt.xlim([-0.1, 0.1])
    plt.xlabel("Hegemony derivative")
    plt.ylabel("CDF")
    plt.tight_layout()
    plt.savefig("../results/pathDerivative/derivativeDist.pdf")

    lda = np.array(linDiff)
    plt.figure()
    # plt.plot(dim, lda.mean(axis=0), "o-")
    plt.errorbar(dim, lda.mean(axis=0), lda.std(axis=0) / np.sqrt(N), fmt="o-")
    plt.xlabel("Relative position in the path")
    plt.ylabel("AS hegemony")
    plt.xlim([-0.03, 1.03])
    plt.tight_layout()
    plt.savefig("../results/pathDerivative/meanHegemony.pdf")

    return lda
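# A tiny standalone illustration of the np.interp step above: hegemony values
# along a path of arbitrary hop count are resampled onto N equally spaced
# "relative positions" so that curves from different paths can be averaged.
# The numbers are made up.
import numpy as np

hege = [0.9, 0.4, 0.1, 0.05]                     # one hegemony value per hop
N = 11
dim = np.linspace(0, 1, N)                       # fixed grid of relative positions
resampled = np.interp(dim, np.linspace(0, 1, len(hege)), hege)
print(resampled.shape)                           # (11,)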
def xml_create_tag(xmltree, xpath, element, place_index=None, tag_order=None, occurrences=None,
                   correct_order=True, several=True):
    """
    This method evaluates an xpath expression and creates a tag in an xmltree under the
    returned nodes. If there are no nodes under the specified xpath an error is raised.

    The tag is appended by default, but can be inserted at a certain index (`place_index`)
    or can be inserted according to a given order of tags

    :param xmltree: an xmltree that represents inp.xml
    :param xpath: a path where to place a new tag
    :param element: a tag name or etree Element to be created
    :param place_index: defines the place where to put a created tag
    :param tag_order: defines a tag order
    :param occurrences: int or list of int. Which occurrence of the parent nodes to create a tag.
                        By default all nodes are used.
    :param correct_order: bool, if True (default) and a tag_order is given that does not correspond
                          to the given order in the xmltree (only the order is wrong, no unknown tags)
                          it will be corrected and a warning is given.
                          This is necessary for some edge cases of the xml schemas of fleur
    :param several: bool, if True multiple tags of the given name are allowed

    :raises ValueError: If the insertion failed in any way (tag_order does not match, failed to insert, ...)

    :returns: xmltree with created tags
    """
    import copy
    from more_itertools import unique_justseen
    from masci_tools.io.common_functions import is_sequence

    if not etree.iselement(element):
        element_name = element
        try:
            element = etree.Element(element)
        except ValueError as exc:
            raise ValueError(f"Failed to construct etree Element from '{element_name}'") from exc
    else:
        element_name = element.tag

    parent_nodes = eval_xpath(xmltree, xpath, list_return=True)

    if len(parent_nodes) == 0:
        raise ValueError(f"Could not create tag '{element_name}' because at least one subtag is missing. "
                         'Use create=True to create the subtags')

    if occurrences is not None:
        if not is_sequence(occurrences):
            occurrences = [occurrences]
        try:
            parent_nodes = [parent_nodes[occ] for occ in occurrences]
        except IndexError as exc:
            raise ValueError('Wrong value for occurrences') from exc

    for parent in parent_nodes:
        element_to_write = copy.deepcopy(element)
        if tag_order is not None:
            try:
                tag_index = tag_order.index(element_name)
            except ValueError as exc:
                raise ValueError(f"The tag '{element_name}' was not found in the order list. "
                                 f'Allowed tags are: {tag_order}') from exc

            behind_tags = tag_order[:tag_index]

            child_tags = [child.tag for child in parent.iterchildren()]

            #This ignores serial duplicates. With this out of order tags will be obvious e.g ['ldaU', 'lo','lo', 'ldaU']
            #will result in ['ldaU', 'lo', 'ldaU']
            existing_order = list(unique_justseen(child_tags))

            #Does the input file have unknown tags
            extra_tags = set(existing_order).difference(set(tag_order))

            if extra_tags:
                raise ValueError(f'Did not find existing elements in the tag_order list: {extra_tags}')

            if element_name in existing_order and not several:
                raise ValueError(f'The given tag {element_name} is not allowed to appear multiple times')

            #Is the existing order in line with the given tag_order
            if sorted(existing_order, key=tag_order.index) != existing_order:
                if not correct_order:
                    raise ValueError('Existing order does not correspond to tag_order list\n'
                                     f'Expected order: {tag_order}\n'
                                     f'Actual order: {existing_order}')
                else:
                    #Here we know that there are no unexpected tags in the order, so we can 'repair' the order
                    warnings.warn('Existing order does not correspond to tag_order list. Correcting it\n'
                                  f'Expected order: {tag_order}\n'
                                  f'Actual order: {existing_order}')

                    new_tag = copy.deepcopy(parent)

                    #Remove all child nodes from new_tag (deepcopied so they are still on parent)
                    for node in new_tag.iterchildren():
                        new_tag.remove(node)

                    for tag in tag_order:
                        #Iterate over all children with the given tag on the parent and append to the new_tag
                        for node in parent.iterchildren(tag=tag):
                            new_tag.append(node)

                    #Now replace the parent node with the reordered node
                    parent_of_parent = parent.getparent()
                    index = parent_of_parent.index(parent)
                    parent_of_parent.remove(parent)
                    parent_of_parent.insert(index, new_tag)
                    parent = new_tag

            for tag in reversed(behind_tags):
                existing_tags = list(parent.iterchildren(tag=tag))

                if len(existing_tags) != 0:
                    insert_index = parent.index(existing_tags[-1]) + 1
                    try:
                        parent.insert(insert_index, element_to_write)
                    except ValueError as exc:
                        raise ValueError(f"Failed to insert element '{element_name}' behind '{tag}' tag") from exc
                    break

            else:  #This is the construct for reaching the end of the loop without breaking
                try:
                    parent.insert(0, element_to_write)
                except ValueError as exc:
                    raise ValueError(
                        f"Failed to insert element '{element_name}' at the beginning of the order") from exc

        elif place_index is not None:
            #We just try to insert the new element at the index
            try:
                parent.insert(place_index, element_to_write)
            except ValueError as exc:
                raise ValueError(f"Failed to create element '{element_name}' at the place index '{place_index}' "
                                 f"to the parent '{parent.tag}'") from exc
        else:
            #We append the node and hope nothing breaks
            try:
                parent.append(element_to_write)
            except ValueError as exc:
                raise ValueError(f"Failed to append element '{element_name}' to the parent '{parent.tag}'") from exc

    etree.indent(xmltree)

    return xmltree
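# A hypothetical usage sketch: the inp.xml structure and tag_order below are
# made up for illustration, and the call assumes the surrounding module
# (masci_tools) provides `etree`, `eval_xpath` and `warnings` as used above.
from lxml import etree

inpxml = etree.ElementTree(etree.fromstring(
    '<fleurInput><atomSpecies><species name="Fe-1"><mtSphere/><atomicCutoffs/></species></atomSpecies></fleurInput>'))
xml_create_tag(inpxml, '/fleurInput/atomSpecies/species', 'lo',
               tag_order=['mtSphere', 'atomicCutoffs', 'lo'])
print(etree.tostring(inpxml, pretty_print=True).decode())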