def add_minor_inst(self, rest):
    """Parse a MinorInst trace line and add the resulting instruction
    to the model.

    rest: tail of the trace line, a string of key=value pairs as
        understood by parse.parse_pairs.  Must contain 'id' and 'addr'.
        An 'inst' key produces an Inst, a 'fault' key an InstFault, and
        a line with neither is silently ignored.
    """
    pairs = parse.parse_pairs(rest)
    other_pairs = dict(pairs)

    id = Id().from_string(pairs['id'])
    del other_pairs['id']

    addr = int(pairs['addr'], 0)
    del other_pairs['addr']

    if 'inst' in other_pairs:
        del other_pairs['inst']
        # Strip leading spaces, then collapse runs of spaces in the
        #   disassembly.  The inner-pattern must be ' +' (one or more):
        #   ' *' also matches the empty string at every position and
        #   would insert a space between every pair of characters.
        disassembly = re.sub(' +', ' ', re.sub('^ *', '', pairs['inst']))
        inst = Inst(id, disassembly, addr, other_pairs)
        self.add_inst(inst)
    elif 'fault' in other_pairs:
        del other_pairs['fault']
        inst = InstFault(id, pairs['fault'], addr, other_pairs)
        self.add_inst(inst)
def __init__(self):
    """Load every corpus resource the generator relies on: initial and
    closing action lists (and their unique first elements I and C),
    midpoint chains and exemplars, the action-pair graph, NOC data,
    exemplars, idiomatics, locations, description templates and
    character properties.

    Also attempts to load a word2vec model for GS; if loading fails,
    self.gs is simply left unset (callers must tolerate its absence).
    """
    self.initials = parse.parse_initials()
    # Unique first elements of the initial/closing action tuples
    self.I = list(set([i[0] for i in self.initials]))
    self.closings = parse.parse_closings()
    self.C = list(set([i[0] for i in self.closings]))

    ret = parse.parse_midpoints()
    self.midpoints = ret['chains']
    self.midpoint_ex = ret['exemplars']

    self.action_pairs = parse.parse_pairs()
    self.noc = parse.parse_NOC()
    self.exemplars = parse.parse_exemplars()
    self.action_graph = graph.make_graph(self.action_pairs['pairs'],
                                         self.action_pairs['links'])
    self.idiomatics = parse.parse_idiomatics()
    self.locations = parse.parse_locations()
    self.character_templates = templates.CHARACTER_DESCRIPTIONS
    self.location_templates = templates.SETTING_DESCRIPTIONS
    self.character_properties = parse.parse_character_properties()

    # Best effort: the word2vec model file may be absent in some
    #   checkouts.  Narrowed from a bare 'except' so that
    #   KeyboardInterrupt/SystemExit are no longer swallowed.
    try:
        self.gs = GS(Word2Vec.load("../data/word2vec/w2v_103.model"))
    except Exception:
        pass
def add_minor_line(self, rest):
    """Parse a MinorLine trace line and record it on the model as a
    Line (when 'paddr' is present) or a LineFault (when 'fault' is);
    any other pairing is ignored."""
    pairs = parse.parse_pairs(rest)
    # Everything not consumed below travels with the line as extra pairs
    extra = dict(pairs)

    line_id = Id().from_string(extra.pop('id'))
    vaddr = int(extra.pop('vaddr'), 0)

    if 'paddr' in extra:
        paddr = int(extra.pop('paddr'), 0)
        size = int(extra.pop('size'), 0)
        self.add_line(Line(line_id, vaddr, paddr, size, extra))
    elif 'fault' in extra:
        fault = extra.pop('fault')
        self.add_line(LineFault(line_id, fault, vaddr, extra))
return [] start = choice(starting_paths) ending = choice(ending_paths) return start + bmp[1:-1] + [mp] + amp[1:-1] + ending def get_links(G, action_list): links = [] for i,a in enumerate(action_list[:-1]): data = G.get_edge_data(a, action_list[i+1]) link = data['link'] links.append(link) return links if __name__ == "__main__": d = parse.parse_pairs() G = make_graph(d['pairs'], d['links']) print nx.has_path(G, 'are_marketed_by', 'take_advantage_of') M = parse.parse_midpoints() chains = M['chains'] I = parse.parse_initials() initials = list(set([i[0] for i in I])) C = parse.parse_closings() closings = list(set([c[0] for c in C])) nodes = G.nodes() ''' midb = list(set([e[0] for e in chains])) mida = list(set([e[2] for e in chains])) i = 0 n = 0
def load_events(self, file, startTime=0, endTime=None):
    """Load an event file and add everything to this model.

    file: filename of the trace to parse (NOTE: shadows the Python 2
        builtin 'file')
    startTime: lines with a timestamp below this are skipped entirely
    endTime: parsing stops once the current time exceeds this
        (None means read to end of file)
    """
    def update_comments(comments, time):
        # Add a list of comments to an existing event, if there is one at
        #   the given time, or create a new, correctly-timed, event from
        #   the last event and attach the comments to that
        for commentUnit, commentRest in comments:
            event = self.find_unit_event_by_time(commentUnit, time)
            # Find an event to which this comment can be attached
            if event is None:
                # No older event, make a new empty one
                event = BlobEvent(commentUnit, time, {})
                self.add_unit_event(event)
            elif event.time != time:
                # Copy the old event and make a new one with the right
                #   time and comment
                newEvent = BlobEvent(commentUnit, time, event.pairs)
                newEvent.visuals = dict(event.visuals)
                event = newEvent
                self.add_unit_event(event)
            event.comments.append(commentRest)

    self.clear_events()

    # A negative time will *always* be different from an event time
    time = -1
    time_events = {}
    last_time_lines = {}
    minor_trace_line_count = 0
    comments = []

    # NOTE(review): time_events and default_colour are assigned but
    #   never read within this function — confirm they are vestigial
    default_colour = [[colours.unknownColour]]
    next_progress_print_event_count = 1000

    if not os.access(file, os.R_OK):
        print 'Can\'t open file', file
        exit(1)
    else:
        print 'Opening file', file

    f = open(file)

    start_wall_time = wall_time()

    # Skip leading events whose timestamp is before startTime
    still_skipping = True
    l = f.readline()
    while l and still_skipping:
        match = re.match('^\s*(\d+):', l)
        if match is not None:
            event_time = match.groups()
            if int(event_time[0]) >= startTime:
                # First line at/after startTime: keep it for the main
                #   loop below rather than reading past it
                still_skipping = False
            else:
                l = f.readline()
        else:
            # Not a timestamped line; keep skipping
            l = f.readline()

    # Trace lines look like: "<time>: <unit>: [Minor<kind>:] <rest>"
    match_line_re = re.compile(
        '^\s*(\d+):\s*([\w\.]+):\s*(Minor\w+:)?\s*(.*)$')

    # Parse each line of the events file, accumulating comments to be
    #   attached to MinorTrace events when the time changes
    reached_end_time = False
    while not reached_end_time and l:
        match = match_line_re.match(l)
        if match is not None:
            event_time, unit, line_type, rest = match.groups()
            event_time = int(event_time)

            # Strip the common unit-name prefix from the unit name
            unit = re.sub('^' + self.unitNamePrefix +
                '\.?(.*)$', '\\1', unit)

            # When the time changes, resolve comments
            if event_time != time:
                # Progress report roughly every 1000 stored events
                if self.numEvents > next_progress_print_event_count:
                    print ('Parsed to time: %d' % event_time)
                    next_progress_print_event_count = (
                        self.numEvents + 1000)
                update_comments(comments, time)
                comments = []
                time = event_time

            if line_type is None:
                # Treat this line as just a 'comment'
                comments.append((unit, rest))
            elif line_type == 'MinorTrace:':
                minor_trace_line_count += 1
                # Only insert this event if it's not the same as
                #   the last event we saw for this unit
                if last_time_lines.get(unit, None) != rest:
                    event = BlobEvent(unit, event_time, {})
                    pairs = parse.parse_pairs(rest)
                    event.pairs = pairs

                    # Try to decode the colour data for this event
                    blobs = self.unitNameToBlobs.get(unit, [])
                    for blob in blobs:
                        if blob.visualDecoder is not None:
                            event.visuals[blob.picChar] = (
                                blob.visualDecoder(pairs))

                    self.add_unit_event(event)
                    last_time_lines[unit] = rest
            elif line_type == 'MinorInst:':
                self.add_minor_inst(rest)
            elif line_type == 'MinorLine:':
                self.add_minor_line(rest)

        if endTime is not None and time > endTime:
            reached_end_time = True

        l = f.readline()

    # Attach any comments still pending at end of file/end time
    update_comments(comments, time)
    self.extract_times()
    f.close()

    end_wall_time = wall_time()

    print 'Total events:', minor_trace_line_count, 'unique events:', \
        self.numEvents
    print 'Time to parse:', end_wall_time - start_wall_time