def render(self, renderer):
    renderer.table_header([
        dict(name="Type", type="TreeNode", width=10, max_depth=10),
        dict(name="Address", style="address", padding="0"),
        dict(name="Name", width=20),
        dict(name="Device Type", width=30),
        dict(name="Path"),
    ])

    for _, _, driver_obj, _, _ in self.generate_hits():
        renderer.table_row(
            utils.AttributedString("DRV", [(0, 30, "BLACK", "RED")]),
            driver_obj.obj_offset,
            driver_obj.DriverName.v(vm=self.kernel_address_space),
            depth=0)

        first_device = driver_obj.DeviceObject.dereference(
            vm=self.kernel_address_space)

        for device in first_device.walk_list("NextDevice"):
            device_header = self.profile.Object(
                "_OBJECT_HEADER",
                offset=device.obj_offset -
                device.obj_profile.get_obj_offset(
                    "_OBJECT_HEADER", "Body"),
                vm=device.obj_vm)

            device_name = device_header.NameInfo.Name.cast(
                vm=self.kernel_address_space)

            renderer.table_row(
                utils.AttributedString("DEV", [(0, 30, "WHITE", "BLUE")]),
                device.obj_offset,
                device_name,
                device.DeviceType,
                depth=1)

            level = 1
            for att_device in device.walk_list("AttachedDevice"):
                renderer.table_row(
                    utils.AttributedString(
                        "ATT", [(0, 30, "BLACK", "GREEN")]),
                    att_device.obj_offset,
                    device_name,
                    att_device.DeviceType,
                    att_device.DriverObject.DriverName,
                    depth=level)
                level += 1
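# A minimal standalone sketch (not Rekall API) of the pointer arithmetic used
# above: the device object is the "Body" member embedded in an _OBJECT_HEADER,
# so subtracting Body's offset within the header struct from the device's
# address yields the header's start. The offset value below is hypothetical;
# in the real code it comes from get_obj_offset("_OBJECT_HEADER", "Body").
BODY_OFFSET = 0x30  # Illustrative only; varies by Windows profile.

def header_offset_from_body(body_offset):
    """Return the enclosing header address for an embedded Body address."""
    return body_offset - BODY_OFFSET

# If a header starts at 0x1000, its Body sits at 0x1030 and we can walk back.
assert header_offset_from_body(0x1030) == 0x1000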
def _render_node(self, query, node, renderer, depth=1):
    """Render an AST node and recurse."""
    t = infer_type.infer_type(node, self)
    try:
        name = "(%s) <%s>" % (t.__name__, type(node).__name__)
    except AttributeError:
        name = "(%r) <%s>" % (t, type(node).__name__)

    renderer.table_row(
        name,
        utils.AttributedString(
            str(query),
            [dict(start=node.start, end=node.end, fg="RED", bold=True)]),
        depth=depth)

    for child in node.children:
        if isinstance(child, ast.Expression):
            self._render_node(node=child, renderer=renderer, query=query,
                              depth=depth + 1)
        else:
            renderer.table_row(
                "(%s) <leaf: %r>" % (type(child).__name__, child),
                None,
                depth=depth + 1)
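# A self-contained toy (hypothetical ToyExpression type, not the EFILTER AST)
# showing the traversal shape _render_node relies on: each node carries
# start/end offsets into the query string plus a children list, and children
# that are not Expression nodes are treated as leaves.
class ToyExpression(object):
    def __init__(self, start, end, children=()):
        self.start, self.end, self.children = start, end, list(children)

def walk(node, depth=0):
    print("  " * depth + "<%s> [%d:%d]" % (
        type(node).__name__, node.start, node.end))
    for child in node.children:
        if isinstance(child, ToyExpression):
            walk(child, depth + 1)
        else:
            print("  " * (depth + 1) + "leaf: %r" % (child,))

# Prints the nested nodes with increasing indentation, mirroring depth=.
walk(ToyExpression(0, 8, [ToyExpression(0, 3, ["pid"]), "=="]))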
def render_error(self, renderer):
    """Render the query parsing error in a user-friendly manner."""
    renderer.section("Query Error")

    try:
        start = self.query_error.adjusted_start
        end = self.query_error.adjusted_end
        source = self.query_error.source
        text = self.query_error.text
    except AttributeError:
        # Maybe query_error isn't a subclass of EfilterError. Let's be
        # careful.
        start = None
        end = None
        source = self.query_source
        text = str(self.query_error)

    if start is not None and end is not None:
        renderer.format(
            "EFILTER error ({}) {} at position {}-{} in query:\n{}\n\n",
            type(self.query_error).__name__, repr(text), start, end,
            utils.AttributedString(
                source,
                [dict(start=start, end=end, fg="RED", bold=True)]))
    else:
        renderer.format(
            "EFILTER error ({}) {} in query:\n{}\n",
            type(self.query_error).__name__, repr(text), source)
def collect(self):
    for driver_obj in self.generate_hits():
        yield dict(
            Type=utils.AttributedString("DRV", [(0, 30, "BLACK", "RED")]),
            Address=driver_obj.obj_offset,
            Name=driver_obj.DriverName.v(vm=self.kernel_address_space),
            depth=0)

        first_device = driver_obj.DeviceObject.dereference(
            vm=self.kernel_address_space)

        for device in first_device.walk_list("NextDevice"):
            device_header = self.profile.Object(
                "_OBJECT_HEADER",
                offset=device.obj_offset -
                device.obj_profile.get_obj_offset(
                    "_OBJECT_HEADER", "Body"),
                vm=device.obj_vm)

            device_name = device_header.NameInfo.Name.cast(
                vm=self.kernel_address_space)

            yield dict(
                Type=utils.AttributedString(
                    "DEV", [(0, 30, "WHITE", "BLUE")]),
                Address=device.obj_offset,
                Name=device_name,
                device_type=device.DeviceType,
                depth=1)

            level = 1
            for att_device in device.walk_list(
                    "AttachedDevice", include_current=False):
                yield dict(
                    Type=utils.AttributedString(
                        "ATT", [(0, 30, "BLACK", "GREEN")]),
                    Address=att_device.obj_offset,
                    Name=device_name,
                    device_type=att_device.DeviceType,
                    Path=att_device.DriverObject.DriverName,
                    depth=level + 1)
                level += 1
def BuildBaselineTask(self, config_options, plugin_cls):
    """Run the rekall test program.

    This runs in a separate thread on the thread pool. After running, we
    capture the output into a json baseline file, and print progress to
    the terminal.
    """
    baseline_data = self.BuildBaselineData(config_options, plugin_cls)
    output_filename = os.path.join(self.test_directory,
                                   plugin_cls.__name__)

    with open(output_filename, "wb") as baseline_fd:
        baseline_fd.write(json.dumps(baseline_data, indent=4))

    self.renderer.table_row(
        plugin_cls.__name__,
        utils.AttributedString("REBUILD", [(0, -1, "YELLOW", None)]),
        baseline_data["time_used"])

    self.rebuilt += 1
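# Hedged sketch of the baseline file's shape, inferred only from the keys this
# suite reads ("time_used" above; "options", "aborted", "return_code" and
# "executed_command" in _RunTestCase below). Values are purely illustrative,
# not a real baseline.
example_baseline = {
    "time_used": 1.23,
    "return_code": 0,
    "options": {
        "aborted": False,
        "executed_command": "rekall -f image.dd pslist",  # hypothetical
    },
}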
def GetComment(self, start, end):
    """Returns a tuple of labels and their highlights."""
    labels = []
    for i in range(start, end):
        # Only the hit label matters here; the range bounds are discarded
        # to avoid shadowing this function's start/end parameters.
        _, _, hit = self.collection.get_containing_range(i)
        if hit:
            if hit not in labels:
                labels.append(hit)

    result = ""
    highlights = []
    for label, fg, bg in labels:
        highlights.append((len(result), len(result) + len(label), fg, bg))
        result += label + ", "

    # Drop the trailing ", ".
    if result:
        result = result[:-2]

    return utils.AttributedString(result, highlights=highlights)
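# A standalone sketch of the highlight bookkeeping above (no Rekall imports):
# offsets are measured against the result string as it grows, so each label's
# (start, end, fg, bg) tuple covers exactly its own characters.
labels = [("heap", "BLACK", "RED"), ("stack", "WHITE", "BLUE")]
result, highlights = "", []
for label, fg, bg in labels:
    highlights.append((len(result), len(result) + len(label), fg, bg))
    result += label + ", "
result = result[:-2]
assert result == "heap, stack"
assert highlights == [(0, 4, "BLACK", "RED"), (6, 11, "WHITE", "BLUE")]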
def column_types(self):
    # Exemplar values from which the renderer can infer each column's type.
    return dict(offset=int,
                hexdump=utils.HexDumpedString(""),
                comment=utils.AttributedString(""))
def _RunTestCase(self, config_options, plugin_cls, baseline_data):
    # If the control file tells us to stop we don't do anything.
    if self.CheckControlFile("action") == "stop":
        logging.info("Skipping test %s since control file is aborted.",
                     plugin_cls.__name__)
        return

    if baseline_data['options'].get('aborted'):
        logging.info("Skipping test %s since baseline did not complete.",
                     plugin_cls.__name__)
        return

    # Re-run the current test again.
    current_run = self.BuildBaselineData(config_options, plugin_cls)

    test_cases = []
    for name in dir(plugin_cls):
        if name.startswith("test"):
            test_cases.append(
                plugin_cls(name,
                           baseline=baseline_data,
                           config_options=config_options,
                           current=current_run,
                           debug=self.FLAGS.debug))

    for test_case in test_cases:
        result = unittest.TestResult()
        return_code = current_run.get("return_code", 0)
        if return_code != 0:
            result.errors.append(("return_code", return_code))

        test_case(result)
        current_run.setdefault("errors", {})[str(test_case)] = dict(
            (str(x), y) for x, y in result.errors)

        current_run.setdefault("failures", {})[str(test_case)] = dict(
            (str(x), y) for x, y in result.failures)

        # Store the current run someplace for closer inspection.
        output_path = os.path.join(self.output_dir, plugin_cls.__name__)
        with open(output_path, "wb") as fd:
            baseline_filename = os.path.join(
                self.test_directory, plugin_cls.__name__)

            fd.write(self.BASELINE_TEMPLATE % dict(
                src=fd.name, dest=baseline_filename,
                command=current_run["options"].get(
                    "executed_command",
                    # If the command does not exist we launch the unittest.
                    "python -m unittest %s.%s" % (
                        plugin_cls.__module__, plugin_cls.__name__))))

            fd.write(json.dumps(current_run, indent=4))

        # Make the output executable.
        os.chmod(output_path, 0o770)

        if result.wasSuccessful():
            self.renderer.table_row(
                test_case,
                utils.AttributedString("PASS", [(0, -1, "GREEN", None)]),
                current_run.get("time_used", 0),
                baseline_data.get("time_used", 0))

            self.successes.append(plugin_cls.__name__)
        else:
            diff_path = output_path + ".diff"
            with open(diff_path, "wb") as diff_fd:
                subprocess.call(
                    ["diff", "-y", "--width", "200",
                     output_path, baseline_filename],
                    stdout=diff_fd)

            if self.FLAGS.inline:
                print(open(output_path).read())

            self.renderer.table_row(
                test_case,
                utils.AttributedString("FAIL", [(0, -1, "RED", None)]),
                current_run.get("time_used", 0),
                baseline_data.get("time_used", 0),
                fd.name)

            self.failures.append(plugin_cls.__name__)

        if self.FLAGS.verbose:
            for _, error in result.errors + result.failures:
                self.renderer.write("Error in %s: %s" % (
                    plugin_cls.__name__, error))
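# A minimal sketch of the stock unittest pattern used above: constructing a
# TestCase with a method name and calling it with a TestResult collects
# failures and errors without a runner. (Toy test; the real suite also passes
# baseline/current data into the plugin's constructor.)
import unittest

class ToyTest(unittest.TestCase):
    def testFails(self):
        self.assertEqual(1, 2)

result = unittest.TestResult()
ToyTest("testFails")(result)
assert not result.wasSuccessful()
assert len(result.failures) == 1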
def render(self, renderer): renderer.table_header([ dict(name="Dogma", width=35, style="full"), dict(name="Bessy", width=65, type="bool", style="cow"), dict(name="Pilsner", width=50, style="full"), dict(name="Nowrap", width=10, nowrap=True) ]) fixtures = self.session.LoadProfile("tests/fixtures") beer = fixtures.data["ascii_art"]["beer"] phys_map = fixtures.data["fixtures"]["phys_map"] renderer.table_row( ("This is a renderer stress-test. The flags should have correct" " colors, the beer should be yellow and the cell on the left" " should not bleed into the cell on the right.\n" "This is a really " "long column of text with its own newlines in it!\n" "This bovine experience has been brought to you by Rekall."), True, utils.AttributedString("\n".join(beer["ascii"]), beer["highlights"]), ("This is a fairly long line that shouldn't get wrapped.\n" "The same row has another line that shouldn't get wrapped.")) renderer.section("Heatmap test:") cells = [] for digit in itertools.islice(algo.EulersDecimals(), 0xff): cells.append(dict(heat=float(digit + 1) * .1, value=digit)) randomized = visual_aides.Heatmap( caption="Offset (p)", # Some of the below xs stand for eXtreme. The other ones just # look cool. column_headers=["%0.2x" % x for x in xrange(0, 0xff, 0x10)], row_headers=["0x%0.6x" % x for x in xrange(0x0, 0xfffff, 0x10000)], cells=cells, greyscale=False) gradual = visual_aides.Heatmap( caption="Offset (v)", column_headers=["%0.2x" % x for x in xrange(0, 0xff, 0x10)], row_headers=["0x%0.6x" % x for x in xrange(0x0, 0xfffff, 0x10000)], cells=[dict(value="%x" % x, heat=x / 255.0) for x in xrange(256)], greyscale=False) ranges_legend = visual_aides.MapLegend(phys_map["ranges_legend"]) ranges = visual_aides.RunBasedMap(caption="Offset (p)", legend=ranges_legend, runs=phys_map["runs"]) renderer.table_header([ dict(name="Random Heatmap", style="full", width=60, align="c"), dict(name="Gradual Heatmap", style="full", width=60, align="c"), dict(name="Legend", style="full", orientation="horizontal") ]) renderer.table_row(randomized, gradual, visual_aides.HeatmapLegend()) renderer.table_header([ dict(name="Greyscale Random", style="full", width=60, align="c"), dict(name="Memory Ranges", style="full", width=80, align="c"), dict(name="Ranges Legend", style="full", width=30, orientation="vertical") ]) randomized.greyscale = True renderer.table_row(randomized, ranges, ranges_legend)