def __init__(self, output=None, send_message_callback=None, **kwargs):
    super(JsonRenderer, self).__init__(**kwargs)
    self.send_message_callback = send_message_callback

    # Allow the user to dump all output to a file.
    self.output = output

    # This keeps a list of object renderers which we will use for each
    # column.
    self.object_renderers = []

    fd = None
    if self.output:
        if hasattr(self.output, "write") and hasattr(self.output, "flush"):
            fd = self.output
        else:
            # This overwrites the output file with a new json message.
            fd = open(self.output, "wb")

    if fd is None:
        fd = self.session.fd

    if fd is None:
        fd = sys.stdout

    self.fd = fd
    self.encoder = JsonEncoder(session=self.session, renderer=self)
    self.decoder = JsonDecoder(session=self.session, renderer=self)

    # A general purpose cache for encoders and decoders.
    self.cache = utils.FastStore(100)
    self.data = []
def __init__(self, output=None, **kwargs):
    super(JsonRenderer, self).__init__(**kwargs)

    # Allow the user to dump all output to a file.
    self.output = output or self.session.GetParameter("output")

    # This keeps a list of object renderers which we will use for each
    # column.
    self.object_renderers = []

    fd = None
    if self.output:
        # This overwrites the output file with a new json message.
        fd = open(self.output, "wb")

    if fd is None:
        fd = self.session.fd

    if fd is None:
        fd = sys.stdout

    self.fd = fd
    self.encoder = JsonEncoder(compression=False, renderer=self)
    self.decoder = JsonDecoder(session=self.session, renderer=self)

    # A general purpose cache for encoders and decoders.
    self.cache = utils.FastStore(100)
def __init__(self, name=None, dtb=None, **kwargs):
    """Instantiate an Intel 32 bit Address space over the layered AS.

    Args:
      dtb: The dtb address.
    """
    super(IA32PagedMemory, self).__init__(**kwargs)

    # We must be stacked on someone else:
    if self.base == None:
        raise TypeError("No base Address Space")

    # If the underlying address space already knows about the dtb we use it.
    # Allow the dtb to be specified in the session.
    self.dtb = dtb or self.session.GetParameter("dtb")
    if self.dtb == None:
        raise TypeError("No valid DTB specified. Try the find_dtb"
                        " plugin to search for the dtb.")

    self.name = (name or 'Kernel AS') + "@%#x" % self.dtb

    # Use a TLB to make this faster.
    self._tlb = addrspace.TranslationLookasideBuffer(1000)

    self._cache = utils.FastStore(100)

    # Some important masks we can use.
    # Is the pagesize flag on?
    self.page_size_mask = (1 << 7)
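The page_size_mask above tests bit 7 of a page directory entry, which in IA-32 paging is the PS (page size) flag: when set, the PDE maps a 4MB large page directly instead of pointing at a second-level page table. A minimal sketch of how a translation path might consult it; the read_pde helper and this vtop body are illustrative assumptions, not the class's actual implementation:

def vtop(self, vaddr):
    # Hypothetical PDE fetch; the real class reads it via the base AS.
    pde = self.read_pde(vaddr)
    if pde & self.page_size_mask:
        # PS flag set: 4MB page. Bits 31:22 of the PDE give the frame,
        # the low 22 bits of the virtual address give the offset.
        return (pde & 0xFFC00000) | (vaddr & 0x3FFFFF)
    # Otherwise fall through to the second-level page table walk.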
def __init__(self, session):
    self._context_cache = utils.FastStore(max_size=10000)
    self.last_pfn_id = -1
    self.last_context_list = None
    self.hits_by_context = {}
    self.session = session
    self.address_space = session.physical_address_space
def __init__(self, name=None, dtb=None, **kwargs):
    """Instantiate an Intel 32 bit Address space over the layered AS.

    Args:
      dtb: The dtb address.
    """
    super(IA32PagedMemory, self).__init__(**kwargs)

    # We must be stacked on someone else:
    if not self.base:
        raise TypeError("No base Address Space")

    # If the underlying address space already knows about the dtb we use it.
    # Allow the dtb to be specified in the session.
    self.dtb = dtb or self.session.GetParameter("dtb")
    if self.dtb == None:
        raise TypeError("No valid DTB specified. Try the find_dtb"
                        " plugin to search for the dtb.")

    self.name = (name or 'Kernel AS') + "@%#x" % self.dtb

    # Use a TLB to make this faster.
    self._tlb = TranslationLookasideBuffer(1000)

    # Our get_available_addresses() refers to the base address space we
    # overlay on.
    self.phys_base = self.base

    self._cache = utils.FastStore(100)
def __init__(self, *args, **kwargs):
    super(IRGlob, self).__init__(*args, **kwargs)
    self.component_cache = utils.FastStore(50)

    # By default use the root of the filesystem.
    if self.plugin_args.root is None:
        self.plugin_args.root = self.plugin_args.path_sep
def _calculate(self, session):
    # Try to cache disassembly to speed things up.
    try:
        disassembler_cache = self.session.GetParameter(
            "disassembler_cache", utils.FastStore())

        disassembly = disassembler_cache.Get(
            (self.start, self.length, self.end))
    except KeyError:
        disassembly = str(
            session.plugins.dis(offset=self.start, branch=True,
                                length=self.length, end=self.end))

        disassembler_cache.Put((self.start, self.length, self.end),
                               disassembly)
        self.session.SetCache("disassembler_cache", disassembler_cache)

    hits = {}
    contexts = {}

    disassembly = disassembly.splitlines()
    for hit, line in enumerate(disassembly):
        for rule_idx, context in self._FindRuleIndex(line):
            hits.setdefault(rule_idx, []).append(hit)
            contexts.setdefault(rule_idx, {})[hit] = context

    # All the hits must match.
    if len(hits) < len(self.rules):
        self.session.logging.error("Failed to find match for %s", self.name)

        # Add some debugging messages here to make diagnosing errors easier.
        for i, rule in enumerate(self.text_rules):
            if i not in hits:
                self.session.logging.debug("Unable to match rule: %s", rule)

        return 0

    vector, context = self._GetMatch(hits, contexts)

    if len(vector) < len(self.rules):
        self.session.logging.error("Failed to find match for %s.", self.name)
        return 0

    self.session.logging.debug("Found match for %s", self.name)
    for x in vector:
        self.session.logging.debug(disassembly[x])

    return int(context.get("out", "0"), 0)
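The Get/Put calls above show the cache protocol these snippets rely on: utils.FastStore.Get raises KeyError on a miss, and Put inserts a value, evicting old entries once max_size is reached. A minimal sketch of that memoization idiom under those assumptions; compute_value is a hypothetical stand-in for the expensive work:

from rekall import utils

cache = utils.FastStore(max_size=100)

def cached(key):
    try:
        # Fast path: FastStore.Get raises KeyError when the key is absent.
        return cache.Get(key)
    except KeyError:
        value = compute_value(key)  # hypothetical expensive computation
        cache.Put(key, value)
        return value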
def __init__(self, **kwargs):
    super(WindowsHiberFileSpace, self).__init__(**kwargs)
    self.as_assert(self.base != None, "No base Address Space")
    self.as_assert(self.base.read(0, 4).lower() in ["hibr", "wake"])
    self.runs = []
    self.PageDict = {}
    self.HighestPage = 0
    self.PageIndex = 0
    self.AddressList = []
    self.LookupCache = {}
    self.PageCache = utils.FastStore(500)
    self.MemRangeCnt = 0
    self.offset = 0
    self.entry_count = 0xFF

    # Modify the profile by adding version specific definitions.
    self.profile = HibernationSupport(self.profile)

    # Extract header information.
    self.as_assert(self.profile.has_type("PO_MEMORY_IMAGE"),
                   "PO_MEMORY_IMAGE is not available in profile")
    self.header = self.profile.Object('PO_MEMORY_IMAGE', offset=0,
                                      vm=self.base)

    self.entry_count = self.profile.get_constant("HibrEntryCount")
    proc_page = self.profile.get_constant("HibrProcPage")

    # Check it's definitely a hibernation file.
    self.as_assert(self._get_first_table_page() is not None,
                   "No xpress signature found")

    # Extract processor state.
    self.ProcState = self.profile.Object("_KPROCESSOR_STATE",
                                         offset=proc_page * 4096,
                                         vm=self.base)

    ## This is a pointer to the page table - any ASs above us don't
    ## need to search for it.
    self.dtb = self.ProcState.SpecialRegisters.Cr3.v()

    # This is a lengthy process; it was cached, but it may be best to delay
    # it until it's absolutely necessary and/or convert it into a
    # generator...
    self.build_page_cache()
def __init__(self, *args, **kwargs):
    super(WinPhysicalYaraScanner, self).__init__(*args, **kwargs)
    self._context_cache = utils.FastStore(max_size=10000)
    try:
        # The user gave a yara DSL rule.
        if self.plugin_args.yara_expression:
            self.rules = yara.compile(
                source=self.plugin_args.yara_expression)
            self.parsed_rules = yara_support.parse_yara_to_ast(
                self.plugin_args.yara_expression)

        # User gave a yara AST.
        elif self.plugin_args.yara_ast:
            self.parsed_rules = self.plugin_args.yara_ast
            self.rules = yara.compile(
                source=yara_support.ast_to_yara(self.parsed_rules))
        else:
            raise plugin.PluginError("A yara expression must be provided.")

        all_strings = []
        rule_id = 0
        for parsed_rule in self.parsed_rules:
            name = parsed_rule["name"]
            for k, v in parsed_rule["strings"]:
                rule_name = "%s_%d_REKALL_%s" % (k, rule_id, name)
                all_strings.append((rule_name, v))
            rule_id += 1

        self.parsed_unified_rule = [
            dict(name="XX",
                 strings=all_strings,
                 condition="any of them")
        ]
        self.plugin_args.unified_yara_expression = (
            yara_support.ast_to_yara(self.parsed_unified_rule))

        self.unified_rule = yara.compile(
            source=self.plugin_args.unified_yara_expression)
    except Exception as e:
        raise plugin.PluginError("Failed to compile yara expression: %s" % e)
def __init__(self, session=None, address_space=None):
    self.session = session

    # This is a cache of tables. We can quickly find the table responsible
    # for a particular chunk.
    self.tables = utils.SortedCollection(key=lambda x: x[0])
    self._chunk_offset = 0
    self.chunk_size = 32 * 1024

    # 32kb * 100 = 3.2mb cache size.
    self.chunk_cache = utils.FastStore(max_size=100)

    self.address_space = address_space
    self.profile = EWFProfile(session=session)
    self.file_header = self.profile.ewf_file_header_v1(
        offset=0, vm=self.address_space)

    # Make sure the file signature is correct.
    if not self.file_header.EVF_sig.is_valid():
        raise RuntimeError("EVF signature does not match.")

    # Now locate all the sections in the file.
    first_section = self.profile.ewf_section_descriptor_v1(
        vm=self.address_space, offset=self.file_header.obj_end)

    for section in first_section.walk_list("next"):
        if section.type == "header2":
            self.handle_header2(section)
        elif section.type == "header":
            self.handle_header(section)
        elif section.type in ["disk", "volume"]:
            self.handle_volume(section)
        elif section.type == "table":
            self.handle_table(section)

    # How many chunks we actually have in this file.
    self.size = self._chunk_offset * self.chunk_size
def __init__(self, hive_addr=None, profile=None, **kwargs):
    """Translate between hive addresses and virtual memory addresses.

    This must be constructed over the kernel virtual memory.

    Args:
      hive_addr: The virtual address of the _CMHIVE object.
      profile: A profile which holds registry symbols.
    """
    super(HiveAddressSpace, self).__init__(**kwargs)

    self.as_assert(hive_addr, "Hive offset not provided.")
    self.as_assert(self.base, "Must be layered on kernel address space.")
    self.profile = RekallRegisteryImplementation(
        profile or self.session.profile)

    self.hive = self.profile._CMHIVE(offset=hive_addr, vm=self.base)
    self.baseblock = self.hive.Hive.BaseBlock.v()
    self.flat = self.hive.Hive.Flat.v() > 0
    self.storage = self.hive.Hive.Storage

    # This is a quick lookup for blocks.
    self.block_cache = utils.FastStore(max_size=1000)
def testComponents(self):
    self.component_cache = utils.FastStore(50)
    literal = files.LiteralComponent(session=self.session,
                                     cache=self.component_cache,
                                     component="passwd")

    path_spec = common.FileSpec("/etc")
    result = list(literal.filter(path_spec))
    self.assertTrue("/etc/passwd" in [unicode(x) for x in result])

    regex = files.RegexComponent(session=self.session,
                                 cache=self.component_cache,
                                 component="pass.+")

    result = list(regex.filter(path_spec))
    self.assertTrue("/etc/passwd" in [unicode(x) for x in result])

    recursive = files.RecursiveComponent(session=self.session,
                                         cache=self.component_cache,
                                         component=".+")
    result = list(recursive.filter(path_spec))
    self.assertTrue("/etc/ssh/ssh_config" in [unicode(x) for x in result])
def calculate(self):
    return utils.FastStore(50)
def __init__(self, **kwargs):
    super(CachingAddressSpaceMixIn, self).__init__(**kwargs)
    self._cache = utils.FastStore(self.CACHE_SIZE)
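A caching address space mixin like this one typically intercepts read() and serves page-aligned chunks out of the FastStore, delegating misses to the underlying address space. A sketch of that pattern under stated assumptions: the PAGE_SIZE granularity and the read/_read_chunk split here are illustrative, not necessarily the mixin's real layout.

from rekall import utils

class CachingAddressSpaceMixIn(object):
    # Number of cached chunks and the (assumed) chunk granularity.
    CACHE_SIZE = 10
    PAGE_SIZE = 0x1000

    def __init__(self, **kwargs):
        super(CachingAddressSpaceMixIn, self).__init__(**kwargs)
        self._cache = utils.FastStore(self.CACHE_SIZE)

    def read(self, addr, length):
        # Assemble the result from one or more page-aligned chunks.
        result = []
        while length > 0:
            data = self._read_chunk(addr, length)
            if not data:
                break
            result.append(data)
            addr += len(data)
            length -= len(data)
        return "".join(result)

    def _read_chunk(self, addr, length):
        # Each chunk is cached whole, keyed by its aligned start offset.
        chunk_start = addr - addr % self.PAGE_SIZE
        available = min(length, chunk_start + self.PAGE_SIZE - addr)
        try:
            chunk = self._cache.Get(chunk_start)
        except KeyError:
            chunk = super(CachingAddressSpaceMixIn, self).read(
                chunk_start, self.PAGE_SIZE)
            self._cache.Put(chunk_start, chunk)

        offset_in_chunk = addr - chunk_start
        return chunk[offset_in_chunk:offset_in_chunk + available]

The mixin is meant to be listed before a concrete address space in the class bases, so the super() call in _read_chunk reaches the real read implementation.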
def __init__(self, max_size=10):
    self.page_cache = utils.FastStore(max_size)
from rekall import obj
from rekall import registry
from rekall import utils


class DynamicParser(object):
    """A dynamic profile processor base class."""

    __metaclass__ = registry.MetaclassRegistry

    def calculate(self, session):
        """Returns the expected value or a NoneObject."""
        _ = session
        return obj.NoneObject("No value found")


DISASSEMBLER_CACHE = utils.FastStore()


class Disassembler(DynamicParser):
    """A constant generator deriving values based on the disassembler."""

    def __init__(self, start=None, end=None, length=100, rules=None):
        """Derive a value from disassembly.

        Args:
          start: Where to start disassembly (Usually a symbol name).
          end: Where to stop disassembly.
          length: If end is not specified, we disassemble at most this many
            bytes.
def __init__(self):
    self.store = utils.FastStore(10, lock=True)
group="Output control") config.DeclareOption("-q", "--quiet", default=False, type="Boolean", help="Turn off logging to stderr.", group="Output control") config.DeclareOption( "--debug", default=False, type="Boolean", help="If set we break into the debugger on error conditions.") MRO_CACHE = utils.FastStore(100, lock=True) class ObjectRenderer(object): """Baseclass for all TestRenderer object renderers.""" # Fall back renderer for all objects. This can also be a list or tuple of # all types rendered by this renderer. renders_type = "object" # These are the renderers supported by this object renderer. renderers = [] __metaclass__ = registry.MetaclassRegistry # A cache of Renderer, MRO mappings.
def __init__(self):
    self.store = utils.FastStore(10)
def collect(self):
    self.component_cache = utils.FastStore(50)
    for x in self.collect_globs(self.plugin_args.globs):
        yield dict(path=x)
help="How much information to show. Default is 'concise'.") config.DeclareOption("--logging_level", type="Choices", default="WARNING", choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], help="The default logging level.") config.DeclareOption("--log_domain", type="ChoiceArray", default=[], choices=constants.LOG_DOMAINS, help="Add debug logging to these components.") # A cache to map a tuple (mro, renderer) to the corresponding object renderer. MRO_RENDERER_CACHE = utils.FastStore(100, lock=True) # A cache to map a class to its reduced MRO list. Do not hold class references # in this cache as these capture closure variables via the Curry() classes on # the property methods. MRO_CACHE = utils.FastStore(100, lock=True) class ObjectRenderer(object): """Baseclass for all TestRenderer object renderers.""" # Fall back renderer for all objects. This can also be a list or tuple of # all types rendered by this renderer. renders_type = "object" # These are the renderers supported by this object renderer.
def __init__(self, session=None, renderer=None):
    self.renderer = renderer
    self.session = session
    self.cache = utils.FastStore(100)