Example #1
    def run(self, path):
        self.info = self.myconfig('information', 'history')
        if self.info not in ['history', 'cookies']:
            raise ValueError(
                'Invalid information kind {} to extract for edge artifacts'.
                format(self.info))

        esedbexport = self.config.config['plugins.common'].get(
            'esedbexport', 'esedbexport')

        # create the temporary directory outside the try block so the cleanup
        # in the finally clause never references an unassigned name
        webcache_dir = tempfile.mkdtemp(suffix="_WebCacheV0")
        try:
            run_command(
                [esedbexport, "-t",
                 os.path.join(webcache_dir, "db"), path],
                stderr=subprocess.DEVNULL)
            self.webcache_dir_export = os.path.join(webcache_dir, "db.export")
            if not os.path.exists(self.webcache_dir_export):
                raise base.job.RVTError(
                    'esedbexport could not create db.export')
            self.get_ids()

            for info in self.parse_export():
                yield info
        finally:
            shutil.rmtree(webcache_dir)

        return []
Example #2
    def run(self, path=""):
        if not os.path.isdir(self.myconfig('mountdir')):
            raise base.job.RVTError("Folder {} not exists".format(
                self.myconfig('mountdir')))

        search = GetFiles(self.config, vss=self.myflag("vss"))
        parser = os.path.join(
            self.myconfig('rvthome'),
            "plugins/external/UnifiedLogReader/scripts/UnifiedLogReader.py")
        uuidtext = search.search("/var/db/uuidtext$")
        timesync = search.search("/var/db/diagnostics/timesync$")
        diagnostics = search.search("/var/db/diagnostics$")

        ulr_path = self.myconfig('outdir')
        check_folder(ulr_path)

        if not uuidtext or not timesync or not diagnostics:
            return []

        python3 = '/usr/bin/python3'

        try:
            run_command([
                python3, parser,
                os.path.join(self.myconfig('casedir'), uuidtext[0]),
                os.path.join(self.myconfig('casedir'), timesync[0]),
                os.path.join(self.myconfig('casedir'), diagnostics[0]),
                ulr_path, "-l", "WARNING"
            ])
        except Exception as exc:
            self.logger().error(
                'Problems with UnifiedLogReader.py. Error: {}'.format(exc))
        self.logger().info("Done parsing UnifiedLogReader")
        return []
Example #3
    def mount_NTFS(self, imagefile=None, mountpath=None, offset=True):
        """ mount NTFS partition

        Configuration section:
            :ntfs_args: arguments for mount. The offset and sizelimit options are appended automatically to these arguments.
                The value is treated as a format string; the current group id is passed as the `gid` parameter.

        Args:
            imagefile (str): imagefile path (used for auxiliary mount point). If None, use self.imagefile.
            mountpath (str): mount the image on this path. If None, use `source/mnt/pXX`.
            offset (bool): if False, do not apply the partition offset (used for auxiliary mount points)
        """
        args = self.myconfig('ntfs_args').format(
            gid=grp.getgrgid(os.getegid())[2])
        if offset and self.obytes != 0:
            args = "%s,offset=%s,sizelimit=%s" % (args, self.obytes, self.size)
        mount = self.myconfig('mount', '/bin/mount')
        if not mountpath:
            mountpath = os.path.join(self.mountdir, "p%s" % self.partition)
        if not imagefile:
            imagefile = self.imagefile
        check_folder(mountpath)
        run_command(
            ["sudo", mount, imagefile, "-t", "ntfs-3g", "-o", args, mountpath],
            logger=self.logger)
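
The docstring above notes that `ntfs_args` is handled as a format string receiving the current group id as `gid`, with offset and sizelimit appended afterwards. The following is a minimal stand-alone sketch of how that option string ends up looking; the configured value of `ntfs_args` and the offset/size figures are assumptions made up for the illustration, not values taken from the plugin.

    import grp
    import os

    # hypothetical configured value for ntfs_args
    ntfs_args = "ro,show_sys_files,streams_interface=windows,gid={gid}"
    obytes, size = 1048576, 53687091200  # hypothetical partition offset and size in bytes

    args = ntfs_args.format(gid=grp.getgrgid(os.getegid())[2])
    if obytes != 0:
        args = "%s,offset=%s,sizelimit=%s" % (args, obytes, size)
    print(args)
    # e.g. ro,show_sys_files,streams_interface=windows,gid=1000,offset=1048576,sizelimit=53687091200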
Example #4
    def run(self, path=""):

        search = GetFiles(self.config, vss=self.myflag("vss"))
        users = search.search(r"p\d+(/root)?/Users/[^/]+$")
        mru_path = self.myconfig('outdir')
        check_folder(mru_path)

        parser = os.path.join(self.myconfig('rvthome'),
                              "plugins/external/macMRU/macMRU.py")
        python3 = os.path.join(self.myconfig('rvthome'), '.venv/bin/python3')

        for user in users:
            self.logger().info("Extracting MRU info from user {}".format(
                os.path.basename(user)))
            with open(
                    os.path.join(mru_path, '%s.txt' % os.path.basename(user)),
                    'w') as f:
                self.logger().debug("Generating file {}".format(
                    os.path.join(mru_path, '%s.txt' % os.path.basename(user))))
                run_command([
                    python3, parser,
                    os.path.join(self.myconfig('casedir'), user)
                ],
                            stdout=f)

        self.logger().info("Done parsing MacMRU")
        return []
Example #5
    def run(self, path=""):
        """ Extracts SRUM artifacts of a disk """
        vss = self.myflag('vss')
        SRUM_TEMPLATE = os.path.join(self.myconfig('rvthome'), "plugins/external/srum-dump/SRUM_TEMPLATE2.xlsx")
        srum = os.path.join(self.myconfig('rvthome'), "plugins/external/srum-dump/srum_dump2.py")
        check_file(SRUM_TEMPLATE, error_missing=True)

        Search = GetFiles(self.config, vss=self.myflag("vss"))
        SOFTWARE = list(Search.search('windows/system32/config/SOFTWARE$'))
        SRUDB = list(Search.search('/windows/system32/sru/SRUDB.dat$'))
        python3 = os.path.join(self.myconfig('rvthome'), ".venv/bin/python3")

        out_folder = self.myconfig('voutdir') if vss else self.myconfig('outdir')
        check_directory(out_folder, create=True)

        if not SRUDB:
            self.logger().info("SRUDB.dat not found in any partition of the disk")
            return []

        for soft in SOFTWARE:
            partition = soft.split('/')[2]
            for srudb in SRUDB:
                if srudb.split('/')[2] == partition:
                    self.logger().info("Parsing SRUDB from partition {}".format(partition))
                    out_file = os.path.join(out_folder, 'srum_{}.xlsx'.format(partition))
                    run_command([python3, srum, "-i", os.path.join(self.myconfig('casedir'), srudb), "-t", SRUM_TEMPLATE,
                                "-r", os.path.join(self.myconfig('casedir'), soft), "-o", out_file], logger=self.logger())

                    self.convert_to_csv(out_folder, partition)
                    os.remove(out_file)
                    break
            else:
                self.logger().info("SRUDB.dat not found in partition: {}".format(partition))

        return []
Example #6
    def run(self, path=""):
        if not os.path.isdir(self.myconfig('mountdir')):
            raise base.job.RVTError("Folder {} not exists".format(
                self.myconfig('mountdir')))

        search = GetFiles(self.config, vss=self.myflag("vss"))
        parser = os.path.join(
            self.myconfig('rvthome'),
            "plugins/external/FSEventsParser/FSEParser_V4.0.py")
        fsevents = search.search(r"\.fseventsd$")

        fsevents_path = self.myconfig('outdir')
        check_folder(fsevents_path)

        python = self.myconfig('python', '/usr/bin/python')

        n = 1
        for f in fsevents:
            self.logger().info("Processing file {}".format(f))
            run_command([
                python, parser, "-c", "Report_{}".format(f.split('/')[-2]),
                "-s",
                os.path.join(self.myconfig('casedir'),
                             f), "-t", "folder", "-o", fsevents_path, "-q",
                os.path.join(
                    self.myconfig('rvthome'),
                    "plugins/external/FSEventsParser/report_queries.json")
            ])
            n += 1
        self.logger().info("Done FSEvents")
        return []
Example #7
    def run(self, path=""):
        if not os.path.isdir(self.myconfig('mountdir')):
            raise base.job.RVTError("Folder {} not exists".format(
                self.myconfig('mountdir')))

        search = GetFiles(self.config, vss=self.myflag("vss"))
        parser = os.path.join(
            self.myconfig('rvthome'),
            "plugins/external/spotlight_parser/spotlight_parser.py")
        spotlight = search.search(r"/\.spotlight.*/store.db$")

        spotlight_path = self.myconfig('outdir')
        check_folder(spotlight_path)

        # TODO: adapt external spotlight_parser.py script to python3
        python = self.myconfig('python', '/usr/bin/python')

        n = 1
        errorlog = os.path.join(self.myconfig('sourcedir'),
                                "{}_aux.log".format(self.myconfig('source')))
        with open(errorlog, 'a') as logfile:
            for f in spotlight:
                self.logger().info("Processing file {}".format(f))
                run_command([
                    python, parser,
                    os.path.join(self.myconfig('casedir'), f), spotlight_path,
                    "-p",
                    "spot-%s" % str(n)
                ],
                            stdout=logfile,
                            stderr=logfile)
                n += 1
        self.logger().info("Spotlight done")
        return []
Example #8
    def vss_mount(self):
        vshadowmount = self.myconfig('vshadowmount',
                                     '/usr/local/bin/vshadowmount')

        if len(self.vss) > 0:
            vp = os.path.join(self.mountaux, "vp%s" % self.partition)
            if len(self.fuse) == 0 or "/dev/fuse" not in self.fuse.keys():
                check_folder(vp)
                if self.encrypted:
                    run_command([
                        "sudo", vshadowmount, "-X", "allow_root", self.loop, vp
                    ],
                                logger=self.logger)
                else:
                    run_command([
                        vshadowmount, "-X", "allow_root", self.imagefile, "-o",
                        str(self.obytes), vp
                    ],
                                logger=self.logger)
            for p in self.vss.keys():
                if self.vss[p] == "":
                    mp = os.path.join(self.mountdir, p)
                    self.mount_NTFS(imagefile=os.path.join(
                        vp, "vss%s" % p[1:].split("p")[0]),
                                    mountpath=mp,
                                    offset=False)
        self.refreshMountedImages()
Example #9
    def umountPartition(self, path):
        """ Umount path """

        umount = self.myconfig('umount', '/bin/umount')
        time.sleep(1)
        try:
            run_command(["sudo", umount, '-l', path], logger=self.logger)
        except Exception:
            self.logger.error("Error unmounting {}".format(path))
Example #10
def write_registry_file(filename,
                        pluginlist,
                        hivedict,
                        title,
                        regfiles,
                        rip='/opt/regripper/rip.pl',
                        logger=logging,
                        logfile=None):
    """ Generates a report file for a group of related regripper plugins.

    Parameters:
        filename (str): report filename
        pluginlist (list): list of plugins to execute
        hivedict (dict): relates plugin to hive files
        title (str): title of report file
        regfiles (dict): paths to hive files indexed by hive name; for 'ntuser' and 'usrclass' the value is a dict mapping user name to path
        rip (str): path to rip.pl executable
        logger (logging): logging instance
        logfile (file): stream related to logfile
    """

    separator = "=" * 105

    with open(filename, "w") as f:
        f.write("{}\n{}\n{}\n\n".format(separator, title, separator))
        for plugin in pluginlist:
            if hivedict[plugin] == ["all"]:
                hivedict[plugin] = [
                    "system", "software", "sam", "ntuser", "usrclass"
                ]
            for hiv in hivedict[plugin]:
                try:
                    if hiv == "ntuser" or hiv == "usrclass":
                        for user in regfiles[hiv].keys():
                            if not regfiles[hiv][user]:
                                continue
                            f.write(
                                "\n************* Extracting from User {} *************\n\n"
                                .format(user))
                            output = run_command(
                                [rip, "-r", regfiles[hiv][user], "-p", plugin],
                                stderr=logfile,
                                logger=logger)
                            f.write("{}\n".format(output))
                    else:
                        if hiv not in regfiles.keys():
                            continue
                        output = run_command(
                            [rip, "-r", regfiles[hiv], "-p", plugin],
                            stderr=logfile,
                            logger=logger)
                        f.write(output)
                    f.write("\n\n{}\n\n".format('.' * 107))
                except Exception as exc:
                    logger.error(exc)
                    continue
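
Since write_registry_file only documents its parameters, a hedged usage sketch may help: hivedict maps each plugin to the hives it should run against (or ["all"]), and regfiles maps hive names to paths, with 'ntuser' and 'usrclass' mapping to a per-user dict. Every plugin name, user and path below is hypothetical.

    # hypothetical plugin-to-hive mapping and hive locations
    hivedict = {
        "winver": ["software"],    # run only against SOFTWARE
        "userassist": ["ntuser"],  # run against every user's NTUSER.DAT
        "timezone": ["all"],       # expanded to system/software/sam/ntuser/usrclass
    }
    regfiles = {
        "system": "/cases/img/mnt/p02/Windows/System32/config/SYSTEM",
        "software": "/cases/img/mnt/p02/Windows/System32/config/SOFTWARE",
        "sam": "/cases/img/mnt/p02/Windows/System32/config/SAM",
        "ntuser": {"alice": "/cases/img/mnt/p02/Users/alice/NTUSER.DAT"},
        "usrclass": {"alice": "/cases/img/mnt/p02/Users/alice/AppData/Local/Microsoft/Windows/UsrClass.dat"},
    }
    write_registry_file("os_info.txt", list(hivedict.keys()), hivedict,
                        "Operating system information", regfiles)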
Example #11
 def mount_APFS(self):
     apfsmount = self.myconfig('apfsmount', '/usr/local/bin/apfs-fuse')
     mountpath = os.path.join(self.mountaux, "p%s" % self.partition)
     check_folder(mountpath)
     run_command([
         "sudo", apfsmount, "-s",
         str(self.obytes), "-v",
         str(self.voln), self.imagefile, mountpath
     ],
                 logger=self.logger)
     self.bindfs_mount()
Example #12
    def bindfs_mount(self):
        user = getpass.getuser()
        group = grp.getgrgid(os.getegid())[0]

        mountaux = os.path.join(self.mountaux, "p%s" % self.partition)
        check_folder(self.mountpath)
        bindfs = self.myconfig('bindfs', '/usr/bin/bindfs')
        run_command([
            "sudo", bindfs, "-p", "550", "-u", user, "-g", group, mountaux,
            self.mountpath
        ],
                    logger=self.logger)
Example #13
 def mount_fat(self, imagefile=None, mountpath=None, offset=True):
     args = self.myconfig('fat_args').format(
         gid=grp.getgrgid(os.getegid())[0])
     if offset and self.obytes != 0:
         args = "%s,offset=%s,sizelimit=%s" % (args, self.obytes, self.size)
     mount = self.myconfig('mount', '/bin/mount')
     if not mountpath:
         mountpath = os.path.join(self.mountdir, "p%s" % self.partition)
     if not imagefile:
         imagefile = self.imagefile
     check_folder(mountpath)
     run_command(["sudo", mount, self.imagefile, "-o", args, mountpath],
                 logger=self.logger)
Example #14
    def run(self, path=None):
        """ Export all pst and ost files in a mounted image. Path is ignored. """
        pffexport = self.myconfig('pffexport')

        outdir = self.myconfig('outdir')
        base.utils.check_directory(outdir, create=True)

        pst_files = GetFiles(
            self.config, vss=self.myflag("vss")).search(r"\.(pst|ost|nst)$")
        index = 0

        for pst_file in tqdm(pst_files,
                             desc=self.section,
                             disable=self.myflag('progress.disable')):
            index += 1
            # save metadata
            yield dict(filename=pst_file,
                       outdir="pff-{}".format(index),
                       index=index)
            try:
                if not os.path.exists(
                        os.path.join(self.myconfig('casedir'), pst_file)):
                    self.logger().warning('File %s does not exist', pst_file)
                    continue
                out_path = os.path.join(outdir, "pff-{}".format(index))
                self.logger().debug("Exporting %s to %s", pst_file, out_path)
                # check whether the output directories already exist
                for directory in [
                        '{}.export'.format(out_path),
                        '{}.recovered'.format(out_path)
                ]:
                    if base.utils.check_directory(directory):
                        if self.myflag('delete_exists'):
                            base.utils.check_directory(directory,
                                                       delete_exists=True)
                        else:
                            continue
                run_command([
                    pffexport, '-f', 'text', '-m', 'all', '-q', '-t', out_path,
                    pst_file
                ],
                            stderr=subprocess.DEVNULL,
                            from_dir=self.myconfig('casedir'))
            except Exception as exc:
                if self.myflag('stop_on_error'):
                    self.logger().error('Exception %s: %s',
                                        type(exc).__name__, exc)
                    raise base.job.RVTError(exc)
                else:
                    self.logger().warning('Exception %s: %s',
                                          type(exc).__name__, exc)
Example #15
    def parse_SysCache_hive(self):
        outfolder = self.myconfig('voutdir') if self.vss else self.myconfig(
            'outdir')
        # self.tl_file = os.path.join(self.myconfig('timelinesdir'), "%s_BODY.csv" % self.myconfig('source'))
        check_directory(outfolder, create=True)
        SYSC = self.search.search(r"/System Volume Information/SysCache.hve$")

        ripcmd = self.config.get('plugins.common', 'rip',
                                 '/opt/regripper/rip.pl')

        for f in SYSC:
            p = f.split('/')[2]
            output_text = run_command([
                ripcmd, "-r",
                os.path.join(self.myconfig('casedir'), f), "-p", "syscache_csv"
            ],
                                      logger=self.logger())
            output_file = os.path.join(outfolder, "syscache_%s.csv" % p)

            self.path_from_inode = FileSystem(
                config=self.config).load_path_from_inode(self.myconfig,
                                                         p,
                                                         vss=self.vss)

            save_csv(self.parse_syscache_csv(p, output_text),
                     outfile=output_file,
                     file_exists='OVERWRITE')

        self.logger().info("Finished extraction from SysCache")
Example #16
 def mount_ext(self):
     mount = self.myconfig('mount', '/bin/mount')
     mountpath = os.path.join(self.mountaux, "p%s" % self.partition)
     check_folder(mountpath)
     args = "%s,sizelimit=%s" % (self.myconfig('ext4_args'), self.size)
     if self.obytes != 0:
         args = "%s,offset=%s,sizelimit=%s" % (self.myconfig('ext4_args'),
                                               self.obytes, self.size)
     try:
         run_command(["sudo", mount, self.imagefile, "-o", args, mountpath],
                     logger=self.logger)
     except Exception:
         args = args + ',norecovery'
         run_command(["sudo", mount, self.imagefile, "-o", args, mountpath],
                     logger=self.logger)
     self.bindfs_mount()
Example #17
    def run(self, path=""):
        """ Get information of hiberfil.sys

        """
        volatility = self.config.get('plugins.common', 'volatility',
                                     '/usr/local/bin/vol.py')

        hiber_path = self.myconfig('outdir')
        check_folder(hiber_path)

        search = GetFiles(self.config, vss=self.myflag("vss"))
        hiberlist = search.search("/hiberfil.sys$")

        for h in hiberlist:
            aux = re.search(
                "{}/([^/]*)/".format(
                    base.utils.relative_path(self.myconfig('mountdir'),
                                             self.myconfig('casedir'))), h)
            partition = aux.group(1)

            hiber_raw = os.path.join(hiber_path,
                                     "hiberfil_{}.raw".format(partition))
            profile, version = self.get_win_profile(partition)
            with open(
                    os.path.join(hiber_path,
                                 "hiberinfo_{}.txt".format(partition)),
                    'w') as pf:
                pf.write("Profile: %s\nVersion: %s" % (profile, version))
            if version.startswith("5") or version.startswith(
                    "6.0") or version.startswith("6.1"):
                self.logger().info("Uncompressing {}".format(h))
                run_command([
                    volatility, "--profile={}".format(profile), "-f",
                    os.path.join(self.myconfig('casedir'), h), "imagecopy",
                    "-O", hiber_raw
                ],
                            logger=self.logger())
            else:
                self.logger().info(
                    "{} could not be decompressed with a Linux distro".format(h))
                self.logger().info(
                    "Decompress hiberfil.sys files from Windows 8 or higher using https://arsenalrecon.com/weapons/hibernation-recon/"
                )
                self.logger().info("Save the output at {}".format(hiber_raw))
            self.vol_extract(hiber_raw, profile, version)
        return []
Example #18
 def _getRawImagefile(self):
     # convert an Encase image to dd using ewfmount
     fuse_path = os.path.join(self.params('mountauxdir'), "encase")
     imagefile = os.path.join(fuse_path, "ewf1")
     self.auxdirectories.append(fuse_path)
     if not os.path.exists(imagefile):
         ewfmount = self.params('ewfmount', '/usr/bin/ewfmount')
         check_folder(fuse_path)
         try:
             run_command(
                 [ewfmount, self.imagefile, "-X", "allow_root", fuse_path])
         except Exception:
             self.logger.error("Cannot mount Encase imagefile=%s",
                               self.imagefile)
             raise base.job.RVTError(
                 "Cannot mount Encase imagefile={}".format(self.imagefile))
     return imagefile
Example #19
    def mount_bitlocker(self):
        if 'dislocker' in self.fuse.keys():
            self.logger.info("Bitlocker partition p{} already mounted".format(
                self.partition))
            return
        rec_key = self.myconfig('recovery_keys')
        dislocker = self.myconfig('dislocker', '/usr/bin/dislocker')
        mountauxpath = os.path.join(self.mountaux, "p%s" % self.partition)
        check_folder(mountauxpath)
        import time

        if rec_key == "":
            self.logger.warning(
                "Recovery key not available on partition p%s. Trying without key"
                % self.partition)
            try:
                cmd = "sudo {} -c -O {} -V {} -r {}".format(
                    dislocker, self.obytes, self.imagefile, mountauxpath)
                run_command(cmd, logger=self.logger)
                time.sleep(4)
                self.refreshMountedImages()
                self.mount_NTFS(os.path.join(mountauxpath, "dislocker-file"),
                                offset=False)
            except Exception:
                self.logger.error("Problems mounting partition p%s" %
                                  self.partition)
                return -1
        else:
            self.logger.info("Trying to mount with recovery keys at {}".format(
                self.mountaux))
            mountauxpath = os.path.join(self.mountaux, "p%s" % self.partition)
            for rk in rec_key.split(
                    ','):  # loop with different recovery keys, comma separated
                try:
                    cmd = "sudo {} -p{} -O {} -V {} -r {}".format(
                        dislocker, rk, self.obytes, self.imagefile,
                        mountauxpath)
                    run_command(cmd, logger=self.logger)
                    time.sleep(4)
                    self.refreshMountedImages()
                    self.mount_NTFS(os.path.join(mountauxpath,
                                                 "dislocker-file"),
                                    offset=False)
                    break
                except Exception:
                    pass
Example #20
    def run(self, keyfile=""):
        """
        Search the contents of the output dir for each regex, skipping the strings, searches and parser folders
        """
        self.logger().info("Searching at output folder")
        if not keyfile:
            keyfile = self.myconfig('keyfile')
        check_file(keyfile, error_missing=True)

        grep = self.config.get('plugins.common', 'grep', '/bin/grep')

        skip_folders = ("strings", "parser", "searches")

        self.logger().info("Getting key list from {}".format(keyfile))
        keywords = getSearchItems(keyfile)

        temp_dir = tempfile.mkdtemp('outsearch')
        outdir = self.myconfig('outdir')
        check_directory(outdir, create=True)

        for kw, srch in keywords.items():
            output_file = os.path.join(temp_dir, "outsearch_{}.txt".format(kw))
            with open(output_file, "w") as f:
                f.write(
                    "\nKeyword: {}\n-----------------------------\n\n".format(
                        srch))
                f.flush()

                for item in os.listdir(self.myconfig('outputdir')):
                    folder = os.path.join(self.myconfig('outputdir'), item)
                    if os.path.isdir(folder) and item not in skip_folders:
                        run_command([grep, "-ilR", srch, item],
                                    stdout=f,
                                    from_dir=self.myconfig('outputdir'),
                                    logger=self.logger())

        try:
            for file in os.listdir(temp_dir):
                shutil.copy(os.path.join(temp_dir, file),
                            os.path.join(outdir, file))
        finally:
            shutil.rmtree(temp_dir)

        self.logger().info("OutSearch done")
        return []
Example #21
    def mount_HFS(self, imagefile="", mountpath="", offset=True):
        # TODO: avoid infinite recursion if mount fails after having called fvdemount
        if mountpath == "":
            mountpath = os.path.join(self.mountaux, "p%s" % self.partition)
        if imagefile == "":
            imagefile = self.imagefile

        mount = self.myconfig('mount', '/bin/mount')
        check_folder(mountpath)
        args = "%s,sizelimit=%s" % (self.myconfig('hfs_args'), self.size)
        if offset and self.obytes != 0:
            args = "%s,offset=%s,sizelimit=%s" % (self.myconfig('hfs_args'),
                                                  self.obytes, self.size)
        try:
            run_command(["sudo", mount, imagefile, "-o", args, mountpath],
                        logger=self.logger)
            self.bindfs_mount()
        except Exception:
            self.fvde_mount()
Example #22
 def _getRawImagefile(self):
     fuse_path = os.path.join(self.params('mountauxdir'), "aff")
     imagefile = os.path.join(fuse_path,
                              "%s.raw" % os.path.basename(self.imagefile))
     self.auxdirectories.append(fuse_path)
     if not os.path.exists(imagefile):
         affuse = self.params('affuse', '/usr/bin/affuse')
         check_folder(fuse_path)
         try:
             run_command(["sudo", affuse, self.imagefile, fuse_path])
             fuse_path = os.path.join(self.params('mountauxdir'), "aff")
             imagefile = os.path.join(
                 fuse_path, "%s.raw" % os.path.basename(self.imagefile))
         except Exception:
             self.logger.error("Cannot mount AFF imagefile=%s",
                               self.imagefile)
             raise base.job.RVTError("Cannot mount AFF imagefile={}".format(
                 self.imagefile))
     return imagefile
Example #23
 def fvde_mount(self):
     self.logger.debug('Obtaining encrypted partition')
     fvdemount = self.myconfig('fvdemount', '/usr/local/bin/fvdemount')
     password = self.myconfig('password')
     mountpoint = os.path.join(self.mountaux, "vp%s" % self.partition)
     check_folder(mountpoint)
     # TODO: get 'EncryptedRoot.plist.wipekey' from recovery partition: https://github.com/libyal/libfvde/wiki/Mounting
     encryptedfile = os.path.join(self.myconfig('sourcedir'),
                                  'EncryptedRoot.plist.wipekey')
     run_command([
         'sudo', fvdemount, "-e", encryptedfile, "-p", password, "-X",
         "allow_root", "-o",
         str(self.obytes), self.imagefile, mountpoint
     ],
                 logger=self.logger)
     time.sleep(2)  # let it do its work
     self.mount_HFS(imagefile=os.path.join(mountpoint, 'fvde1'),
                    mountpath=os.path.join(self.mountaux,
                                           "p%s" % self.partition),
                    offset=False)
Example #24
    def generate(self, evtx_path):
        """ Auxiliary function """

        check_directory(evtx_path, create=True)
        evtx = self.config.get('plugins.common', 'evtxdump',
                               '/usr/local/bin/evtxdump.pl')

        alloc_files = GetFiles(self.config, vss=self.myflag("vss"))
        if self.vss:
            evtx_files = alloc_files.search(r"{}.*\.evtx$".format(
                evtx_path.split('/')[-1]))
        else:
            evtx_files = alloc_files.search(r"\.evtx$")

        errorlog = self.myconfig(
            'errorlog',
            os.path.join(self.myconfig('sourcedir'),
                         "{}_aux.log".format(self.myconfig('source'))))

        for i in evtx_files:
            evtx_file = os.path.join(self.myconfig('casedir'), i)
            if not check_file(evtx_file):
                self.logger().warning('File %s does not exist', evtx_file)
                continue
            self.logger().info("Parsing {}".format(i))
            name = os.path.join(evtx_path, os.path.basename(i))[:-4] + "txt"

            # if the output already exists, continue
            if check_file(name):
                self.logger().debug(
                    'The output file %s already exists. Skipping', name)
                continue

            with open(name, "wb") as f:
                with open(errorlog, 'a') as logfile:
                    run_command([evtx, evtx_file],
                                stdout=f,
                                stderr=logfile,
                                logger=self.logger())
Example #25
    def run(self, path=""):
        """ Parses activities cache

        """

        self.search = GetFiles(self.config, vss=self.myflag("vss"))
        self.logger().info("Parsing Activities Cache files")
        vss = self.myflag('vss')

        if vss:
            base_path = self.myconfig('voutdir')
        else:
            base_path = self.myconfig('outdir')
        check_folder(base_path)

        activities = self.search.search("/ConnectedDevicesPlatform/.*/ActivitiesCache.db$")

        activities_cache_parser = self.myconfig('activities_cache_parser', os.path.join(self.myconfig('rvthome'), '.venv/bin/winactivities2json.py'))
        python3 = self.myconfig('python3', os.path.join(self.myconfig('rvthome'), '.venv/bin/python3'))

        for act in activities:
            with open(os.path.join(base_path, '{}_activitycache_{}.json'.format(act.split('/')[2], act.split('/')[-2])), 'w') as out_file:
                run_command([python3, activities_cache_parser, '-s', act], from_dir=self.myconfig('casedir'), stdout=out_file)
        return []
Example #26
def searchCountRegex(regex, string_path, grep='grep', logger=logging):
    """ Return number of times a hit appears

    Args:
        regex (str): regular expression to seek
    Returns:
        dict: dict with number of times a hit appears
        """

    data = Counter()

    for f in os.listdir(string_path):
        try:
            text = run_command(
                [grep, "-oP", regex,
                 os.path.join(string_path, f)],
                logger=logger)
        except Exception:  # no hits
            continue
        for hit in text.split("\n"):
            data[hit] += 1

    return data
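
A hedged usage sketch for searchCountRegex: the function greps every file in string_path and returns a Counter of the individual hits, so the result can be queried with most_common(). The regex and directory below are made up for the example.

    # hypothetical call: count e-mail addresses found in the string dumps
    hits = searchCountRegex(r"[\w.-]+@[\w.-]+\.\w+",
                            "/cases/img/output/strings",
                            grep="/bin/grep")
    for hit, count in hits.most_common(10):
        print(count, hit)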
Example #27
    def parse_ShimCache_hive(self, sysfile):
        """ Launch shimcache regripper plugin and parse results """
        ripcmd = self.config.get('plugins.common', 'rip',
                                 '/opt/regripper/rip.pl')
        date_regex = re.compile(
            r'\w{3}\s\w{3}\s+\d+\s\d{2}:\d{2}:\d{2}\s\d{4} Z')

        res = run_command([
            ripcmd, "-r",
            os.path.join(self.myconfig('casedir'), sysfile), "-p", "shimcache"
        ],
                          logger=self.logger())
        for line in res.split('\n'):
            if ':' not in line[:4]:
                continue
            matches = re.search(date_regex, line)
            if matches:
                path = line[:matches.span()[0] - 2]
                date = str(
                    datetime.datetime.strptime(matches.group(),
                                               '%a %b %d %H:%M:%S %Y Z'))
                executed = bool(len(line[matches.span()[1]:]))
                yield OrderedDict([('LastModified', date), ('AppPath', path),
                                   ('Executed', executed)])
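
parse_ShimCache_hive yields one OrderedDict per ShimCache entry with the keys LastModified, AppPath and Executed. Below is a minimal sketch of consuming that generator with the standard csv module; shimcache_job stands for an instance of the class defining the method, and the SYSTEM hive path is hypothetical (inside the project these rows are presumably written out with its own CSV helpers, as in Example #15).

    import csv

    # dump the yielded rows to a CSV file
    rows = shimcache_job.parse_ShimCache_hive("mnt/p02/Windows/System32/config/SYSTEM")
    with open("shimcache_p02.csv", "w", newline="") as out:
        writer = csv.DictWriter(out, fieldnames=["LastModified", "AppPath", "Executed"])
        writer.writeheader()
        writer.writerows(rows)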
Example #28
 def umount(self, unzip_path=None):
     super().umount()
     # unmount auxiliary images (encase and aff4)
     umount = self.params('umount', '/bin/umount')
     for mp in self.auxdirectories:
         run_command(["sudo", umount, '-l', mp])
Example #29
    def run(self, path=None):
        """ The path is ignored, and the source image is used. """
        vss = self.myflag('vss')
        fls = self.myconfig('fls', 'fls')
        apfs_fls = self.myconfig('apfs_fls', 'fls')
        mactime = self.myconfig('mactime', 'mactime')

        disk = getSourceImage(self.myconfig)

        tl_path = self.myconfig('outdir')
        if vss:
            tl_path = self.myconfig('voutdir')

        check_folder(tl_path)

        if not vss:
            self.logger().info("Generating BODY file for %s", disk.disknumber)
            body = os.path.join(tl_path, "{}_BODY.csv".format(disk.disknumber))

            # create the body file
            with open(body, "wb") as f:
                for p in disk.partitions:
                    mountpath = base.utils.relative_path(
                        p.mountpath, self.myconfig('casedir'))

                    if not p.isMountable:
                        continue
                    if not disk.sectorsize:
                        # unknown sector size
                        run_command([
                            fls, "-s", "0", "-m", mountpath, "-r", "-o",
                            str(p.osects), "-i", "raw", disk.imagefile
                        ],
                                    stdout=f,
                                    logger=self.logger())
                    elif p.filesystem == "NoName":
                        # APFS filesystems are identified as NoName, according to our experience
                        try:
                            run_command([
                                apfs_fls, "-B",
                                str(p.block_number), "-s", "0", "-m",
                                mountpath, "-r", "-o",
                                str(p.osects), "-b",
                                str(disk.sectorsize), "-i", "raw",
                                disk.imagefile
                            ],
                                        stdout=f,
                                        logger=self.logger())
                        except Exception:
                            # sometimes, APFS filesystems report a wrong offset. Try again with offset*8
                            run_command([
                                apfs_fls, "-B",
                                str(p.block_number), "-s", "0", "-m",
                                mountpath, "-r", "-o",
                                str(p.osects * 8), "-b",
                                str(disk.sectorsize), "-i", "raw",
                                disk.imagefile
                            ],
                                        stdout=f,
                                        logger=self.logger())
                    else:
                        # we know the sector size
                        if p.encrypted:
                            run_command([
                                fls, "-s", "0", "-m", mountpath, "-r", "-b",
                                str(disk.sectorsize), p.loop
                            ],
                                        stdout=f,
                                        logger=self.logger())
                        else:
                            run_command([
                                fls, "-s", "0", "-m", mountpath, "-r", "-o",
                                str(p.osects), "-b",
                                str(disk.sectorsize), disk.imagefile
                            ],
                                        stdout=f,
                                        logger=self.logger())

            # create the timeline using mactime
            self.logger().info("Creating timeline of {}".format(
                disk.disknumber))
            hsum = os.path.join(tl_path, "%s_hour_sum.csv" % disk.disknumber)
            fcsv = os.path.join(tl_path, "%s_TL.csv" % disk.disknumber)
            with open(fcsv, "wb") as f:
                run_command([
                    mactime, "-b", body, "-m", "-y", "-d", "-i", "hour", hsum
                ],
                            stdout=f,
                            logger=self.logger())
            run_command(['sed', '-i', '1,2d',
                         hsum])  # Delete header because full path is included
        else:
            # generate body and timeline for each VSS in the disk
            for p in disk.partitions:
                for v, dev in p.vss.items():
                    if dev != "":
                        self.logger().info(
                            "Generating BODY file for {}".format(v))
                        body = os.path.join(tl_path, "{}_BODY.csv".format(v))

                        with open(body, "wb") as f:
                            mountpath = base.utils.relative_path(
                                p.mountpath, self.myconfig('casedir'))
                            run_command([
                                fls, "-s", "0", "-m",
                                "%s" % mountpath, "-r", dev
                            ],
                                        stdout=f,
                                        logger=self.logger())

                        self.logger().info(
                            "Creating timeline for {}".format(v))
                        hsum = os.path.join(tl_path, "%s_hour_sum.csv" % v)
                        fcsv = os.path.join(tl_path, "%s_TL.csv" % v)
                        with open(fcsv, "wb") as f:
                            run_command([
                                mactime, "-b", body, "-m", "-y", "-d", "-i",
                                "hour", hsum
                            ],
                                        stdout=f,
                                        logger=self.logger())
                        run_command([
                            'sed', '-i', '1,2d', hsum
                        ])  # Delete header because full path is included

        self.logger().info("Timelines generation done!")
        return []
Example #30
    def report_search_kw(self, keyword, regex):
        """ Creates a pdf file from 'all_kw' file, using LaTex.

        Parameters:
            keyword (str): keyword name
            regex (str): regular expression associated with the keyword
        """

        # TODO: do not break lines. Use lstlisting or something else
        pdflatex = self.myconfig('pdflatex', '/usr/bin/pdflatex')

        search_path = self.myconfig('search_dir')
        check_directory(search_path, error_missing=True)
        report_path = self.myconfig('outdir')
        check_directory(report_path, create=True)

        kw_utf8 = ''.join([i + '.' for i in keyword])
        # Avoid LaTeX special characters
        replaces = [(u'\ufffd', "."), ("\\", "/"), (r"{", "("), (r"]", ")"),
                    (r"$", "\\$"), (r"_", "\\_"), (r"%", "\\%"), (r"}", ")"),
                    (r"^", "."), (r"#", "\\#"), (r"~", "."), ("&", "\\&"),
                    ('"', "'"), (r"€", "euro")]
        line_width = 68  # number of characters per line in tex file

        for file in os.listdir(search_path):
            if not file.startswith("all_{}".format(keyword)):
                continue
            self.logger().info('Creating file {}'.format(file + '.pdf'))

            with open(os.path.join(report_path, file + ".tex"),
                      "w") as foutput:

                foutput.write(
                    "\\documentclass[a4paper,11pt,oneside]{report}\n\\usepackage[spanish]{babel}\n"
                )
                foutput.write("\\usepackage[utf8]{inputenc}\n")
                foutput.write("\\usepackage[pdftex]{color,graphicx}\n")
                foutput.write("\\usepackage[pdftex,colorlinks]{hyperref}\n")
                foutput.write("\\usepackage{fancyvrb}\n")
                foutput.write("\\usepackage{eurosym}\n")
                foutput.write("\\usepackage{listings}\n")
                foutput.write(
                    "\\lstset{breakatwhitespace=false,breaklines=true,frame=single}\n"
                )
                foutput.write("\\UseRawInputEncoding\n")
                foutput.write("\\begin{document}\n\n")
                foutput.write(
                    "\\section*{blindsearches in disk. Keyword:  \\emph{" +
                    keyword + "}}\n")
                initial = True

                if os.path.getsize(os.path.join(search_path, file)) == 0:
                    foutput.write("\\end{document}\n")
                    continue

                with open(os.path.join(search_path, file), "rb") as finput:
                    for line in finput:
                        line = line.decode("iso8859-15", "replace")
                        for r in replaces:
                            line = line.replace(r[0], r[1])

                        if line.startswith('Pt: p'):  # Block information
                            foutput.write(
                                "\\end{Verbatim}\n\n" if not initial else "")
                            foutput.write("\\newpage\n" if not initial else "")
                            initial = False
                            foutput.write("\\begin{lstlisting}\n")
                            foutput.write(line)
                            foutput.write("\\end{lstlisting}\n")
                            foutput.write(
                                "\\begin{Verbatim}[commandchars=\\\\\\{\\}]\n")
                            continue

                        line = re.sub("[\x00-\x09\x0B-\x1F\x7F-\xFF]", ".",
                                      line)
                        # Write in chunks. Note: some hits may be missed this way
                        for chunk_line in [
                                line[i:i + line_width]
                                for i in range(0, len(line), line_width)
                        ]:
                            chunk_line = re.sub('({})'.format(regex),
                                                r"\\colorbox{green}{" + r'\1' +
                                                r"}",
                                                chunk_line,
                                                flags=re.I | re.M)
                            chunk_line = re.sub('({})'.format(kw_utf8),
                                                r"\\colorbox{green}{" + r'\1' +
                                                r"}",
                                                chunk_line,
                                                flags=re.I | re.M)
                            foutput.write(chunk_line + "\n")

                foutput.write("\\end{Verbatim}\n")
                foutput.write("\\end{document}\n")

            run_command(
                [pdflatex, "-output-directory", report_path, file + ".tex"],
                logger=self.logger())
            break

        else:
            self.logger().warning(
                'No file: all_{}. Perhaps there is no match for the keyword'.
                format(keyword))

        for file in os.listdir(report_path):
            if file.endswith(".log") or file.endswith(".tex") or file.endswith(
                    ".aux") or file.endswith(".toc") or file.endswith(
                        ".out") or file.endswith(".synctex.gz"):
                os.remove(os.path.join(report_path, file))