Example #1
    def run(self, path=""):
        if not os.path.isdir(self.myconfig('mountdir')):
            raise base.job.RVTError("Folder {} not exists".format(
                self.myconfig('mountdir')))

        search = GetFiles(self.config, vss=self.myflag("vss"))
        parser = os.path.join(
            self.myconfig('rvthome'),
            "plugins/external/FSEventsParser/FSEParser_V4.0.py")
        fsevents = search.search(r"\.fseventsd$")

        fsevents_path = self.myconfig('outdir')
        check_folder(fsevents_path)

        python = self.myconfig('python', '/usr/bin/python')

        for f in fsevents:
            self.logger().info("Processing file {}".format(f))
            run_command([
                python, parser, "-c", "Report_{}".format(f.split('/')[-2]),
                "-s",
                os.path.join(self.myconfig('casedir'),
                             f), "-t", "folder", "-o", fsevents_path, "-q",
                os.path.join(
                    self.myconfig('rvthome'),
                    "plugins/external/FSEventsParser/report_queries.json")
            ])
        self.logger().info("Done FSEvents")
        return []
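
run_command and GetFiles are RVT2 helpers that recur in every example below. A rough stand-in for run_command with the standard library, assuming it only runs the argument list, optionally redirects output, and returns captured stdout when nothing is redirected (the real helper also logs):

    import subprocess

    def run_command(args, stdout=None, stderr=None, from_dir=None, logger=None):
        """Sketch: run the command, raising on a non-zero exit code."""
        if stdout is None:
            return subprocess.run(args, stderr=stderr, cwd=from_dir, check=True,
                                  stdout=subprocess.PIPE,
                                  universal_newlines=True).stdout
        subprocess.run(args, stdout=stdout, stderr=stderr, cwd=from_dir, check=True)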
Example #2
    def run(self, path=""):
        if not os.path.isdir(self.myconfig('mountdir')):
            raise base.job.RVTError("Folder {} not exists".format(
                self.myconfig('mountdir')))

        search = GetFiles(self.config, vss=self.myflag("vss"))
        parser = os.path.join(
            self.myconfig('rvthome'),
            "plugins/external/UnifiedLogReader/scripts/UnifiedLogReader.py")
        uuidtext = search.search("/var/db/uuidtext$")
        timesync = search.search("/var/db/diagnostics/timesync$")
        diagnostics = search.search("/var/db/diagnostics$")

        ulr_path = self.myconfig('outdir')
        check_folder(ulr_path)

        if not uuidtext or not timesync or not diagnostics:
            return []

        python3 = '/usr/bin/python3'

        try:
            run_command([
                python3, parser,
                os.path.join(self.myconfig('casedir'), uuidtext[0]),
                os.path.join(self.myconfig('casedir'), timesync[0]),
                os.path.join(self.myconfig('casedir'), diagnostics[0]),
                ulr_path, "-l", "WARNING"
            ])
        except Exception as exc:
            self.logger().error(
                'Problems with UnifiedLogReader.py. Error: {}'.format(exc))
        self.logger().info("Done parsing UnifiedLogReader")
        return []
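
GetFiles.search appears to match a regular expression against case-relative paths of allocated files. A toy equivalent over a mounted image, under that assumption:

    import os
    import re

    def search(casedir, pattern):
        """Yield case-relative paths (files and directories) matching pattern."""
        regex = re.compile(pattern, re.IGNORECASE)
        for root, dirs, files in os.walk(casedir):
            for name in dirs + files:
                rel = os.path.relpath(os.path.join(root, name), casedir)
                if regex.search(rel):
                    yield rel

    # e.g. list(search('/cases/mycase', r'/var/db/diagnostics/timesync$'))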
Example #3
    def run(self, path=""):
        """ Extracts SRUM artifacts of a disk """
        vss = self.myflag('vss')
        SRUM_TEMPLATE = os.path.join(self.myconfig('rvthome'), "plugins/external/srum-dump/SRUM_TEMPLATE2.xlsx")
        srum = os.path.join(self.myconfig('rvthome'), "plugins/external/srum-dump/srum_dump2.py")
        check_file(SRUM_TEMPLATE, error_missing=True)

        Search = GetFiles(self.config, vss=self.myflag("vss"))
        SOFTWARE = list(Search.search('windows/system32/config/SOFTWARE$'))
        SRUDB = list(Search.search('/windows/system32/sru/SRUDB.dat$'))
        python3 = os.path.join(self.myconfig('rvthome'), ".venv/bin/python3")

        out_folder = self.myconfig('voutdir') if vss else self.myconfig('outdir')
        check_directory(out_folder, create=True)

        if not SRUDB:
            self.logger().info("SRUDB.dat not found in any partition of the disk")
            return []

        for soft in SOFTWARE:
            partition = soft.split('/')[2]
            for srudb in SRUDB:
                if srudb.split('/')[2] == partition:
                    self.logger().info("Parsing SRUDB from partition {}".format(partition))
                    out_file = os.path.join(out_folder, 'srum_{}.xlsx'.format(partition))
                    run_command([python3, srum, "-i", os.path.join(self.myconfig('casedir'), srudb), "-t", SRUM_TEMPLATE,
                                "-r", os.path.join(self.myconfig('casedir'), soft), "-o", out_file], logger=self.logger())

                    self.convert_to_csv(out_folder, partition)
                    os.remove(out_file)
                    break
            else:
                self.logger().info("SRUDB.dat not found in partition: {}".format(partition))

        return []
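
The SOFTWARE/SRUDB pairing keys on path component 2 being the partition label, which assumes case-relative paths shaped like '<source>/mnt/<partition>/...':

    path = 'evidence/mnt/p03/Windows/System32/sru/SRUDB.dat'  # illustrative
    print(path.split('/')[2])  # -> 'p03'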
Example #4
    def get_evtx(self, path, regex_search):
        """ Retrieve the evtx file to parse.
        Take 'path' if it is defined and exists.
        Otherwise take the first match for the corresponding evtx file among allocated files in the filesystem.

        Args:
            path: path to the evtx file as defined in the job
            regex_search: regular expression to search for among allocated files in the filesystem

        """
        if path:
            if os.path.exists(path):
                return path
            else:
                raise base.job.RVTError('path {} does not exist'.format(path))

        alloc_files = GetFiles(self.config, vss=self.myflag("vss"))

        evtx_files = alloc_files.search(regex_search)
        if len(evtx_files) < 1:
            self.logger().info("{} matches not found in filesystem".format(regex_search))
            return ''
        if len(evtx_files) > 1:
            self.logger().warning("More than one file matches {}. Only parsing the file {}".format(regex_search, evtx_files[0]))

        return os.path.join(self.myconfig('casedir'), evtx_files[0])
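
The selection logic of get_evtx, isolated with synthetic inputs: an explicit path wins, otherwise the first match is taken and extra matches are only warned about:

    def pick(path, matches):
        if path:
            return path        # explicit path from the job wins
        if not matches:
            return ''          # nothing found in the filesystem
        return matches[0]      # first match; warn if there were several

    print(pick('', ['p01/Security.evtx', 'p02/Security.evtx']))  # -> p01/Security.evtx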
Example #5
    def run(self, path=""):
        """ Main function to extract quick look information

        """

        if not os.path.isdir(self.myconfig('mountdir')):
            raise base.job.RVTError("Folder {} not exists".format(
                self.myconfig('mountdir')))

        ql_path = self.myconfig("outdir")

        check_folder(ql_path)

        search = GetFiles(self.config, vss=self.myflag("vss"))

        ql_list = search.search("QuickLook.thumbnailcache$")

        for i in ql_list:
            self.logger().info("Extracting quicklook data from {}".format(i))
            out_path = os.path.join(ql_path, i.split("/")[-3])
            if not os.path.isdir(out_path):
                os.mkdir(out_path)
            quicklook_parser_v_3_5mod.process_database(
                os.path.join(self.myconfig('casedir'), i), out_path)
        self.logger().info("Done QuickLook")
        return []
Example #7
    def run(self, path=""):

        search = GetFiles(self.config, vss=self.myflag("vss"))
        users = search.search(r"p\d+(/root)?/Users/[^/]+$")
        mru_path = self.myconfig('outdir')
        check_folder(mru_path)

        parser = os.path.join(self.myconfig('rvthome'),
                              "plugins/external/macMRU/macMRU.py")
        python3 = os.path.join(self.myconfig('rvthome'), '.venv/bin/python3')

        for user in users:
            self.logger().info("Extracting MRU info from user {}".format(
                os.path.basename(user)))
            with open(
                    os.path.join(mru_path, '%s.txt' % os.path.basename(user)),
                    'w') as f:
                self.logger().debug("Generating file {}".format(
                    os.path.join(mru_path, '%s.txt' % os.path.basename(user))))
                run_command([
                    python3, parser,
                    os.path.join(self.myconfig('casedir'), user)
                ],
                            stdout=f)

        self.logger().info("Done parsing MacMRU")
        return []
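
The per-user loop pipes the external parser's stdout straight into a report file. With the standard library alone that pattern is (paths hypothetical):

    import subprocess

    with open('macmru/alice.txt', 'w') as f:
        subprocess.run(['python3', 'macMRU.py', '/case/mnt/p01/Users/alice'],
                       stdout=f, check=True)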
Example #8
    def run(self, path=""):
        vss = self.myflag('vss')
        self.search = GetFiles(self.config, vss=vss)

        outfolder = self.myconfig('voutdir') if vss else self.myconfig(
            'outdir')
        check_directory(outfolder, create=True)

        amcache_hives = [path] if path else self.search.search("Amcache.hve$")
        for am_file in amcache_hives:
            self.amcache_path = os.path.join(self.myconfig('casedir'), am_file)
            partition = am_file.split("/")[2]
            self.logger().info("Parsing {}".format(am_file))
            self.outfile = os.path.join(outfolder,
                                        "amcache_{}.csv".format(partition))

            try:
                reg = Registry.Registry(
                    os.path.join(self.myconfig('casedir'), am_file))
                entries = self.parse_amcache_entries(reg)
                save_csv(entries,
                         outfile=self.outfile,
                         file_exists='OVERWRITE',
                         quoting=0)
            except KeyError:
                self.logger().warning(
                    "Expected subkeys not found in hive file: {}".format(
                        am_file))
            except Exception as exc:
                self.logger().warning("Problems parsing: {}. Error: {}".format(
                    am_file, exc))

        self.logger().info("Amcache.hve parsing finished")
        return []
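
parse_amcache_entries (not shown) walks the hive with python-registry; the opening step looks roughly like this (hive path hypothetical):

    from Registry import Registry

    reg = Registry.Registry('/case/mnt/p02/Windows/AppCompat/Programs/Amcache.hve')
    root = reg.open('Root')        # Amcache entries hang below the Root key
    for subkey in root.subkeys():
        print(subkey.name())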
Example #9
    def run(self, path=""):
        if not os.path.isdir(self.myconfig('mountdir')):
            raise base.job.RVTError("Folder {} not exists".format(
                self.myconfig('mountdir')))

        search = GetFiles(self.config, vss=self.myflag("vss"))
        parser = os.path.join(
            self.myconfig('rvthome'),
            "plugins/external/spotlight_parser/spotlight_parser.py")
        spotlight = search.search(r"/\.spotlight.*/store.db$")

        spotlight_path = self.myconfig('outdir')
        check_folder(spotlight_path)

        # TODO: adapt external spotlight_parser.py script to python3
        python = self.myconfig('python', '/usr/bin/python')

        n = 1
        errorlog = os.path.join(self.myconfig('sourcedir'),
                                "{}_aux.log".format(self.myconfig('source')))
        with open(errorlog, 'a') as logfile:
            for f in spotlight:
                self.logger().info("Processing file {}".format(f))
                run_command([
                    python, parser,
                    os.path.join(self.myconfig('casedir'), f), spotlight_path,
                    "-p",
                    "spot-%s" % str(n)
                ],
                            stdout=logfile,
                            stderr=logfile)
                n += 1
        self.logger().info("Spotlight done")
        return []
Example #10
    def run(self, path=""):
        """ Parses lnk files, jumlists and customdestinations

        """
        self.logger().info("Extraction of lnk files")

        self.Files = GetFiles(self.config, vss=self.myflag("vss"))
        self.filesystem = FileSystem(self.config)
        self.vss = self.myflag('vss')
        self.mountdir = self.myconfig('mountdir')

        lnk_path = self.myconfig('{}outdir'.format('v' if self.vss else ''))
        check_folder(lnk_path)

        users = get_user_list(self.mountdir, self.vss)
        artifacts = {
            'lnk': {
                'filename': "{}_lnk.csv",
                'regex': r"{}/.*\.lnk$",
                'function': self.lnk_parser
            },
            'autodest': {
                'filename': "{}_jl.csv",
                'regex': r"{}/.*\.automaticDestinations-ms$",
                'function': self.automaticDest_parser
            },
            'customdest': {
                'filename': "{}_jlcustom.csv",
                'regex': r"{}/.*\.customDestinations-ms$",
                'function': self.customDest_parser
            }
        }

        for user in users:
            usr = "******".format(user.split("/")[0], user.split("/")[2])

            for a_name, artifact in artifacts.items():
                out_file = os.path.join(lnk_path,
                                        artifact['filename'].format(usr))
                files_list = list(
                    self.Files.search(artifact['regex'].format(user)))
                self.logger().info(
                    "Found {} {} files for user {} at {}".format(
                        len(files_list), a_name,
                        user.split("/")[-1],
                        user.split("/")[0]))
                if len(files_list) > 0:
                    save_csv(artifact['function'](files_list),
                             config=self.config,
                             outfile=out_file,
                             quoting=0,
                             file_exists='OVERWRITE')
                    self.logger().info(
                        "{} extraction done for user {} at {}".format(
                            a_name,
                            user.split("/")[-1],
                            user.split("/")[0]))

        self.logger().info("RecentFiles extraction done")
        return []
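
The artifacts mapping is a small dispatch table: one filename template, one regex and one parser per artifact type. Reduced to its core with a stub parser:

    def lnk_parser(files):
        return ({'file': f} for f in files)  # stub standing in for the real parser

    artifacts = {
        'lnk': {'filename': '{}_lnk.csv', 'regex': r'{}/.*\.lnk$',
                'function': lnk_parser},
    }
    user = 'p01/Users/alice'  # illustrative
    for name, artifact in artifacts.items():
        print(name, artifact['regex'].format(user))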
Example #11
    def ProcessActiveDirectoryPlist(self):
        '''
        Extract active directory artifacts

        Based on mac_apt plugin from https://github.com/ydkhatri/mac_apt
        '''
        search = GetFiles(self.config, vss=self.myflag("vss"))
        network_paths = search.search(
            "/Library/Preferences/OpenDirectory/Configurations/Active Directory$"
        )

        out = open(
            os.path.join(self.myconfig('outdir'),
                         'Domain_ActiveDirectory.csv'), 'w')
        writer = csv.writer(out, delimiter="|", quotechar='"')
        headers = [
            "node name", "trustaccount", "trustkerberosprincipal", "trusttype",
            "allow multi-domain", "cache last user logon", "domain", "forest",
            "trust domain", "source"
        ]
        writer.writerow(headers)

        for plist_path in network_paths:
            active_directory = {'source': plist_path}
            for archive in sorted(
                    os.listdir(
                        os.path.join(self.myconfig('casedir'), plist_path))):
                plist = biplist.readPlist(
                    os.path.join(self.myconfig('casedir'), plist_path,
                                 archive))
                try:
                    for item, value in plist.items():
                        if item in [
                                'node name', 'trustaccount',
                                'trustkerberosprincipal', 'trusttype'
                        ]:
                            active_directory[item] = value
                    ad_dict = plist['module options']['ActiveDirectory']
                    for item, value in ad_dict.items():
                        if item in [
                                'allow multi-domain', 'cache last user logon',
                                'domain', 'forest', 'trust domain'
                        ]:
                            active_directory[item] = value
                except Exception:
                    self.logger().error('Error reading plist %s' %
                                        os.path.join(plist_path, archive))
                writer.writerow([active_directory[d] for d in headers])
        out.close()
        return []
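
biplist parses binary plists into plain Python dictionaries, which is what the loop above iterates over. The read step on its own (path hypothetical):

    import biplist

    plist = biplist.readPlist('/case/mnt/p01/Library/Preferences/OpenDirectory/'
                              'Configurations/Active Directory/ad.plist')
    print(plist.get('node name'), plist.get('trusttype'))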
Example #12
    def run(self, path=""):
        search = GetFiles(self.config, vss=self.myflag("vss"))
        nusage = search.search("/netusage.sqlite$")
        output = os.path.join(self.myconfig('outdir'), "network_usage.txt")

        with open(output, "w") as out:
            for k in nusage:
                self.logger().info(
                    "Extracting information of file {}".format(k))
                with sqlite3.connect('file://{}?mode=ro'.format(
                        os.path.join(self.myconfig('casedir'), k)),
                                     uri=True) as conn:
                    conn.text_factory = str
                    c = conn.cursor()

                    out.write(
                        "{}\n------------------------------------------\n".
                        format(k))
                    query = '''SELECT pk.z_name as item_type, na.zidentifier as item_name, na.zfirsttimestamp as first_seen_date, na.ztimestamp as last_seen_date,
rp.ztimestamp as rp_date, rp.zbytesin, rp.zbytesout FROM znetworkattachment as na LEFT JOIN z_primarykey pk ON na.z_ent = pk.z_ent
LEFT JOIN zliverouteperf rp ON rp.zhasnetworkattachment = na.z_pk ORDER BY pk.z_name, zidentifier, rp_date desc;'''.replace(
                        '\n', ' ').upper()
                    c.execute(query)

                    out.write(
                        "\n\nitem_type|item_name|first_seen_date|last_seen_date|rp_date|ZBYTESIN|ZBYTESOUT\n--|--|--|--|--|--|--\n"
                    )
                    for i in c.fetchall():
                        out.write("{}|{}|{}|{}|{}|{}|{}\n".format(
                            i[0], i[1], i[2], i[3], i[4], i[5], i[6]))

                    query = '''SELECT pk.z_name as item_type ,p.zprocname as process_name, p.zfirsttimestamp as first_seen_date, p.ztimestamp as last_seen_date,
lu.ztimestamp as usage_since, lu.zwifiin, lu.zwifiout, lu.zwiredin, lu.zwiredout, lu.zwwanin, lu.zwwanout FROM zliveusage lu
LEFT JOIN zprocess p ON p.z_pk = lu.zhasprocess LEFT JOIN z_primarykey pk ON p.z_ent = pk.z_ent ORDER BY process_name;'''.replace(
                        '\n', ' ').upper()
                    c.execute(query)

                    out.write(
                        "\n\nitem_type|process_name|first_seen_date|last_seen_date|usage_since|ZWIFIIN|ZWIFIOUT|ZWIREDIN|ZWIREDOUT|ZWWANIN|ZWWANOUT\n--|--|--|--|--|--|--|--|--|--|--\n"
                    )
                    for i in c.fetchall():
                        out.write("{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}\n".format(
                            i[0], i[1], i[2], i[3], i[4], i[5], i[6], i[7],
                            i[8], i[9], i[10]))
                    out.write("\n")
                    c.close()

        self.logger().info("Done parsing netusage.sqlite")
        return []
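
Opening evidence databases through a SQLite URI with mode=ro keeps the connection read-only, so queries cannot touch the source file. The connection pattern on its own (path hypothetical):

    import sqlite3

    conn = sqlite3.connect(
        'file:///case/mnt/p01/private/var/networkd/netusage.sqlite?mode=ro',
        uri=True)
    conn.text_factory = str  # mirrors the module above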
Example #13
    def run(self, path=""):
        self.vss = self.myflag('vss')
        self.search = GetFiles(self.config, vss=self.myflag("vss"))
        self.outfolder = self.myconfig(
            'voutdir') if self.vss else self.myconfig('outdir')
        check_directory(self.outfolder, create=True)

        self.logger().info(
            "Parsing artifacts from scheduled tasks files (.job)")
        self.parse_Task()
        self.logger().info(
            "Parsing artifacts from Task Scheduler Service log files (schedlgu.txt)"
        )
        self.parse_schedlgu()
        return []
Example #14
class ActivitiesCache(base.job.BaseModule):

    def run(self, path=""):
        """ Parses activities cache

        """

        self.search = GetFiles(self.config, vss=self.myflag("vss"))
        self.logger().info("Parsing Activities Cache files")
        vss = self.myflag('vss')

        if vss:
            base_path = self.myconfig('voutdir')
        else:
            base_path = self.myconfig('outdir')
        check_folder(base_path)

        activities = self.search.search("/ConnectedDevicesPlatform/.*/ActivitiesCache.db$")

        activities_cache_parser = self.myconfig('activities_cache_parser', os.path.join(self.myconfig('rvthome'), '.venv/bin/winactivities2json.py'))
        python3 = self.myconfig('python3', os.path.join(self.myconfig('rvthome'), '.venv/bin/python3'))

        for act in activities:
            with open(os.path.join(base_path, '{}_activitycache_{}.json'.format(act.split('/')[2], act.split('/')[-2])), 'w') as out_file:
                run_command([python3, activities_cache_parser, '-s', act], from_dir=self.myconfig('casedir'), stdout=out_file)
        return []
Example #15
    def run(self, path=""):
        """ Get information of hiberfil.sys

        """
        volatility = self.config.get('plugins.common', 'volatility',
                                     '/usr/local/bin/vol.py')

        hiber_path = self.myconfig('outdir')
        check_folder(hiber_path)

        search = GetFiles(self.config, vss=self.myflag("vss"))
        hiberlist = search.search("/hiberfil.sys$")

        for h in hiberlist:
            aux = re.search(
                "{}/([^/]*)/".format(
                    base.utils.relative_path(self.myconfig('mountdir'),
                                             self.myconfig('casedir'))), h)
            partition = aux.group(1)

            hiber_raw = os.path.join(hiber_path,
                                     "hiberfil_{}.raw".format(partition))
            profile, version = self.get_win_profile(partition)
            with open(
                    os.path.join(hiber_path,
                                 "hiberinfo_{}.txt".format(partition)),
                    'w') as pf:
                pf.write("Profile: %s\nVersion: %s" % (profile, version))
            if version.startswith("5") or version.startswith(
                    "6.0") or version.startswith("6.1"):
                self.logger().info("Uncompressing {}".format(h))
                run_command([
                    volatility, "--profile={}".format(profile), "-f",
                    os.path.join(self.myconfig('casedir'), h), "imagecopy",
                    "-O", hiber_raw
                ],
                            logger=self.logger())
            else:
                self.logger().info(
                    "{} could not be decompressed on a Linux distro".format(h))
                self.logger().info(
                    "Decompress Windows 8 or higher hiberfil.sys files using https://arsenalrecon.com/weapons/hibernation-recon/"
                )
                self.logger().info("Save the output at {}".format(hiber_raw))
            self.vol_extract(hiber_raw, profile, version)
        return []
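
The branch keys on the Windows NT version: only hibernation files up to NT 6.1 (Windows 7) are converted with volatility's imagecopy, as the log messages explain. A compact form of that test:

    def linux_decompressable(version):
        """hiberfil.sys up to Windows 7 (NT 5.x-6.1), per the branch above."""
        return version.startswith(('5', '6.0', '6.1'))

    print(linux_decompressable('6.1'), linux_decompressable('6.3'))  # True False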
Example #16
class ShimCache(base.job.BaseModule):
    """ Extracts ShimCache information from registry hives. """

    # TODO: .sdb shim database files (ex: Windows/AppPatch/sysmain.sdb)

    def run(self, path=""):
        self.search = GetFiles(self.config, vss=self.myflag("vss"))
        self.vss = self.myflag('vss')
        self.logger().info("Parsing ShimCache from registry")

        outfolder = self.myconfig('voutdir') if self.vss else self.myconfig(
            'outdir')
        SYSTEM = list(self.search.search(r"windows/System32/config/SYSTEM$"))
        check_directory(outfolder, create=True)

        partition_list = set()
        for f in SYSTEM:
            aux = re.search(r"([vp\d]*)/windows/System32/config", f, re.I)
            partition_list.add(aux.group(1))

        output_files = {
            p: os.path.join(outfolder, "shimcache_%s.csv" % p)
            for p in partition_list
        }

        for f in SYSTEM:
            save_csv(self.parse_ShimCache_hive(f),
                     outfile=output_files[f.split("/")[2]],
                     file_exists='OVERWRITE',
                     quoting=0)

        self.logger().info("Finished extraction from ShimCache")
        return []

    def parse_ShimCache_hive(self, sysfile):
        """ Launch shimcache regripper plugin and parse results """
        ripcmd = self.config.get('plugins.common', 'rip',
                                 '/opt/regripper/rip.pl')
        date_regex = re.compile(
            r'\w{3}\s\w{3}\s+\d+\s\d{2}:\d{2}:\d{2}\s\d{4} Z')

        res = run_command([
            ripcmd, "-r",
            os.path.join(self.myconfig('casedir'), sysfile), "-p", "shimcache"
        ],
                          logger=self.logger())
        for line in res.split('\n'):
            if ':' not in line[:4]:
                continue
            matches = re.search(date_regex, line)
            if matches:
                path = line[:matches.span()[0] - 2]
                date = str(
                    datetime.datetime.strptime(matches.group(),
                                               '%a %b %d %H:%M:%S %Y Z'))
                executed = bool(len(line[matches.span()[1]:]))
                yield OrderedDict([('LastModified', date), ('AppPath', path),
                                   ('Executed', executed)])
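
The date regex anchors the split of each rip.pl line into path, timestamp and the trailing 'Executed' flag. On a synthetic line:

    import re

    line = r'C:\Apps\tool.exe  Tue Mar 03 11:22:33 2015 Z  Executed'
    m = re.search(r'\w{3}\s\w{3}\s+\d+\s\d{2}:\d{2}:\d{2}\s\d{4} Z', line)
    print(line[:m.span()[0] - 2])    # -> C:\Apps\tool.exe
    print(bool(line[m.span()[1]:]))  # -> True, something follows the date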
Example #17
class Bits(base.job.BaseModule):
    """ Parse Background Intelligent Transfer Service. """
    def run(self, path=""):
        self.search = GetFiles(self.config, vss=self.myflag("vss"))
        self.vss = self.myflag('vss')
        self.logger().info("Parsing Bits database")
        self.parse_BITS()
        return []

    def parse_BITS(self):
        if self.vss:
            base_path = self.myconfig('voutdir')
            bitsdb = self.search.search(
                r"v\d+p\d+/ProgramData/Microsoft/Network/Downloader/qmgr0.dat$"
            )
        else:
            base_path = self.myconfig('outdir')
            bitsdb = self.search.search(
                r"p\d+/ProgramData/Microsoft/Network/Downloader/qmgr0.dat$")
        check_directory(base_path, create=True)

        fields = OrderedDict([('job_id', None), ('name', None), ('desc', None),
                              ('type', None), ('priority', None),
                              ('sid', None), ('state', None), ('cmd', None),
                              ('args', None), ('file_count', 0),
                              ('file_id', 0), ('dest_fn', None),
                              ('src_fn', None), ('tmp_fn', None),
                              ('download_size', -1), ('transfer_size', -1),
                              ('drive', None), ('vol_guid', None),
                              ('ctime', None), ('mtime', None),
                              ('other_time0', None), ('other_time1', None),
                              ('other_time2', None), ('carved', False)])

        for f in bitsdb:
            analyzer = bits.Bits.load_file(
                os.path.join(self.myconfig('casedir'), f))
            jobs = analyzer.parse()
            res_generator = (OrderedDict([(field, j.get(field, fields[field]))
                                          for field in fields]) for j in jobs)
            output_file = os.path.join(base_path,
                                       "bitsdb_%s.csv" % f.split("/")[2])
            save_csv(res_generator,
                     outfile=output_file,
                     file_exists='OVERWRITE')
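
The fields OrderedDict doubles as output schema and per-field default; every parsed job is normalized against it, so missing keys come out as None, 0, -1 or False. Isolated:

    from collections import OrderedDict

    fields = OrderedDict([('job_id', None), ('file_count', 0), ('carved', False)])
    job = {'job_id': 'f3b0-job'}  # a job dict as the analyzer might return it
    row = OrderedDict((k, job.get(k, fields[k])) for k in fields)
    print(row)  # job_id filled in, file_count 0, carved False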
Example #18
    def run(self, path=""):
        if not os.path.isdir(self.myconfig('mountdir')):
            raise base.job.RVTError("Folder {} not exists".format(
                self.myconfig('mountdir')))

        search = GetFiles(self.config, vss=self.myflag("vss"))
        dsstore_files = search.search(r"/\.ds_store$")

        output1 = os.path.join(self.myconfig('outdir'), "dsstore_dump.txt")
        output2 = os.path.join(self.myconfig('outdir'), "dsstore.txt")

        with open(output1, 'w') as out1:
            filelist = set()
            n_stores = 0
            for dstores in dsstore_files:
                out1.write(
                    "{}\n-------------------------------\n".format(dstores))
                with open(os.path.join(self.myconfig('casedir'), dstores),
                          "rb") as ds:
                    try:
                        d = dsstore.DS_Store(ds.read(), debug=False)
                        files = d.traverse_root()
                        for f in files:
                            filelist.add(
                                os.path.join(os.path.dirname(dstores), f))
                            out1.write("%s\n" % f)
                    except Exception as exc:
                        self.logger().warning(
                            "Problems parsing file {}. Error: {}".format(
                                dstores, exc))
                n_stores += 1
                out1.write("\n")

        self.logger().info("Founded {} .DS_Store files".format(n_stores))

        with open(output2, "w") as out:
            for f in sorted(filelist):
                out.write("%s\n" % f)
        self.logger().info("ParseDSStore Done")
        return []
Example #19
class SysCache(base.job.BaseModule):
    def run(self, path=""):
        self.search = GetFiles(self.config, vss=self.myflag("vss"))
        self.vss = self.myflag('vss')
        self.logger().info("Parsing Syscache from registry")
        self.parse_SysCache_hive()
        return []

    def parse_SysCache_hive(self):
        outfolder = self.myconfig('voutdir') if self.vss else self.myconfig(
            'outdir')
        # self.tl_file = os.path.join(self.myconfig('timelinesdir'), "%s_BODY.csv" % self.myconfig('source'))
        check_directory(outfolder, create=True)
        SYSC = self.search.search(r"/System Volume Information/SysCache.hve$")

        ripcmd = self.config.get('plugins.common', 'rip',
                                 '/opt/regripper/rip.pl')

        for f in SYSC:
            p = f.split('/')[2]
            output_text = run_command([
                ripcmd, "-r",
                os.path.join(self.myconfig('casedir'), f), "-p", "syscache_csv"
            ],
                                      logger=self.logger())
            output_file = os.path.join(outfolder, "syscache_%s.csv" % p)

            self.path_from_inode = FileSystem(
                config=self.config).load_path_from_inode(self.myconfig,
                                                         p,
                                                         vss=self.vss)

            save_csv(self.parse_syscache_csv(p, output_text),
                     outfile=output_file,
                     file_exists='OVERWRITE')

        self.logger().info("Finished extraction from SysCache")

    def parse_syscache_csv(self, partition, text):
        for line in text.split('\n')[:-1]:
            line = line.split(",")
            fileID = line[1]
            inode = line[1].split('/')[0]
            name = self.path_from_inode.get(inode, [''])[0]
            try:
                yield OrderedDict([("Date", dateutil.parser.parse(
                    line[0]).strftime("%Y-%m-%dT%H:%M:%SZ")), ("Name", name),
                                   ("FileID", fileID), ("Sha1", line[2])])
            except Exception:
                yield OrderedDict([("Date", dateutil.parser.parse(
                    line[0]).strftime("%Y-%m-%dT%H:%M:%SZ")), ("Name", name),
                                   ("FileID", fileID), ("Sha1", "")])
Example #20
    def generate(self, evtx_path):
        """ Auxiliary function """

        check_directory(evtx_path, create=True)
        evtx = self.config.get('plugins.common', 'evtxdump',
                               '/usr/local/bin/evtxdump.pl')

        alloc_files = GetFiles(self.config, vss=self.myflag("vss"))
        if self.vss:
            evtx_files = alloc_files.search(r"{}.*\.evtx$".format(
                evtx_path.split('/')[-1]))
        else:
            evtx_files = alloc_files.search(r"\.evtx$")

        errorlog = self.myconfig(
            'errorlog',
            os.path.join(self.myconfig('sourcedir'),
                         "{}_aux.log".format(self.myconfig('source'))))

        for i in evtx_files:
            evtx_file = os.path.join(self.myconfig('casedir'), i)
            if not check_file(evtx_file):
                self.logger().warning('File %s does not exist', evtx_file)
                continue
            self.logger().info("Parsing {}".format(i))
            name = os.path.join(evtx_path, os.path.basename(i))[:-4] + "txt"

            # if the output already exists, continue
            if check_file(name):
                self.logger().debug(
                    'The output file %s already exists. Skipping', name)
                continue

            with open(name, "wb") as f:
                with open(errorlog, 'a') as logfile:
                    run_command([evtx, evtx_file],
                                stdout=f,
                                stderr=logfile,
                                logger=self.logger())
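
The output name keeps the event log's base name and swaps the .evtx suffix for .txt:

    import os

    i = 'p01/Windows/System32/winevt/Logs/System.evtx'  # illustrative
    name = os.path.join('/out/evtx', os.path.basename(i))[:-4] + 'txt'
    print(name)  # -> /out/evtx/System.txt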
Example #21
    def run(self, path=""):
        search = GetFiles(self.config, vss=self.myflag("vss"))
        quarantine = search.search(
            "/com.apple.LaunchServices.QuarantineEventsV2$")

        output = os.path.join(self.myconfig('outdir'), "quarantine.txt")

        with open(output, "w") as out:
            for k in quarantine:
                self.logger().info(
                    "Extracting information of file {}".format(k))
                with sqlite3.connect('file://{}?mode=ro'.format(
                        os.path.join(self.myconfig('casedir'), k)),
                                     uri=True) as conn:
                    conn.text_factory = str
                    c = conn.cursor()

                    out.write(
                        "{}\n------------------------------------------\n".
                        format(k))
                    query = '''SELECT LSQuarantineEventIdentifier as id, LSQuarantineTimeStamp as ts, LSQuarantineAgentBundleIdentifier as bundle,
LSQuarantineAgentName as agent_name, LSQuarantineDataURLString as data_url,
LSQuarantineSenderName as sender_name, LSQuarantineSenderAddress as sender_add, LSQuarantineTypeNumber as type_num,
LSQuarantineOriginTitle as o_title, LSQuarantineOriginURLString as o_url, LSQuarantineOriginAlias as o_alias
FROM LSQuarantineEvent  ORDER BY ts;'''.replace('\n', ' ')
                    c.execute(query)

                    out.write(
                        "\n\nid|ts|bundle|agent_name|data_url|sender_name|sender_add|type_num|o_title|o_url|o_alias\n--|--|--|--|--|--|--|--|--|--|--\n"
                    )
                    for i in c.fetchall():
                        out.write("{}|{}|{}|{}|{}|{}|{}\n".format(
                            i[0], i[1], i[2], i[3], i[4], i[5], i[6], i[7],
                            i[8], i[9], i[10]))
                    out.write("\n")
                    c.close()

        self.logger().info("Done parsing QuarantineEvents")
        return []
Example #22
    def run(self, path=None):
        """ Export all pst and ost files in a mounted image. Path is ignored. """
        pffexport = self.myconfig('pffexport')

        outdir = self.myconfig('outdir')
        base.utils.check_directory(outdir, create=True)

        pst_files = GetFiles(
            self.config, vss=self.myflag("vss")).search(r"\.(pst|ost|nst)$")
        index = 0

        for pst_file in tqdm(pst_files,
                             desc=self.section,
                             disable=self.myflag('progress.disable')):
            index += 1
            # save metadata
            yield dict(filename=pst_file,
                       outdir="pff-{}".format(index),
                       index=index)
            try:
                if not os.path.exists(
                        os.path.join(self.myconfig('casedir'), pst_file)):
                    self.logger().warning('File %s does not exist', pst_file)
                    continue
                out_path = os.path.join(outdir, "pff-{}".format(index))
                self.logger().debug("Exporting %s to %s", pst_file, out_path)
                # check if the output directory exist
                for directory in [
                        '{}.export'.format(out_path),
                        '{}.recovered'.format(out_path)
                ]:
                    if base.utils.check_directory(directory):
                        if self.myflag('delete_exists'):
                            base.utils.check_directory(directory,
                                                       delete_exists=True)
                        else:
                            continue
                run_command([
                    pffexport, '-f', 'text', '-m', 'all', '-q', '-t', out_path,
                    pst_file
                ],
                            stderr=subprocess.DEVNULL,
                            from_dir=self.myconfig('casedir'))
            except Exception as exc:
                if self.myflag('stop_on_error'):
                    self.logger().error('Exception %s: %s',
                                        type(exc).__name__, exc)
                    raise base.job.RVTError(exc)
                else:
                    self.logger().warning('Exception %s: %s',
                                          type(exc).__name__, exc)
Example #23
    def run(self, path=""):
        if not os.path.isdir(self.myconfig('mountdir')):
            raise base.job.RVTError("Folder {} not exists".format(
                self.myconfig('mountdir')))

        search = GetFiles(self.config, vss=self.myflag("vss"))
        plist_files = search.search(r"\.plist$")

        plist_num = 0
        with open(os.path.join(self.myconfig('outdir'), "plist_dump.txt"),
                  'wb') as output:
            for pl in plist_files:
                plist_num += 1
                output.write("{}\n-------------------------------\n".format(
                    pl).encode())
                # try:
                #     text = subprocess.check_output(["plistutil", "-i", os.path.join(self.myconfig('mountdir'), pl)])
                #     output.write(text)
                #     output.write(b"\n\n")
                # except:
                #     self.logger().warning("Problems with file %s" % pl)
                #     output.write(b"\n\n")

                try:
                    plist = biplist.readPlist(
                        os.path.join(self.myconfig('casedir'), pl))
                    output.write(self.pprint(plist) + b"\n\n")
                except (biplist.InvalidPlistException,
                        biplist.NotBinaryPlistException):
                    self.logger().info("%s not a plist file or is corrupted" %
                                       pl)
                    output.write(b"\n\n")
                except Exception:
                    self.logger().info("Problems with file %s" % pl)

        self.logger().info("Founded {} plist files".format(plist_num))
        self.logger().info("Done parsing Plist")
        return []
Example #24
    def get_hives(self, p):
        """ Obtain the paths to registry hives

        Arguments:
            p (str): partition number. Ex: 'p03'
        """
        regfiles = {}

        Find = GetFiles(self.config, vss=self.myflag("vss"))

        for item in Find.search(
                "{}/Windows/System32/config/(SYSTEM|SOFTWARE|SAM|SECURITY)$".
                format(p)):
            hive = item.split('/')[-1].lower()
            regfiles[hive] = os.path.join(self.myconfig('casedir'), item)

        if "software" not in regfiles.keys():
            self.logger().warning(
                'SOFTWARE hive not found in partition {}. Skipping this partition'
                .format(p))
            return {}

        NTUSER = Find.search(
            r"{}/(Documents and settings|users)/.*/(NTUSER|UsrClass)\.dat$".
            format(p))

        usr = []
        regfiles["ntuser"] = {}
        regfiles["usrclass"] = {}

        for item in NTUSER:
            aux = re.search("(Documents and settings|Users)/([^/]*)/", item,
                            re.I)
            user = aux.group(2)
            if user not in usr:
                usr.append(user)
                regfiles["ntuser"][user] = ""
                regfiles["usrclass"][user] = ""
            if item.lower().endswith("ntuser.dat"):
                regfiles["ntuser"][user] = os.path.join(
                    self.myconfig('casedir'), item)
            else:
                regfiles["usrclass"][user] = os.path.join(
                    self.myconfig('casedir'), item)

        amcache = list(
            Find.search("{}/Windows/AppCompat/Programs/Amcache.hve".format(p)))
        if len(amcache) != 0:
            regfiles["amcache"] = os.path.join(self.myconfig('casedir'),
                                               amcache[0])
        syscache = list(Find.search(r"{}.*/syscache.hve$".format(p)))
        if len(syscache) != 0:
            regfiles["syscache"] = os.path.join(self.myconfig('casedir'),
                                                syscache[0])

        return regfiles
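
For reference, the mapping get_hives returns has this shape (values illustrative):

    regfiles = {
        'system': '/case/mnt/p03/Windows/System32/config/SYSTEM',
        'software': '/case/mnt/p03/Windows/System32/config/SOFTWARE',
        'ntuser': {'alice': '/case/mnt/p03/Users/alice/NTUSER.DAT'},
        'usrclass': {'alice': '/case/mnt/p03/Users/alice/AppData/Local/Microsoft/Windows/UsrClass.dat'},
        'amcache': '/case/mnt/p03/Windows/AppCompat/Programs/Amcache.hve',
    }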
Example #25
    def GetDhcpInfo(self):
        '''Read dhcp leases & interface entries

           Based on mac_apt plugin from https://github.com/ydkhatri/mac_apt
        '''
        search = GetFiles(self.config, vss=self.myflag("vss"))
        interfaces_path = search.search("/private/var/db/dhcpclient/leases$")

        out = open(os.path.join(self.myconfig('outdir'), 'Network_DHCP.csv'),
                   'w')
        writer = csv.writer(out, delimiter="|", quotechar='"')
        headers = [
            "Interface", "MAC_Address", "IPAddress", "LeaseLength",
            "LeaseStartDate", "PacketData", "RouterHardwareAddress",
            "RouterIPAddress", "SSID", "Source"
        ]
        writer.writerow(headers)

        for interface in interfaces_path:
            for name in sorted(
                    os.listdir(
                        os.path.join(self.myconfig('casedir'), interface))):
                if name.find(",") > 0:
                    # Process plist
                    name_no_ext = os.path.splitext(
                        name
                    )[0]  # not needed as there is no .plist extension on these files
                    if_name, mac_address = name_no_ext.split(",")
                    self.logger().info(
                        "Found mac address = {} on interface {}".format(
                            mac_address, if_name))

                    self.logger().debug("Trying to read {}".format(name))

                    plist = biplist.readPlist(
                        os.path.join(self.myconfig('casedir'), interface,
                                     name))
                    interface_info = {}
                    for c in headers:
                        interface_info[c] = ""
                    interface_info['Source'] = os.path.join(
                        '/private/var/db/dhcpclient/leases', name)
                    interface_info['Interface'] = if_name
                    interface_info['MAC_Address'] = mac_address

                    for item, value in plist.items():
                        if item in ('IPAddress', 'LeaseLength',
                                    'LeaseStartDate', 'RouterIPAddress',
                                    'SSID'):
                            interface_info[item] = value
                        elif item == 'RouterHardwareAddress':  # convert binary blob to MAC address
                            data = value.hex().upper()
                            data = [data[2 * n:2 * n + 2] for n in range(6)]
                            interface_info[item] = ":".join(data)
                        elif item == 'PacketData':
                            interface_info['PacketData'] = value.hex().upper()
                        else:
                            self.logger().info(
                                "Found unknown item in plist: ITEM=" + item +
                                " VALUE=" + str(value))
                    writer.writerow([interface_info[c] for c in headers])
                else:
                    self.logger().info(
                        "Found unexpected file, not processing /private/var/db/dhcpclient/leases/{}"
                        .format(name))
            # Done processing interfaces!
        out.close()
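
The RouterHardwareAddress branch turns a 6-byte binary blob into a readable MAC address. Isolated:

    value = bytes([0x00, 0x1C, 0x42, 0xAA, 0xBB, 0xCC])  # synthetic blob
    data = value.hex().upper()
    print(':'.join(data[2 * n:2 * n + 2] for n in range(6)))  # -> 00:1C:42:AA:BB:CC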
Example #26
    def GetNetworkInterface2Info(self):
        '''Read interface info from /Library/Preferences/SystemConfiguration/preferences.plist

        Based on mac_apt plugin from https://github.com/ydkhatri/mac_apt
        '''
        search = GetFiles(self.config, vss=self.myflag("vss"))
        network = search.search(
            "/Library/Preferences/SystemConfiguration/preferences.plist$")

        with open(os.path.join(self.myconfig('outdir'), 'Network_Details.csv'),
                  'w') as out:
            writer = csv.writer(out, delimiter="|", quotechar='"')
            headers = [
                "UUID", "IPv4.ConfigMethod", "IPv6.ConfigMethod", "DeviceName",
                "Hardware", "Type", "SubType", "UserDefinedName",
                "Proxies.ExceptionsList", "SMB.NetBIOSName", "SMB.Workgroup",
                "PPP", "Modem"
            ]
            writer.writerow(headers)
            for net in network:
                plist = biplist.readPlist(
                    os.path.join(self.myconfig('casedir'), net))
                for uuid in plist['NetworkServices'].keys():
                    data = [uuid] + [""] * 12
                    if 'IPv4' in plist['NetworkServices'][uuid].keys():
                        data[1] = plist['NetworkServices'][uuid]['IPv4'][
                            'ConfigMethod']
                    if 'IPv6' in plist['NetworkServices'][uuid].keys():
                        data[2] = plist['NetworkServices'][uuid]['IPv6'][
                            'ConfigMethod']
                    if 'Interface' in plist['NetworkServices'][uuid].keys():
                        data[3] = plist['NetworkServices'][uuid]['Interface'][
                            'DeviceName']
                        data[4] = plist['NetworkServices'][uuid]['Interface'][
                            'Hardware']
                        data[5] = plist['NetworkServices'][uuid]['Interface'][
                            'Type']
                        if 'SubType' in plist['NetworkServices'][uuid][
                                'Interface'].keys():
                            data[6] = plist['NetworkServices'][uuid][
                                'Interface']['SubType']
                        data[7] = plist['NetworkServices'][uuid]['Interface'][
                            'UserDefinedName']

                    if 'Proxies' in plist['NetworkServices'][uuid].keys(
                    ) and 'ExceptionsList' in plist['NetworkServices'][uuid][
                            'Proxies'].keys():
                        data[8] = ",".join(plist['NetworkServices'][uuid]
                                           ['Proxies']['ExceptionsList'])
                    if 'SMB' in plist['NetworkServices'][uuid].keys():
                        try:
                            data[9] = plist['NetworkServices'][uuid]['SMB'][
                                'NetBIOSName']
                            data[10] = plist['NetworkServices'][uuid]['SMB'][
                                'Workgroup']
                        except Exception:
                            pass
                    if 'PPP' in plist['NetworkServices'][uuid].keys():
                        data[11] = str(plist['NetworkServices'][uuid]['PPP'])
                    if 'Modem' in plist['NetworkServices'][uuid].keys():
                        data[12] = str(plist['NetworkServices'][uuid]['Modem'])
                    writer.writerow(data)
Example #27
    def GetNetworkInterfaceInfo(self):
        '''Read interface info from NetworkInterfaces.plist
        modified from networking plugin from https://github.com/ydkhatri/mac_apt'''

        search = GetFiles(self.config, vss=self.myflag("vss"))
        network = search.search(
            "/Library/Preferences/SystemConfiguration/NetworkInterfaces.plist$"
        )
        classes = [
            'Active', 'BSD Name', 'IOBuiltin', 'IOInterfaceNamePrefix',
            'IOInterfaceType', 'IOInterfaceUnit', 'IOPathMatch',
            'SCNetworkInterfaceType'
        ]

        out = open(
            os.path.join(self.myconfig('outdir'), 'Network_Interfaces.csv'),
            'w')
        writer = csv.writer(out, delimiter="|", quotechar='"')
        headers = [
            "Category", "Active", "BSD Name", "IOBuiltin",
            "IOInterfaceNamePrefix", "IOInterfaceType", "IOInterfaceUnit",
            "IOMACAddress", "IOPathMatch", "SCNetworkInterfaceInfo",
            "SCNetworkInterfaceType", "Source"
        ]
        writer.writerow(headers)

        for net in network:
            self.logger().debug("Trying to read {}".format(net))
            plist = biplist.readPlist(
                os.path.join(self.myconfig('casedir'), net))
            try:
                self.logger().info("Model = %s" % plist['Model'])
            except Exception:
                pass
            for category, cat_array in plist.items(
            ):  # value is another array in this dict
                if not category.startswith('Interface'):
                    if category != 'Model':
                        self.logger().debug('Skipping %s' % category)
                    continue
                for interface in cat_array:
                    interface_info = {'Category': category, 'Source': net}
                    for c in classes:
                        interface_info[c] = ""
                    for item, value in interface.items():
                        if item in classes:
                            interface_info[item] = value
                        elif item == 'IOMACAddress':  # convert binary blob to MAC address
                            data = value.hex().upper()
                            data = [data[2 * n:2 * n + 2] for n in range(6)]
                            interface_info[item] = ":".join(data)
                        elif item == 'SCNetworkInterfaceInfo':
                            try:
                                interface_info[
                                    'SCNetworkInterfaceInfo'] = value[
                                        'UserDefinedName']
                            except Exception:
                                pass
                        else:
                            self.logger().info(
                                "Found unknown item in plist: ITEM=" + item +
                                " VALUE=" + str(value))
                    writer.writerow([interface_info[c] for c in headers])
        out.close()
Example #28
    def run(self, path=""):
        if not os.path.isdir(self.myconfig('mountdir')):
            raise base.job.RVTError("Folder {} not exists".format(
                self.myconfig('mountdir')))

        info_path = self.myconfig('outdir')
        check_folder(info_path)
        search = GetFiles(self.config, vss=self.myflag("vss"))
        asl_files = list(search.search(r"var/log/asl/.*\.asl$"))

        # asl dump
        with open(os.path.join(info_path, "asldump.csv"), "w") as out_asl:
            writer = csv.writer(out_asl, delimiter="|", quotechar='"')
            headers = [
                "Timestamp", "Host", "Sender", "PID", "Reference Process",
                "Reference PID", "Facility", "Level", "Message",
                "Other details"
            ]
            writer.writerow(headers)
            for file in asl_files:
                self.logger().info("Processing: {}".format(file))
                try:
                    f = open(os.path.join(self.myconfig('casedir'), file),
                             "rb")
                except IOError as e:
                    self.logger().error(
                        "Could not open file '{}' ({}): Skipping this file".
                        format(file, e))
                    continue

                try:
                    db = ccl_asldb.AslDb(f)
                except ccl_asldb.AslDbError as e:
                    self.logger().error(
                        "Could not read file as ASL DB '{}' ({}): Skipping this file"
                        .format(file, e))
                    f.close()
                    continue

                for record in db:
                    writer.writerow([
                        record.timestamp.isoformat(), record.host,
                        record.sender,
                        str(record.pid),
                        str(record.refproc),
                        str(record.refpid), record.facility, record.level_str,
                        record.message.replace("\n",
                                               " ").replace("\t", "    "),
                        "; ".join([
                            "{0}='{1}'".format(key, record.key_value_dict[key])
                            for key in record.key_value_dict
                        ]).replace("\n", " ").replace("\t", "    ")
                    ])
                f.close()

        asl_path = list(set(os.path.dirname(asl) for asl in asl_files))

        for path in asl_path:
            self.logger().info("Processing files from folder: {}".format(path))
            OSX_asl_login_timeline.__dowork__(
                (os.path.join(self.myconfig('casedir'), path), ),
                (os.path.join(self.myconfig('outdir'), "login_power.md"), ))
        self.logger().info("Done ASL")
        return []
Example #29
    def run(self, path=""):
        if not os.path.isdir(self.myconfig('mountdir')):
            raise base.job.RVTError("Folder {} not exists".format(
                self.myconfig('mountdir')))

        search = GetFiles(self.config, vss=self.myflag("vss"))
        knowledgec = search.search("/knowledgec.db$")

        knowledgec_path = self.myconfig('outdir')
        check_folder(knowledgec_path)

        for k in knowledgec:
            self.logger().info("Processing file {}".format(k))
            if k.find('/Users/') < 0:
                output = os.path.join(knowledgec_path, "private.txt")
            else:
                aux = re.search("/Users/([^/]+)", k)
                output = os.path.join(knowledgec_path,
                                      "{}.txt".format(aux.group(1)))

            with open(output, "w") as out:
                with sqlite3.connect('file://{}?mode=ro'.format(
                        os.path.join(self.myconfig('casedir'), k)),
                                     uri=True) as conn:
                    conn.text_factory = str

                    c = conn.cursor()
                    c.execute(
                        'SELECT DISTINCT ZOBJECT.ZSTREAMNAME FROM ZOBJECT ORDER BY ZSTREAMNAME;'
                    )

                    for i in c.fetchall():
                        out.write("{}\n".format(i[0]))

                    c.execute(
                        '''SELECT datetime(ZOBJECT.ZCREATIONDATE+978307200,'UNIXEPOCH', 'LOCALTIME') as "ENTRY CREATION", CASE ZOBJECT.ZSTARTDAYOFWEEK
    WHEN "1" THEN "Sunday"
    WHEN "2" THEN "Monday"
    WHEN "3" THEN "Tuesday"
    WHEN "4" THEN "Wednesday"
    WHEN "5" THEN "Thursday"
    WHEN "6" THEN "Friday"
    WHEN "7" THEN "Saturday"
END "DAY OF WEEK",ZOBJECT.ZSECONDSFROMGMT/3600 AS "GMT OFFSET", datetime(ZOBJECT.ZSTARTDATE+978307200,'UNIXEPOCH', 'LOCALTIME') as "START",
datetime(ZOBJECT.ZENDDATE+978307200,'UNIXEPOCH', 'LOCALTIME') as "END", (ZOBJECT.ZENDDATE-ZOBJECT.ZSTARTDATE) as "USAGE IN SECONDS",
ZOBJECT.ZSTREAMNAME,ZOBJECT.ZVALUESTRING FROM ZOBJECT WHERE ZSTREAMNAME IS "/app/inFocus" ORDER BY "START";'''
                    )

                    out.write(
                        "\n\nENTRY CREATION|DAY OF WEEK|GMT OFFSET|START|END|USAGE IN SECONDS|ZSTREAMNAME|ZVALUESTRING\n"
                    )
                    for i in c.fetchall():
                        out.write("{}|{}|{}|{}|{}|{}|{}|{}\n".format(
                            i[0], i[1], i[2], i[3], i[4], i[5], i[6], i[7]))

                    c.execute('''SELECT
datetime(ZOBJECT.ZCREATIONDATE+978307200,'UNIXEPOCH', 'LOCALTIME') as "ENTRY CREATION", ZOBJECT.ZSECONDSFROMGMT/3600 AS "GMT OFFSET",
CASE ZOBJECT.ZSTARTDAYOFWEEK
    WHEN "1" THEN "Sunday"
    WHEN "2" THEN "Monday"
    WHEN "3" THEN "Tuesday"
    WHEN "4" THEN "Wednesday"
    WHEN "5" THEN "Thursday"
    WHEN "6" THEN "Friday"
    WHEN "7" THEN "Saturday"
END "DAY OF WEEK", datetime(ZOBJECT.ZSTARTDATE+978307200,'UNIXEPOCH', 'LOCALTIME') as "START",
datetime(ZOBJECT.ZENDDATE+978307200,'UNIXEPOCH', 'LOCALTIME') as "END", (ZOBJECT.ZENDDATE-ZOBJECT.ZSTARTDATE) as "USAGE IN SECONDS", ZOBJECT.ZSTREAMNAME,
ZOBJECT.ZVALUESTRING, ZSTRUCTUREDMETADATA.Z_DKAPPLICATIONACTIVITYMETADATAKEY__ACTIVITYTYPE AS "ACTIVITY TYPE",
ZSTRUCTUREDMETADATA.Z_DKAPPLICATIONACTIVITYMETADATAKEY__TITLE as "TITLE", ZSTRUCTUREDMETADATA.Z_DKAPPLICATIONACTIVITYMETADATAKEY__USERACTIVITYREQUIREDSTRING as "ACTIVITY STRING",
datetime(ZSTRUCTUREDMETADATA.Z_DKAPPLICATIONACTIVITYMETADATAKEY__EXPIRATIONDATE+978307200,'UNIXEPOCH', 'LOCALTIME') as "EXPIRATION DATE"
FROM ZOBJECT left join ZSTRUCTUREDMETADATA on ZOBJECT.ZSTRUCTUREDMETADATA = ZSTRUCTUREDMETADATA.Z_PK WHERE ZSTREAMNAME is "/app/activity" or ZSTREAMNAME is "/app/inFocus"
ORDER BY "START";''')

                    out.write(
                        "\n\nENTRY CREATION|GMT OFFSET|DAY OF WEEK|START|END|USAGE IN SECONDS|ZSTREAMNAME|ZVALUESTRING|ACTIVITY TYPE|TITLE|ACTIVITY STRING|EXPIRATION DATE\n"
                    )
                    for i in c.fetchall():
                        out.write(
                            "{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}\n".format(
                                i[0], i[1], i[2], i[3], i[4], i[5], i[6], i[7],
                                i[8], i[9], i[10], i[11]))

                    c.execute('''SELECT
datetime(ZOBJECT.ZCREATIONDATE+978307200,'UNIXEPOCH', 'LOCALTIME') as "ENTRY CREATION", CASE ZOBJECT.ZSTARTDAYOFWEEK
    WHEN "1" THEN "Sunday"
    WHEN "2" THEN "Monday"
    WHEN "3" THEN "Tuesday"
    WHEN "4" THEN "Wednesday"
    WHEN "5" THEN "Thursday"
    WHEN "6" THEN "Friday"
    WHEN "7" THEN "Saturday"
END "DAY OF WEEK", datetime(ZOBJECT.ZSTARTDATE+978307200,'UNIXEPOCH', 'LOCALTIME') as "START", datetime(ZOBJECT.ZENDDATE+978307200,'UNIXEPOCH', 'LOCALTIME') as "END",
(ZOBJECT.ZENDDATE-ZOBJECT.ZSTARTDATE) as "USAGE IN SECONDS", ZOBJECT.ZSTREAMNAME, ZOBJECT.ZVALUESTRING, ZSTRUCTUREDMETADATA.Z_DKAPPLICATIONACTIVITYMETADATAKEY__ACTIVITYTYPE AS "ACTIVITY TYPE",
ZSTRUCTUREDMETADATA.Z_DKAPPLICATIONACTIVITYMETADATAKEY__TITLE as "TITLE", ZSTRUCTUREDMETADATA.Z_DKAPPLICATIONACTIVITYMETADATAKEY__USERACTIVITYREQUIREDSTRING as "ACTIVITY STRING",
datetime(ZSTRUCTUREDMETADATA.Z_DKAPPLICATIONACTIVITYMETADATAKEY__EXPIRATIONDATE+978307200,'UNIXEPOCH', 'LOCALTIME') as "EXPIRATION DATE",
ZSTRUCTUREDMETADATA.Z_DKINTENTMETADATAKEY__INTENTCLASS as "INTENT CLASS", ZSTRUCTUREDMETADATA.Z_DKINTENTMETADATAKEY__INTENTVERB as "INTENT VERB",
ZSTRUCTUREDMETADATA.Z_DKINTENTMETADATAKEY__SERIALIZEDINTERACTION as "SERIALIZED INTERACTION", ZSOURCE.ZBUNDLEID FROM ZOBJECT
left join ZSTRUCTUREDMETADATA on ZOBJECT.ZSTRUCTUREDMETADATA = ZSTRUCTUREDMETADATA.Z_PK left join ZSOURCE on ZOBJECT.ZSOURCE = ZSOURCE.Z_PK
WHERE ZSTREAMNAME is "/app/activity" or ZSTREAMNAME is "/app/inFocus" or ZSTREAMNAME is "/app/intents" ORDER BY "START";'''
                              )

                    out.write(
                        "\n\nENTRY CREATION|DAY OF WEEK|START|END|USAGE IN SECONDS|ZSTREAMNAME|ZVALUESTRING|ACTIVITY TYPE|TITLE|ACTIVITY STRING|EXPIRATION DATE|INTENT CLASS|INTENT VERB|SERIALIZED INTERACTION|ZBUNDLEID\n"
                    )
                    for i in c.fetchall():
                        # emit every column selected by the query above
                        out.write("{}\n".format("|".join(str(v) for v in i)))

        self.logger().info("Done parsing KnowledgeC")
        return []
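
A note on the 978307200 constant in the queries above: KnowledgeC stores timestamps as Mac absolute time, counted in seconds since 2001-01-01 00:00:00 UTC, while SQLite's 'UNIXEPOCH' modifier expects seconds since 1970-01-01; 978307200 is exactly the gap between the two epochs. A minimal standalone sketch of the same conversion in plain Python (the helper name is illustrative, not part of the module above):

import datetime

COCOA_EPOCH_OFFSET = 978307200  # seconds from 1970-01-01 to 2001-01-01 (UTC)

def cocoa_to_datetime(seconds):
    """Convert a Mac absolute timestamp (e.g. ZOBJECT.ZSTARTDATE) to UTC."""
    return datetime.datetime.utcfromtimestamp(seconds + COCOA_EPOCH_OFFSET)

print(cocoa_to_datetime(0))  # 2001-01-01 00:00:00
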
Example #30
class AmCache(base.job.BaseModule):
    """ Parses Amcache.hve registry hive. """
    def run(self, path=""):
        vss = self.myflag('vss')
        self.search = GetFiles(self.config, vss=vss)

        outfolder = self.myconfig('voutdir') if vss else self.myconfig(
            'outdir')
        check_directory(outfolder, create=True)

        amcache_hives = [path] if path else self.search.search("Amcache.hve$")
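        # an explicit 'path' targets a single hive; otherwise every partition is searched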
        for am_file in amcache_hives:
            self.amcache_path = os.path.join(self.myconfig('casedir'), am_file)
            partition = am_file.split("/")[2]
            self.logger().info("Parsing {}".format(am_file))
            self.outfile = os.path.join(outfolder,
                                        "amcache_{}.csv".format(partition))

            try:
                reg = Registry.Registry(self.amcache_path)
                entries = self.parse_amcache_entries(reg)
                save_csv(entries,
                         outfile=self.outfile,
                         file_exists='OVERWRITE',
                         quoting=0)
            except KeyError:
                self.logger().warning(
                    "Expected subkeys not found in hive file: {}".format(
                        am_file))
            except Exception as exc:
                self.logger().warning("Problems parsing: {}. Error: {}".format(
                    am_file, exc))

        self.logger().info("Amcache.hve parsing finished")
        return []

    def parse_amcache_entries(self, registry):
        """ Return a generator of dictionaries describing each entry in the hive.

        Fields:
            * KeyLastWrite: Possible application first executed time (must be tested)
            * AppPath: application path inside the volume
            * AppName: friendly name for application, if any
            * Sha1Hash: binary file SHA-1 hash value
            * GUID: Volume GUID the application was executed from
        """
        # The hive may contain either (or both) of two structures under Root:
        #   * Root\\File\\{VolumeGUID}\\<file entries>
        #   * Root\\InventoryApplicationFile\\<file entries>
        found_key = ''
        structures = {
            'File': self._parse_File_entries,
            'InventoryApplicationFile': self._parse_IAF_entries
        }
        for key, func in structures.items():
            try:
                volumes = registry.open("Root\\{}".format(key))
                found_key = key
                self.logger().debug(
                    'Parsing entries in key: Root\\{}'.format(key))
                for app in func(volumes):
                    yield app
            except Registry.RegistryKeyNotFoundException:
                self.logger().info('Key "Root\\{}" not found'.format(key))

        if not found_key:
            raise KeyError

    def _parse_File_entries(self, volumes):
        """ Parses File subkey entries for amcache hive """
        fields = {
            'LastModified': "17",
            'AppPath': "15",
            'AppName': "0",
            'Sha1Hash': "101"
        }
        for volumekey in volumes.subkeys():
            for filekey in volumekey.subkeys():
                app = OrderedDict([('KeyLastWrite', WINDOWS_TIMESTAMP_ZERO),
                                   ('AppPath', ''), ('AppName', ''),
                                   ('Sha1Hash', ''),
                                   ('LastModified', WINDOWS_TIMESTAMP_ZERO),
                                   ('GUID', '')])
                app['GUID'] = volumekey.path().split('}')[0][1:]
                app['KeyLastWrite'] = filekey.timestamp()
                for f in fields:
                    try:
                        val = filekey.value(fields[f]).value()
                        if f == 'Sha1Hash':
                            val = val[4:]
                        elif f == 'LastModified':
                            val = parse_windows_timestamp(val).strftime(
                                "%Y-%m-%d %H:%M:%S")
                        app.update({f: val})
                    except Registry.RegistryValueNotFoundException:
                        pass
                yield app

    def _parse_IAF_entries(self, volumes):
        """ Parses InventoryApplicationFile subkey entries for amcache hive.

        Yields: dict with keys'FirstRun','AppPath') """
        names = {
            'LowerCaseLongPath': 'AppPath',
            'FileId': 'Sha1Hash',
            'ProductName': 'AppName'
        }
        for volumekey in volumes.subkeys():
            app = OrderedDict([('KeyLastWrite', WINDOWS_TIMESTAMP_ZERO),
                               ('AppPath', ''), ('AppName', ''),
                               ('Sha1Hash', ''),
                               ('LastModified', WINDOWS_TIMESTAMP_ZERO),
                               ('GUID', '')])
            app['GUID'] = volumekey.path().split('}')[0][1:]
            app['KeyLastWrite'] = volumekey.timestamp()
            for v in volumekey.values():
                if v.name() in ['LowerCaseLongPath', 'ProductName']:
                    app.update({names.get(v.name(), v.name()): v.value()})
                elif v.name() == 'FileId':
                    # the stored value prefixes the SHA-1 hash with four
                    # padding zeros; strip them
                    sha = v.value()[4:]
                    app.update({names.get(v.name(), v.name()): sha})
            yield app
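
Hive access in the module above goes through python-registry (the library imported as Registry). A minimal standalone sketch of the same calls, useful for inspecting a single hive outside the RVT2 pipeline (the hive path is a placeholder):

from Registry import Registry

# always work on a copy of the hive, never the original evidence
reg = Registry.Registry("/tmp/Amcache.hve")  # placeholder path

# newer hives keep per-application entries under InventoryApplicationFile;
# older ones use Root\File instead (see parse_amcache_entries above)
try:
    key = reg.open("Root\\InventoryApplicationFile")
    for appkey in key.subkeys():
        values = {v.name(): v.value() for v in appkey.values()}
        print(appkey.timestamp(), values.get("LowerCaseLongPath", ""))
except Registry.RegistryKeyNotFoundException:
    print("Root\\InventoryApplicationFile not present in this hive")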