Example #1
    def run(self, path=""):
        """ Extracts SRUM artifacts of a disk """
        vss = self.myflag('vss')
        SRUM_TEMPLATE = os.path.join(self.myconfig('rvthome'), "plugins/external/srum-dump/SRUM_TEMPLATE2.xlsx")
        srum = os.path.join(self.myconfig('rvthome'), "plugins/external/srum-dump/srum_dump2.py")
        check_file(SRUM_TEMPLATE, error_missing=True)

        Search = GetFiles(self.config, vss=self.myflag("vss"))
        SOFTWARE = list(Search.search('windows/system32/config/SOFTWARE$'))
        SRUDB = list(Search.search('/windows/system32/sru/SRUDB.dat$'))
        python3 = os.path.join(self.myconfig('rvthome'), ".venv/bin/python3")

        out_folder = self.myconfig('voutdir') if vss else self.myconfig('outdir')
        check_directory(out_folder, create=True)

        if not SRUDB:
            self.logger().info("SRUDB.dat not found in any partition of the disk")
            return []

        for soft in SOFTWARE:
            partition = soft.split('/')[2]
            for srudb in SRUDB:
                if srudb.split('/')[2] == partition:
                    self.logger().info("Parsing SRUDB from partition {}".format(partition))
                    out_file = os.path.join(out_folder, 'srum_{}.xlsx'.format(partition))
                    run_command([python3, srum, "-i", os.path.join(self.myconfig('casedir'), srudb), "-t", SRUM_TEMPLATE,
                                "-r", os.path.join(self.myconfig('casedir'), soft), "-o", out_file], logger=self.logger())

                    self.convert_to_csv(out_folder, partition)
                    os.remove(out_file)
                    break
            else:
                self.logger().info("SRUDB.dat not found in partition: {}".format(partition))

        return []
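A note on the convert_to_csv call above: the helper is not part of this snippet. A minimal sketch of what it might look like, assuming openpyxl is available and that it dumps each sheet of the srum_<partition>.xlsx workbook to CSV (the name and behavior are assumptions, not the project's actual implementation):

import csv
import os

from openpyxl import load_workbook  # assumption: openpyxl is installed


def convert_to_csv(folder, partition):
    """ Hypothetical helper: dump every sheet of srum_<partition>.xlsx to CSV """
    xlsx = os.path.join(folder, 'srum_{}.xlsx'.format(partition))
    wb = load_workbook(xlsx, read_only=True)
    for sheet in wb.sheetnames:
        out_csv = os.path.join(folder, 'srum_{}_{}.csv'.format(partition, sheet))
        with open(out_csv, 'w', newline='') as f:
            writer = csv.writer(f)
            for row in wb[sheet].iter_rows(values_only=True):
                writer.writerow(row)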
Example #2
    def run(self, path=""):
        if not os.path.isdir(self.myconfig('mountdir')):
            raise base.job.RVTError("Folder {} not exists".format(
                self.myconfig('mountdir')))

        search = GetFiles(self.config, vss=self.myflag("vss"))
        parser = os.path.join(
            self.myconfig('rvthome'),
            "plugins/external/UnifiedLogReader/scripts/UnifiedLogReader.py")
        uuidtext = search.search("/var/db/uuidtext$")
        timesync = search.search("/var/db/diagnostics/timesync$")
        diagnostics = search.search("/var/db/diagnostics$")

        ulr_path = self.myconfig('outdir')
        check_folder(ulr_path)

        if not uuidtext or not timesync or not diagnostics:
            return []

        python3 = '/usr/bin/python3'

        try:
            run_command([
                python3, parser,
                os.path.join(self.myconfig('casedir'), uuidtext[0]),
                os.path.join(self.myconfig('casedir'), timesync[0]),
                os.path.join(self.myconfig('casedir'), diagnostics[0]),
                ulr_path, "-l", "WARNING"
            ])
        except Exception as exc:
            self.logger().error(
                'Problems with UnifiedLogReader.py. Error: {}'.format(exc))
        self.logger().info("Done parsing UnifiedLogReader")
        return []
Example #3
    def get_hives(self, p):
        """ Obtain the paths to registry hives

        Arguments:
            p (str): partition number. Ex: 'p03'
        """
        regfiles = {}

        Find = GetFiles(self.config, vss=self.myflag("vss"))

        for item in Find.search(
                "{}/Windows/System32/config/(SYSTEM|SOFTWARE|SAM|SECURITY)$".
                format(p)):
            hive = item.split('/')[-1].lower()
            regfiles[hive] = os.path.join(self.myconfig('casedir'), item)

        if "software" not in regfiles.keys():
            self.logger().warning(
                'SOFTWARE hive not found in partition {}. Skipping this partition'
                .format(p))
            return {}

        NTUSER = Find.search(
            r"{}/(Documents and settings|users)/.*/(NTUSER|UsrClass)\.dat$".
            format(p))

        usr = []
        regfiles["ntuser"] = {}
        regfiles["usrclass"] = {}

        for item in NTUSER:
            aux = re.search("(Documents and settings|Users)/([^/]*)/", item,
                            re.I)
            user = aux.group(2)
            if user not in usr:
                usr.append(user)
                regfiles["ntuser"][user] = ""
                regfiles["usrclass"][user] = ""
            if item.lower().endswith("ntuser.dat"):
                regfiles["ntuser"][user] = os.path.join(
                    self.myconfig('casedir'), item)
            else:
                regfiles["usrclass"][user] = os.path.join(
                    self.myconfig('casedir'), item)

        amcache = list(
            Find.search("{}/Windows/AppCompat/Programs/Amcache.hve".format(p)))
        if len(amcache) != 0:
            regfiles["amcache"] = os.path.join(self.myconfig('casedir'),
                                               amcache[0])
        syscache = list(Find.search(r"{}.*/syscache.hve$".format(p)))
        if len(syscache) != 0:
            regfiles["syscache"] = os.path.join(self.myconfig('casedir'),
                                                syscache[0])

        return regfiles
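A usage sketch for get_hives, with hypothetical paths, to illustrate the shape of the mapping it returns:

hives = self.get_hives('p03')
# {
#     'system':   '<casedir>/p03/Windows/System32/config/SYSTEM',
#     'software': '<casedir>/p03/Windows/System32/config/SOFTWARE',
#     'sam':      '<casedir>/p03/Windows/System32/config/SAM',
#     'security': '<casedir>/p03/Windows/System32/config/SECURITY',
#     'ntuser':   {'john': '<casedir>/p03/Users/john/NTUSER.DAT'},
#     'usrclass': {'john': '<casedir>/p03/Users/john/.../UsrClass.dat'},
#     'amcache':  '<casedir>/p03/Windows/AppCompat/Programs/Amcache.hve',  # only if found
#     'syscache': '<casedir>/p03/.../syscache.hve',  # only if found
# }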
Example #4
class ActivitiesCache(base.job.BaseModule):

    def run(self, path=""):
        """ Parses activities cache

        """

        self.search = GetFiles(self.config, vss=self.myflag("vss"))
        self.logger().info("Parsing Activities Cache files")
        vss = self.myflag('vss')

        if vss:
            base_path = self.myconfig('voutdir')
        else:
            base_path = self.myconfig('outdir')
        check_folder(base_path)

        activities = self.search.search("/ConnectedDevicesPlatform/.*/ActivitiesCache.db$")

        activities_cache_parser = self.myconfig('activities_cache_parser', os.path.join(self.myconfig('rvthome'), '.venv/bin/winactivities2json.py'))
        python3 = self.myconfig('python3', os.path.join(self.myconfig('rvthome'), '.venv/bin/python3'))

        for act in activities:
            with open(os.path.join(base_path, '{}_activitycache_{}.json'.format(act.split('/')[2], act.split('/')[-2])), 'w') as out_file:
                run_command([python3, activities_cache_parser, '-s', act], from_dir=self.myconfig('casedir'), stdout=out_file)
        return []
Example #5
    def run(self, path=""):
        if not os.path.isdir(self.myconfig('mountdir')):
            raise base.job.RVTError("Folder {} not exists".format(
                self.myconfig('mountdir')))

        search = GetFiles(self.config, vss=self.myflag("vss"))
        parser = os.path.join(
            self.myconfig('rvthome'),
            "plugins/external/spotlight_parser/spotlight_parser.py")
        spotlight = search.search(r"/\.spotlight.*/store.db$")

        spotlight_path = self.myconfig('outdir')
        check_folder(spotlight_path)

        # TODO: adapt external spotlight_parser.py script to python3
        python = self.myconfig('python', '/usr/bin/python')

        n = 1
        errorlog = os.path.join(self.myconfig('sourcedir'),
                                "{}_aux.log".format(self.myconfig('source')))
        with open(errorlog, 'a') as logfile:
            for f in spotlight:
                self.logger().info("Processing file {}".format(f))
                run_command([
                    python, parser,
                    os.path.join(self.myconfig('casedir'), f), spotlight_path,
                    "-p",
                    "spot-%s" % str(n)
                ],
                            stdout=logfile,
                            stderr=logfile)
                n += 1
        self.logger().info("Spotlight done")
        return []
Example #6
    def get_evtx(self, path, regex_search):
        """ Retrieve the evtx file to parse.
        Take 'path' if it is defined and exists.
        Otherwise, take the first match for the corresponding evtx file in the filesystem.

        Arguments:
            path: path to evtx as defined in job
            regex_search: regular expression to search among allocated files in the filesystem

        """
        if path:
            if os.path.exists(path):
                return path
            else:
                raise base.job.RVTError('path {} does not exist'.format(path))

        alloc_files = GetFiles(self.config, vss=self.myflag("vss"))

        evtx_files = alloc_files.search(regex_search)
        if len(evtx_files) < 1:
            self.logger().info("{} matches not found in filesystem".format(regex_search))
            return ''
        if len(evtx_files) > 1:
            self.logger().warning("More than one file matches {}. Only parsing the file {}".format(regex_search, evtx_files[0]))

        return os.path.join(self.myconfig('casedir'), evtx_files[0])
Example #7
    def run(self, path=""):

        search = GetFiles(self.config, vss=self.myflag("vss"))
        users = search.search(r"p\d+(/root)?/Users/[^/]+$")
        mru_path = self.myconfig('outdir')
        check_folder(mru_path)

        parser = os.path.join(self.myconfig('rvthome'),
                              "plugins/external/macMRU/macMRU.py")
        python3 = os.path.join(self.myconfig('rvthome'), '.venv/bin/python3')

        for user in users:
            self.logger().info("Extracting MRU info from user {}".format(
                os.path.basename(user)))
            with open(
                    os.path.join(mru_path, '%s.txt' % os.path.basename(user)),
                    'w') as f:
                self.logger().debug("Generating file {}".format(
                    os.path.join(mru_path, '%s.txt' % os.path.basename(user))))
                run_command([
                    python3, parser,
                    os.path.join(self.myconfig('casedir'), user)
                ],
                            stdout=f)

        self.logger().info("Done parsing MacMRU")
        return []
Example #8
    def run(self, path=""):
        if not os.path.isdir(self.myconfig('mountdir')):
            raise base.job.RVTError("Folder {} not exists".format(
                self.myconfig('mountdir')))

        search = GetFiles(self.config, vss=self.myflag("vss"))
        parser = os.path.join(
            self.myconfig('rvthome'),
            "plugins/external/FSEventsParser/FSEParser_V4.0.py")
        fsevents = search.search(r"\.fseventsd$")

        fsevents_path = self.myconfig('outdir')
        check_folder(fsevents_path)

        python = self.myconfig('python', '/usr/bin/python')

        n = 1
        for f in fsevents:
            self.logger().info("Processing file {}".format(f))
            run_command([
                python, parser, "-c", "Report_{}".format(f.split('/')[-2]),
                "-s",
                os.path.join(self.myconfig('casedir'),
                             f), "-t", "folder", "-o", fsevents_path, "-q",
                os.path.join(
                    self.myconfig('rvthome'),
                    "plugins/external/FSEventsParser/report_queries.json")
            ])
            n += 1
        self.logger().info("Done FSEvents")
        return []
Example #9
    def run(self, path=""):
        """ Main function to extract quick look information

        """

        if not os.path.isdir(self.myconfig('mountdir')):
            raise base.job.RVTError("Folder {} not exists".format(
                self.myconfig('mountdir')))

        ql_path = self.myconfig("outdir")

        check_folder(ql_path)

        search = GetFiles(self.config, vss=self.myflag("vss"))

        ql_list = search.search("QuickLook.thumbnailcache$")

        for i in ql_list:
            self.logger().info("Extracting quicklook data from {}".format(i))
            out_path = os.path.join(ql_path, i.split("/")[-3])
            if not os.path.isdir(out_path):
                os.mkdir(out_path)
            quicklook_parser_v_3_5mod.process_database(
                os.path.join(self.myconfig('casedir'), i), out_path)
        self.logger().info("Done QuickLook")
        return []
Example #10
class ShimCache(base.job.BaseModule):
    """ Extracts ShimCache information from registry hives. """

    # TODO: .sdb shim database files (ex: Windows/AppPatch/sysmain.sdb)

    def run(self, path=""):
        self.search = GetFiles(self.config, vss=self.myflag("vss"))
        self.vss = self.myflag('vss')
        self.logger().info("Parsing ShimCache from registry")

        outfolder = self.myconfig('voutdir') if self.vss else self.myconfig(
            'outdir')
        SYSTEM = list(self.search.search(r"windows/System32/config/SYSTEM$"))
        check_directory(outfolder, create=True)

        partition_list = set()
        for f in SYSTEM:
            aux = re.search(r"([vp\d]*)/windows/System32/config", f, re.I)
            partition_list.add(aux.group(1))

        output_files = {
            p: os.path.join(outfolder, "shimcache_%s.csv" % p)
            for p in partition_list
        }

        for f in SYSTEM:
            save_csv(self.parse_ShimCache_hive(f),
                     outfile=output_files[f.split("/")[2]],
                     file_exists='OVERWRITE',
                     quoting=0)

        self.logger().info("Finished extraction from ShimCache")
        return []

    def parse_ShimCache_hive(self, sysfile):
        """ Launch shimcache regripper plugin and parse results """
        ripcmd = self.config.get('plugins.common', 'rip',
                                 '/opt/regripper/rip.pl')
        date_regex = re.compile(
            r'\w{3}\s\w{3}\s+\d+\s\d{2}:\d{2}:\d{2}\s\d{4} Z')

        res = run_command([
            ripcmd, "-r",
            os.path.join(self.myconfig('casedir'), sysfile), "-p", "shimcache"
        ],
                          logger=self.logger())
        for line in res.split('\n'):
            if ':' not in line[:4]:
                continue
            matches = re.search(date_regex, line)
            if matches:
                path = line[:matches.span()[0] - 2]
                date = str(
                    datetime.datetime.strptime(matches.group(),
                                               '%a %b %d %H:%M:%S %Y Z'))
                executed = bool(len(line[matches.span()[1]:]))
                yield OrderedDict([('LastModified', date), ('AppPath', path),
                                   ('Executed', executed)])
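The date handling in parse_ShimCache_hive can be exercised in isolation. A self-contained sketch, assuming a sample line in the "path, two spaces, timestamp, optional executed flag" layout the loop above expects from regripper:

import datetime
import re

date_regex = re.compile(r'\w{3}\s\w{3}\s+\d+\s\d{2}:\d{2}:\d{2}\s\d{4} Z')

# hypothetical sample line from the regripper shimcache plugin
line = r'C:\Windows\system32\calc.exe  Thu Mar 12 10:11:12 2015 Z  Executed'
matches = date_regex.search(line)
path = line[:matches.span()[0] - 2]
date = str(datetime.datetime.strptime(matches.group(), '%a %b %d %H:%M:%S %Y Z'))
executed = bool(len(line[matches.span()[1]:]))
print(path, date, executed)  # C:\Windows\system32\calc.exe 2015-03-12 10:11:12 True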
Example #11
class Bits(base.job.BaseModule):
    """ Parse Background Intelligent Transfer Service. """
    def run(self, path=""):
        self.search = GetFiles(self.config, vss=self.myflag("vss"))
        self.vss = self.myflag('vss')
        self.logger().info("Parsing Bits database")
        self.parse_BITS()
        return []

    def parse_BITS(self):
        if self.vss:
            base_path = self.myconfig('voutdir')
            bitsdb = self.search.search(
                r"v\d+p\d+/ProgramData/Microsoft/Network/Downloader/qmgr0.dat$"
            )
        else:
            base_path = self.myconfig('outdir')
            bitsdb = self.search.search(
                r"p\d+/ProgramData/Microsoft/Network/Downloader/qmgr0.dat$")
        check_directory(base_path, create=True)

        fields = OrderedDict([('job_id', None), ('name', None), ('desc', None),
                              ('type', None), ('priority', None),
                              ('sid', None), ('state', None), ('cmd', None),
                              ('args', None), ('file_count', 0),
                              ('file_id', 0), ('dest_fn', None),
                              ('src_fn', None), ('tmp_fn', None),
                              ('download_size', -1), ('transfer_size', -1),
                              ('drive', None), ('vol_guid', None),
                              ('ctime', None), ('mtime', None),
                              ('other_time0', None), ('other_time1', None),
                              ('other_time2', None), ('carved', False)])

        for f in bitsdb:
            analyzer = bits.Bits.load_file(
                os.path.join(self.myconfig('casedir'), f))
            jobs = analyzer.parse()
            res_generator = (OrderedDict([(field, j.get(field, fields[field]))
                                          for field in fields]) for j in jobs)
            output_file = os.path.join(base_path,
                                       "bitsdb_%s.csv" % f.split("/")[2])
            save_csv(res_generator,
                     outfile=output_file,
                     file_exists='OVERWRITE')
Example #12
class SysCache(base.job.BaseModule):
    def run(self, path=""):
        self.search = GetFiles(self.config, vss=self.myflag("vss"))
        self.vss = self.myflag('vss')
        self.logger().info("Parsing Syscache from registry")
        self.parse_SysCache_hive()
        return []

    def parse_SysCache_hive(self):
        outfolder = self.myconfig('voutdir') if self.vss else self.myconfig(
            'outdir')
        # self.tl_file = os.path.join(self.myconfig('timelinesdir'), "%s_BODY.csv" % self.myconfig('source'))
        check_directory(outfolder, create=True)
        SYSC = self.search.search(r"/System Volume Information/SysCache.hve$")

        ripcmd = self.config.get('plugins.common', 'rip',
                                 '/opt/regripper/rip.pl')

        for f in SYSC:
            p = f.split('/')[2]
            output_text = run_command([
                ripcmd, "-r",
                os.path.join(self.myconfig('casedir'), f), "-p", "syscache_csv"
            ],
                                      logger=self.logger())
            output_file = os.path.join(outfolder, "syscache_%s.csv" % p)

            self.path_from_inode = FileSystem(
                config=self.config).load_path_from_inode(self.myconfig,
                                                         p,
                                                         vss=self.vss)

            save_csv(self.parse_syscache_csv(p, output_text),
                     outfile=output_file,
                     file_exists='OVERWRITE')

        self.logger().info("Finished extraction from SysCache")

    def parse_syscache_csv(self, partition, text):
        for line in text.split('\n')[:-1]:
            line = line.split(",")
            fileID = line[1]
            inode = line[1].split('/')[0]
            name = self.path_from_inode.get(inode, [''])[0]
            try:
                yield OrderedDict([("Date", dateutil.parser.parse(
                    line[0]).strftime("%Y-%m-%dT%H:%M:%SZ")), ("Name", name),
                                   ("FileID", fileID), ("Sha1", line[2])])
            except Exception:
                yield OrderedDict([("Date", dateutil.parser.parse(
                    line[0]).strftime("%Y-%m-%dT%H:%M:%SZ")), ("Name", name),
                                   ("FileID", fileID), ("Sha1", "")])
Example #13
    def generate(self, evtx_path):
        """ Auxiliary function """

        check_directory(evtx_path, create=True)
        evtx = self.config.get('plugins.common', 'evtxdump',
                               '/usr/local/bin/evtxdump.pl')

        alloc_files = GetFiles(self.config, vss=self.myflag("vss"))
        if self.vss:
            evtx_files = alloc_files.search(r"{}.*\.evtx$".format(
                evtx_path.split('/')[-1]))
        else:
            evtx_files = alloc_files.search(r"\.evtx$")

        errorlog = self.myconfig(
            'errorlog',
            os.path.join(self.myconfig('sourcedir'),
                         "{}_aux.log".format(self.myconfig('source'))))

        for i in evtx_files:
            evtx_file = os.path.join(self.myconfig('casedir'), i)
            if not check_file(evtx_file):
                self.logger().warning('File %s does not exist', evtx_file)
                continue
            self.logger().info("Parsing {}".format(i))
            name = os.path.join(evtx_path, os.path.basename(i))[:-4] + "txt"

            # if the output already exists, continue
            if check_file(name):
                self.logger().debug(
                    'The output file %s already exists. Skipping', name)
                continue

            with open(name, "wb") as f:
                with open(errorlog, 'a') as logfile:
                    run_command([evtx, evtx_file],
                                stdout=f,
                                stderr=logfile,
                                logger=self.logger())
Example #14
    def ProcessActiveDirectoryPlist(self):
        '''
        Extract active directory artifacts

        Based on mac_apt plugin from https://github.com/ydkhatri/mac_apt
        '''
        search = GetFiles(self.config, vss=self.myflag("vss"))
        network_paths = search.search(
            "/Library/Preferences/OpenDirectory/Configurations/Active Directory$"
        )

        out = open(
            os.path.join(self.myconfig('outdir'),
                         'Domain_ActiveDirectory.csv'), 'w')
        writer = csv.writer(out, delimiter="|", quotechar='"')
        headers = [
            "node name", "trustaccount", "trustkerberosprincipal", "trusttype",
            "allow multi-domain", "cache last user logon", "domain", "forest",
            "trust domain", "source"
        ]
        writer.writerow(headers)

        for plist_path in network_paths:
            active_directory = {'source': plist_path}
            for archive in sorted(
                    os.listdir(
                        os.path.join(self.myconfig('casedir'), plist_path))):
                plist = biplist.readPlist(
                    os.path.join(self.myconfig('casedir'), plist_path,
                                 archive))
                try:
                    for item, value in plist.items():
                        if item in [
                                'node name', 'trustaccount',
                                'trustkerberosprincipal', 'trusttype'
                        ]:
                            active_directory[item] = value
                    ad_dict = plist['module options']['ActiveDirectory']
                    for item, value in ad_dict.items():
                        if item in [
                                'allow multi-domain', 'cache last user logon',
                                'domain', 'forest', 'trust domain'
                        ]:
                            active_directory[item] = value
                except Exception:
                    self.logger().error('Error reading plist %s' %
                                        os.path.join(plist_path, archive))
                writer.writerow([active_directory[d] for d in headers])
        out.close()
        return []
Example #15
    def run(self, path=""):
        search = GetFiles(self.config, vss=self.myflag("vss"))
        nusage = search.search("/netusage.sqlite$")
        output = os.path.join(self.myconfig('outdir'), "network_usage.txt")

        with open(output, "w") as out:
            for k in nusage:
                self.logger().info(
                    "Extracting information of file {}".format(k))
                with sqlite3.connect('file://{}?mode=ro'.format(
                        os.path.join(self.myconfig('casedir'), k)),
                                     uri=True) as conn:
                    conn.text_factory = str
                    c = conn.cursor()

                    out.write(
                        "{}\n------------------------------------------\n".
                        format(k))
                    query = '''SELECT pk.z_name as item_type, na.zidentifier as item_name, na.zfirsttimestamp as first_seen_date, na.ztimestamp as last_seen_date,
rp.ztimestamp as rp_date, rp.zbytesin, rp.zbytesout FROM znetworkattachment as na LEFT JOIN z_primarykey pk ON na.z_ent = pk.z_ent
LEFT JOIN zliverouteperf rp ON rp.zhasnetworkattachment = na.z_pk ORDER BY pk.z_name, zidentifier, rp_date desc;'''.replace(
                        '\n', ' ').upper()
                    c.execute(query)

                    out.write(
                        "\n\nitem_type|item_name|first_seen_date|last_seen_date|rp_date|ZBYTESIN|ZBYTESOUT\n--|--|--|--|--|--|--\n"
                    )
                    for i in c.fetchall():
                        out.write("{}|{}|{}|{}|{}|{}|{}\n".format(
                            i[0], i[1], i[2], i[3], i[4], i[5], i[6]))

                    query = '''SELECT pk.z_name as item_type ,p.zprocname as process_name, p.zfirsttimestamp as first_seen_date, p.ztimestamp as last_seen_date,
lu.ztimestamp as usage_since, lu.zwifiin, lu.zwifiout, lu.zwiredin, lu.zwiredout, lu.zwwanin, lu.zwwanout FROM zliveusage lu
LEFT JOIN zprocess p ON p.z_pk = lu.zhasprocess LEFT JOIN z_primarykey pk ON p.z_ent = pk.z_ent ORDER BY process_name;'''.replace(
                        '\n', ' ').upper()
                    c.execute(query)

                    out.write(
                        "\n\nitem_type|process_name|first_seen_date|last_seen_date|usage_since|ZWIFIIN|ZWIFIOUT|ZWIREDIN|ZWIREDOUT|ZWWANIN|ZWANOUT\n--|--|--|--|--|--|--|--|--|--|--\n"
                    )
                    for i in c.fetchall():
                        out.write("{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}\n".format(
                            i[0], i[1], i[2], i[3], i[4], i[5], i[6], i[7],
                            i[8], i[9], i[10]))
                    out.write("\n")
                    c.close()

        self.logger().info("Done parsing netusage.sqlite")
        return []
Example #16
    def run(self, path=""):
        """ Get information of hiberfil.sys

        """
        volatility = self.config.get('plugins.common', 'volatility',
                                     '/usr/local/bin/vol.py')

        hiber_path = self.myconfig('outdir')
        check_folder(hiber_path)

        search = GetFiles(self.config, vss=self.myflag("vss"))
        hiberlist = search.search("/hiberfil.sys$")

        for h in hiberlist:
            aux = re.search(
                "{}/([^/]*)/".format(
                    base.utils.relative_path(self.myconfig('mountdir'),
                                             self.myconfig('casedir'))), h)
            partition = aux.group(1)

            hiber_raw = os.path.join(hiber_path,
                                     "hiberfil_{}.raw".format(partition))
            profile, version = self.get_win_profile(partition)
            with open(
                    os.path.join(hiber_path,
                                 "hiberinfo_{}.txt".format(partition)),
                    'w') as pf:
                pf.write("Profile: %s\nVersion: %s" % (profile, version))
            if version.startswith("5") or version.startswith(
                    "6.0") or version.startswith("6.1"):
                self.logger().info("Uncompressing {}".format(h))
                run_command([
                    volatility, "--profile={}".format(profile), "-f",
                    os.path.join(self.myconfig('casedir'), h), "imagecopy",
                    "-O", hiber_raw
                ],
                            logger=self.logger())
            else:
                self.logger().info(
                    "{} could not be decompressed with a Linux distro".
                    format(h))
                self.logger().info(
                    "Decompress Windows 8 or higher hiberfil.sys files using https://arsenalrecon.com/weapons/hibernation-recon/"
                )
                self.logger().info("Save the output at {}".format(hiber_raw))
            self.vol_extract(hiber_raw, profile, version)
        return []
Example #17
    def run(self, path=""):
        if not os.path.isdir(self.myconfig('mountdir')):
            raise base.job.RVTError("Folder {} not exists".format(
                self.myconfig('mountdir')))

        search = GetFiles(self.config, vss=self.myflag("vss"))
        dsstore_files = search.search(r"/\.ds_store$")

        output1 = os.path.join(self.myconfig('outdir'), "dsstore_dump.txt")
        output2 = os.path.join(self.myconfig('outdir'), "dsstore.txt")

        with open(output1, 'w') as out1:
            filelist = set()
            n_stores = 0
            for dstores in dsstore_files:
                out1.write(
                    "{}\n-------------------------------\n".format(dstores))
                with open(os.path.join(self.myconfig('casedir'), dstores),
                          "rb") as ds:
                    try:
                        d = dsstore.DS_Store(ds.read(), debug=False)
                        files = d.traverse_root()
                        for f in files:
                            filelist.add(
                                os.path.join(os.path.dirname(dstores), f))
                            out1.write("%s\n" % f)
                    except Exception as exc:
                        self.logger().warning(
                            "Problems parsing file {}. Error: {}".format(
                                dstores, exc))
                n_stores += 1
                out1.write("\n")

        self.logger().info("Founded {} .DS_Store files".format(n_stores))

        with open(output2, "w") as out:
            for f in sorted(filelist):
                out.write("%s\n" % f)
        self.logger().info("ParseDSStore Done")
        return []
Example #18
    def run(self, path=""):
        search = GetFiles(self.config, vss=self.myflag("vss"))
        quarantine = search.search(
            "/com.apple.LaunchServices.QuarantineEventsV2$")

        output = os.path.join(self.myconfig('outdir'), "quarantine.txt")

        with open(output, "w") as out:
            for k in quarantine:
                self.logger().info(
                    "Extracting information of file {}".format(k))
                with sqlite3.connect('file://{}?mode=ro'.format(
                        os.path.join(self.myconfig('casedir'), k)),
                                     uri=True) as conn:
                    conn.text_factory = str
                    c = conn.cursor()

                    out.write(
                        "{}\n------------------------------------------\n".
                        format(k))
                    query = '''SELECT LSQuarantineEventIdentifier as id, LSQuarantineTimeStamp as ts, LSQuarantineAgentBundleIdentifier as bundle,
LSQuarantineAgentName as agent_name, LSQuarantineDataURLString as data_url,
LSQuarantineSenderName as sender_name, LSQuarantineSenderAddress as sender_add, LSQuarantineTypeNumber as type_num,
LSQuarantineOriginTitle as o_title, LSQuarantineOriginURLString as o_url, LSQuarantineOriginAlias as o_alias
FROM LSQuarantineEvent  ORDER BY ts;'''.replace('\n', ' ')
                    c.execute(query)

                    out.write(
                        "\n\nid|ts|bundle|agent_name|data_url|sender_name|sender_add|type_num|o_title|o_url|o_alias\n--|--|--|--|--|--|--|--|--|--|--\n"
                    )
                    for i in c.fetchall():
                        out.write("{}|{}|{}|{}|{}|{}|{}\n".format(
                            i[0], i[1], i[2], i[3], i[4], i[5], i[6], i[7],
                            i[8], i[9], i[10]))
                    out.write("\n")
                    c.close()

        self.logger().info("Done parsing QuarantineEvents")
        return []
Example #19
    def run(self, path=""):
        if not os.path.isdir(self.myconfig('mountdir')):
            raise base.job.RVTError("Folder {} not exists".format(
                self.myconfig('mountdir')))

        search = GetFiles(self.config, vss=self.myflag("vss"))
        plist_files = search.search(r"\.plist$")

        plist_num = 0
        with open(os.path.join(self.myconfig('outdir'), "plist_dump.txt"),
                  'wb') as output:
            for pl in plist_files:
                plist_num += 1
                output.write("{}\n-------------------------------\n".format(
                    pl).encode())
                # try:
                #     text = subprocess.check_output(["plistutil", "-i", os.path.join(self.myconfig('mountdir'), pl)])
                #     output.write(text)
                #     output.write(b"\n\n")
                # except:
                #     self.logger().warning("Problems with file %s" % pl)
                #     output.write(b"\n\n")

                try:
                    plist = biplist.readPlist(
                        os.path.join(self.myconfig('casedir'), pl))
                    output.write(self.pprint(plist) + b"\n\n")
                except (biplist.InvalidPlistException,
                        biplist.NotBinaryPlistException):
                    self.logger().info("%s not a plist file or is corrupted" %
                                       pl)
                    output.write(b"\n\n")
                except Exception:
                    self.logger().info("Problems with file %s" % pl)

        self.logger().info("Founded {} plist files".format(plist_num))
        self.logger().info("Done parsing Plist")
        return []
Example #20
class LnkExtract(base.job.BaseModule):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.dicID = load_appID(myconfig=self.myconfig)
        self.vss = self.myflag('vss')
        self.encoding = self.myconfig('encoding', 'cp1252')

    def read_config(self):
        super().read_config()
        # appid is a file relating applications id with names. https://github.com/EricZimmerman/JumpList/blob/master/JumpList/Resources/AppIDs.txt
        self.set_default_config(
            'appid',
            os.path.join(self.config.config['windows']['plugindir'],
                         'appID.txt'))

    def run(self, path=""):
        """ Parses lnk files, jumlists and customdestinations

        """
        self.logger().info("Extraction of lnk files")

        self.Files = GetFiles(self.config, vss=self.myflag("vss"))
        self.filesystem = FileSystem(self.config)
        self.mountdir = self.myconfig('mountdir')

        lnk_path = self.myconfig('{}outdir'.format('v' if self.vss else ''))
        check_folder(lnk_path)

        users = get_user_list(self.mountdir, self.vss)
        artifacts = {
            'lnk': {
                'filename': "{}_lnk.csv",
                'regex': r"{}/.*\.lnk$",
                'function': self.lnk_parser
            },
            'autodest': {
                'filename': "{}_jl.csv",
                'regex': r"{}/.*\.automaticDestinations-ms$",
                'function': self.automaticDest_parser
            },
            'customdest': {
                'filename': "{}_jlcustom.csv",
                'regex': r"{}/.*\.customDestinations-ms$",
                'function': self.customDest_parser
            }
        }

        for user in users:
            usr = "******".format(user.split("/")[0], user.split("/")[2])

            for a_name, artifact in artifacts.items():
                out_file = os.path.join(lnk_path,
                                        artifact['filename'].format(usr))
                files_list = list(
                    self.Files.search(artifact['regex'].format(user)))
                self.logger().info(
                    "Founded {} {} files for user {} at {}".format(
                        len(files_list), a_name,
                        user.split("/")[-1],
                        user.split("/")[0]))
                if len(files_list) > 0:
                    save_csv(artifact['function'](files_list),
                             config=self.config,
                             outfile=out_file,
                             quoting=0,
                             file_exists='OVERWRITE')
                    self.logger().info(
                        "{} extraction done for user {} at {}".format(
                            a_name,
                            user.split("/")[-1],
                            user.split("/")[0]))

        self.logger().info("RecentFiles extraction done")
        return []

    def lnk_parser(self, files_list):
        """ Parses all '.lnk' files found for a user.

        Parameters:
            files_list (list): list of lnk files to parse (relative to casedir)
        """

        headers = [
            "mtime", "atime", "ctime", "btime", "drive_type", "drive_sn",
            "machine_id", "path", "network_path", "size", "atributes",
            "description", "command line arguments", "file_id", "volume_id",
            "birth_file_id", "birth_volume_id", "f_mtime", "f_atime",
            "f_ctime", "file"
        ]

        data = self.filesystem.get_macb(files_list, vss=self.vss)

        for file in files_list:
            lnk = Lnk(os.path.join(self.myconfig('casedir'), file),
                      self.encoding,
                      logger=self.logger())

            lnk = lnk.get_lnk_info()

            if lnk == -1:
                self.logger().warning("Problems with file {}".format(file))
                yield OrderedDict(
                    zip(
                        headers, data[file] + [
                            "", "", "", "", "", "", "", "", "", "", "", "", "",
                            "", "", "", file
                        ]))
            else:
                yield OrderedDict(zip(headers, data[file] + lnk + [file]))

    def automaticDest_parser(self, files_list):
        """ Parses automaticDest files

        Parameters:
            files_list (list): list of automaticDestinations-ms files to parse
        """

        # TODO: Get the default Windows encoding and avoid trying many
        # TODO: Parse the files without DestList

        # Differences in DestList between versions at:
        # https://cyberforensicator.com/wp-content/uploads/2017/01/1-s2.0-S1742287616300202-main.2-14.pdf
        # Obtain the JumpList version from the header of DestList entry
        for jl in files_list:
            try:
                ole = olefile.OleFileIO(
                    os.path.join(self.myconfig('casedir'), jl))
            except Exception as exc:
                self.logger().warning(
                    "Problems creating OleFileIO with file {}\n{}".format(
                        jl, exc))
                continue
            try:
                data = ole.openstream('DestList').read()
                header_version, = struct.unpack('<L', data[0:4])
                version = 'w10' if header_version >= 3 else 'w7'
                self.logger().info(
                    "Windows version of Jumplists: {}".format(version))
                break
            except Exception:
                continue
            finally:
                ole.close()
        if 'version' not in locals():
            self.logger().warning(
                "Can't determine windows version. Assuming w10")
            version = 'w10'  # default

        # Offsets for different versions
        entry_ofs = {'w10': 130, 'w7': 114}
        id_entry_ofs = {'w10': ['<L', 88, 92], 'w7': ['<Q', 88, 96]}
        sz_ofs = {'w10': [128, 130], 'w7': [112, 114]}
        final_ofs = {'w10': 4, 'w7': 0}

        headers = [
            "Open date", "Application", "drive_type", "drive_sn", "machine_id",
            "path", "network_path", "size", "atributes", "description",
            "command line arguments", "file_id", "volume_id", "birth_file_id",
            "birth_volume_id", "f_mtime", "f_atime", "f_ctime", "file"
        ]

        # Main loop
        for jl in files_list:
            self.logger().info("Processing Jump list : {}".format(
                jl.split('/')[-1]))
            try:
                ole = olefile.OleFileIO(
                    os.path.join(self.myconfig('casedir'), jl))
            except Exception as exc:
                self.logger().warning(
                    "Problems creating OleFileIO with file {}\n{}".format(
                        jl, exc))
                continue

            if not ole.exists('DestList'):
                self.logger().warning(
                    "File {} does not have a DestList entry and can't be parsed"
                    .format(jl))
                ole.close()
                continue
            else:
                if not (len(ole.listdir()) - 1):
                    self.logger().warning(
                        "Olefile has detected 0 entries in file {}\nFile will be skipped"
                        .format(jl))
                    ole.close()
                    continue

                dest = ole.openstream('DestList')
                data = dest.read()
                if len(data) == 0:
                    self.logger().warning(
                        "No DestList data in file {}\nFile will be skipped".
                        format(jl))
                    ole.close()
                    continue
                self.logger().debug("DestList lenght: {}".format(
                    ole.get_size("DestList")))

                try:
                    # Double check number of entries
                    current_entries, pinned_entries = struct.unpack(
                        "<LL", data[4:12])
                    self.logger().debug(
                        "Current entries: {}".format(current_entries))
                except Exception as exc:
                    self.logger().warning(
                        "Problems unpacking header Destlist with file {}\n{}".
                        format(jl, exc))
                    # continue

                ofs = 32  # Header offset
                while ofs < len(data):
                    stream = data[ofs:ofs + entry_ofs[version]]
                    name = ""
                    try:
                        name = stream[72:88].decode()
                    except Exception:
                        self.logger().info("utf-8 decoding failed")
                        try:
                            name = stream[72:88].decode("cp1252")
                        except Exception as exc:
                            self.logger().info("cp1252 decoding failed")
                            self.logger().warning(
                                "Problems decoding name with file {}\n{}".
                                format(jl, exc))

                    name = name.replace("\00", "")

                    # Get id_entry of next entry
                    try:
                        id_entry, = struct.unpack(
                            id_entry_ofs[version][0],
                            stream[id_entry_ofs[version][1]:
                                   id_entry_ofs[version][2]])
                    except Exception as exc:
                        self.logger().warning(
                            "Problems unpacking id_entry with file {}\n{}".
                            format(jl, exc))
                        # self.logger().debug(stream[id_entry_ofs[version][1]:id_entry_ofs[version][2]])
                        break
                    id_entry = format(id_entry, '0x')

                    # Get MSFILETIME
                    try:
                        time0, time1 = struct.unpack("II", stream[100:108])
                    except Exception as exc:
                        self.logger().warning(
                            "Problems unpacking MSFILETIME with file {}\n{}".
                            format(jl, exc))
                        break

                    timestamp = getFileTime(time0, time1)

                    # sz: Length of Unicodestring data
                    try:
                        sz, = struct.unpack(
                            "h", stream[sz_ofs[version][0]:sz_ofs[version][1]])
                        # self.logger().debug("sz: {}".format(sz))
                    except Exception as exc:
                        self.logger().warning(
                            "Problems unpaking unicode string size with file {}\n{}"
                            .format(jl, exc))
                        # self.logger().debug(stream[sz_ofs[version][0]:sz_ofs[version][1]])
                        break

                    ofs += entry_ofs[version]
                    sz2 = sz * 2  # Unicode 2 bytes

                    # Get unicode path
                    path = ""
                    try:
                        path = data[ofs:ofs + sz2].decode()
                    except UnicodeDecodeError:
                        try:
                            path = data[ofs:ofs + sz2].decode("iso8859-15")
                        except Exception as exc:
                            self.logger().warning(
                                "Problems decoding path with file {}\n{}".
                                format(jl, exc))
                    path = path.replace("\00", "")

                    temp = tempfile.NamedTemporaryFile()
                    # Move to the next entry
                    ofs += sz2 + final_ofs[version]
                    try:
                        aux = ole.openstream(id_entry)
                    except Exception as exc:
                        self.logger().warning(
                            "Problems with file {}\n{}".format(jl, exc))
                        self.logger().warning("ole.openstream failed")
                        temp.close()
                        break
                    datos = aux.read()
                    temp.write(datos)
                    temp.flush()

                    # Extract lnk data
                    lnk = Lnk(temp.name, self.encoding, logger=self.logger())
                    lnk = lnk.get_lnk_info()

                    temp.close()

                    n_hash = os.path.basename(jl).split(".")[0]
                    if lnk == -1:
                        yield OrderedDict(
                            zip(headers, [
                                time.strftime("%Y-%m-%dT%H:%M:%SZ",
                                              time.gmtime(timestamp)),
                                self.dicID.get(n_hash, n_hash), "", "", "", "",
                                "", "", "", "", "", "", "", "", "", "", "", jl
                            ]))
                    else:
                        yield OrderedDict(
                            zip(headers, [
                                time.strftime("%Y-%m-%dT%H:%M:%SZ",
                                              time.gmtime(timestamp)),
                                self.dicID.get(n_hash, n_hash)
                            ] + lnk + [jl]))

            ole.close()

        self.logger().info("Jumlists parsed")

    def customDest_parser(self, files_list):
        """ Parses customDest files

        Parameters:
            files_list (list): list of customDestinations-ms files to parse
        """
        # regex = re.compile("\x4C\x00\x00\x00\x01\x14\x02\x00")
        split_str = b"\x4C\x00\x00\x00\x01\x14\x02\x00"

        headers = [
            "Application", "drive_type", "drive_sn", "machine_id", "path",
            "network_path", "size", "atributes", "description",
            "command line arguments", "file_id", "volume_id", "birth_file_id",
            "birth_volume_id", "f_mtime", "f_atime", "f_ctime", "file"
        ]

        for jl in files_list:
            with open(os.path.join(self.myconfig('casedir'), jl), "rb") as f:
                data = f.read()

            lnks = data.split(split_str)
            for lnk_b in lnks[1:]:
                f_temp = tempfile.NamedTemporaryFile()
                f_temp.write(b"\x4C\x00\x00\x00\x01\x14\x02\x00" + lnk_b)
                f_temp.flush()
                lnk = Lnk(f_temp.name, self.encoding, logger=self.logger())
                lnk = lnk.get_lnk_info()
                f_temp.close()

                n_hash = os.path.basename(jl).split(".")[0]
                if lnk == -1:
                    yield OrderedDict(
                        zip(headers, [
                            self.dicID.get(n_hash, n_hash), "", "", "", "", "",
                            "", "", "", "", "", "", "", "", "", "", "", jl
                        ]))
                else:
                    yield OrderedDict(
                        zip(headers,
                            [self.dicID.get(n_hash, n_hash)] + lnk + [jl]))

        self.logger().info("customDestinations parsed")
Example #21
class AmCache(base.job.BaseModule):
    """ Parses Amcache.hve registry hive. """
    def run(self, path=""):
        vss = self.myflag('vss')
        self.search = GetFiles(self.config, vss=vss)

        outfolder = self.myconfig('voutdir') if vss else self.myconfig(
            'outdir')
        check_directory(outfolder, create=True)

        amcache_hives = [path] if path else self.search.search("Amcache.hve$")
        for am_file in amcache_hives:
            self.amcache_path = os.path.join(self.myconfig('casedir'), am_file)
            partition = am_file.split("/")[2]
            self.logger().info("Parsing {}".format(am_file))
            self.outfile = os.path.join(outfolder,
                                        "amcache_{}.csv".format(partition))

            try:
                reg = Registry.Registry(
                    os.path.join(self.myconfig('casedir'), am_file))
                entries = self.parse_amcache_entries(reg)
                save_csv(entries,
                         outfile=self.outfile,
                         file_exists='OVERWRITE',
                         quoting=0)
            except KeyError:
                self.logger().warning(
                    "Expected subkeys not found in hive file: {}".format(
                        am_file))
            except Exception as exc:
                self.logger().warning("Problems parsing: {}. Error: {}".format(
                    am_file, exc))

        self.logger().info("Amcache.hve parsing finished")
        return []

    def parse_amcache_entries(self, registry):
        """ Return a generator of dictionaries describing each entry in the hive.

        Fields:
            * KeyLastWrite: Possible application first executed time (must be tested)
            * AppPath: application path inside the volume
            * AppName: friendly name for application, if any
            * Sha1Hash: binary file SHA-1 hash value
            * GUID: Volume GUID the application was executed from
        """
        # The hive may contain two different subkey structures:
        #   * {GUID}\\Root\\File
        #   * {GUID}\\Root\\InventoryApplicationFile
        found_key = ''
        structures = {
            'File': self._parse_File_entries,
            'InventoryApplicationFile': self._parse_IAF_entries
        }
        for key, func in structures.items():
            try:
                volumes = registry.open("Root\\{}".format(key))
                found_key = key
                self.logger().debug(
                    'Parsing entries in key: Root\\{}'.format(key))
                for app in func(volumes):
                    yield app
            except Registry.RegistryKeyNotFoundException:
                self.logger().info('Key "Root\\{}" not found'.format(key))

        if not found_key:
            raise KeyError

    def _parse_File_entries(self, volumes):
        """ Parses File subkey entries for amcache hive """
        fields = {
            'LastModified': "17",
            'AppPath': "15",
            'AppName': "0",
            'Sha1Hash': "101"
        }
        for volumekey in volumes.subkeys():
            for filekey in volumekey.subkeys():
                app = OrderedDict([('KeyLastWrite', WINDOWS_TIMESTAMP_ZERO),
                                   ('AppPath', ''), ('AppName', ''),
                                   ('Sha1Hash', ''),
                                   ('LastModified', WINDOWS_TIMESTAMP_ZERO),
                                   ('GUID', '')])
                app['GUID'] = volumekey.path().split('}')[0][1:]
                app['KeyLastWrite'] = filekey.timestamp()
                for f in fields:
                    try:
                        val = filekey.value(fields[f]).value()
                        if f == 'Sha1Hash':
                            val = val[4:]
                        elif f == 'LastModified':
                            val = parse_windows_timestamp(val).strftime(
                                "%Y-%m-%d %H:%M:%S")
                        app.update({f: val})
                    except Registry.RegistryValueNotFoundException:
                        pass
                yield app

    def _parse_IAF_entries(self, volumes):
        """ Parses InventoryApplicationFile subkey entries for amcache hive.

        Yields: dict with keys'FirstRun','AppPath') """
        names = {
            'LowerCaseLongPath': 'AppPath',
            'FileId': 'Sha1Hash',
            'ProductName': 'AppName'
        }
        for volumekey in volumes.subkeys():
            app = OrderedDict([('KeyLastWrite', WINDOWS_TIMESTAMP_ZERO),
                               ('AppPath', ''), ('AppName', ''),
                               ('Sha1Hash', ''),
                               ('LastModified', WINDOWS_TIMESTAMP_ZERO),
                               ('GUID', '')])
            app['GUID'] = volumekey.path().split('}')[0][1:]
            app['KeyLastWrite'] = volumekey.timestamp()
            for v in volumekey.values():
                if v.name() in ['LowerCaseLongPath', 'ProductName']:
                    app.update({names.get(v.name(), v.name()): v.value()})
                elif v.name() == 'FileId':
                    sha = v.value()[
                        4:]  # the stored SHA-1 hash is padded with four leading zeros
                    app.update({names.get(v.name(), v.name()): sha})
            yield app
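parse_amcache_entries also depends on a parse_windows_timestamp helper not shown here. A minimal sketch, assuming the registry value is a raw FILETIME (100-nanosecond ticks since 1601-01-01), as Windows stores it:

import datetime


def parse_windows_timestamp(value):
    """ Hypothetical sketch: convert a FILETIME integer to a datetime """
    return datetime.datetime(1601, 1, 1) + datetime.timedelta(
        microseconds=value // 10)

# parse_windows_timestamp(131092128000000000) -> datetime(2016, 6, 1, 0, 0)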
Example #22
    def GetDhcpInfo(self):
        '''Read dhcp leases & interface entries

           Based on mac_apt plugin from https://github.com/ydkhatri/mac_apt
        '''
        search = GetFiles(self.config, vss=self.myflag("vss"))
        interfaces_path = search.search("/private/var/db/dhcpclient/leases$")

        out = open(os.path.join(self.myconfig('outdir'), 'Network_DHCP.csv'),
                   'w')
        writer = csv.writer(out, delimiter="|", quotechar='"')
        headers = [
            "Interface", "MAC_Address", "IPAddress", "LeaseLength",
            "LeaseStartDate", "PacketData", "RouterHardwareAddress",
            "RouterIPAddress", "SSID", "Source"
        ]
        writer.writerow(headers)

        for interface in interfaces_path:
            for name in sorted(
                    os.listdir(
                        os.path.join(self.myconfig('casedir'), interface))):
                if name.find(",") > 0:
                    # Process plist
                    name_no_ext = os.path.splitext(
                        name
                    )[0]  # not needed as there is no .plist extension on these files
                    if_name, mac_address = name_no_ext.split(",")
                    self.logger().info(
                        "Found mac address = {} on interface {}".format(
                            mac_address, if_name))

                    self.logger().debug("Trying to read {}".format(name))

                    plist = biplist.readPlist(
                        os.path.join(self.myconfig('casedir'), interface,
                                     name))
                    interface_info = {}
                    for c in headers:
                        interface_info[c] = ""
                    interface_info['Source'] = os.path.join(
                        '/private/var/db/dhcpclient/leases', name)
                    interface_info['Interface'] = if_name
                    interface_info['MAC_Address'] = mac_address

                    for item, value in plist.items():
                        if item in ('IPAddress', 'LeaseLength',
                                    'LeaseStartDate', 'RouterIPAddress',
                                    'SSID'):
                            interface_info[item] = value
                        elif item == 'RouterHardwareAddress':  # convert binary blob to MAC address
                            data = value.hex().upper()
                            data = [data[2 * n:2 * n + 2] for n in range(6)]
                            interface_info[item] = ":".join(data)
                        elif item == 'PacketData':
                            interface_info['PacketData'] = value.hex().upper()
                        else:
                            self.logger().info(
                                "Found unknown item in plist: ITEM=" + item +
                                " VALUE=" + str(value))
                    writer.writerow([interface_info[c] for c in headers])
                else:
                    self.logger().info(
                        "Found unexpected file, not processing /private/var/db/dhcpclient/leases/{}"
                        .format(name))
            # Done processing interfaces!
        out.close()
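The hex-pair slicing used for RouterHardwareAddress works for any 6-byte hardware-address blob; a standalone sketch with a made-up value:

# Sketch of the binary-blob-to-MAC conversion above, on a made-up 6-byte blob
blob = bytes([0x00, 0x1A, 0x2B, 0x3C, 0x4D, 0x5E])
hexed = blob.hex().upper()
mac = ":".join(hexed[2 * n:2 * n + 2] for n in range(6))
print(mac)  # 00:1A:2B:3C:4D:5E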
Example No. 23
    def GetNetworkInterface2Info(self):
        '''Read interface info from /Library/Preferences/SystemConfiguration/preferences.plist

        Based on mac_apt plugin from https://github.com/ydkhatri/mac_apt
        '''
        search = GetFiles(self.config, vss=self.myflag("vss"))
        network = search.search(
            "/Library/Preferences/SystemConfiguration/preferences.plist$")

        with open(os.path.join(self.myconfig('outdir'), 'Network_Details.csv'),
                  'w') as out:
            writer = csv.writer(out, delimiter="|", quotechar='"')
            headers = [
                "UUID", "IPv4.ConfigMethod", "IPv6.ConfigMethod", "DeviceName",
                "Hardware", "Type", "SubType", "UserDefinedName",
                "Proxies.ExceptionsList", "SMB.NetBIOSName", "SMB.Workgroup",
                "PPP", "Modem"
            ]
            writer.writerow(headers)
            for net in network:
                plist = biplist.readPlist(
                    os.path.join(self.myconfig('casedir'), net))
                for uuid, service in plist['NetworkServices'].items():
                    data = [uuid] + [""] * 12
                    if 'IPv4' in service:
                        data[1] = service['IPv4']['ConfigMethod']
                    if 'IPv6' in service:
                        data[2] = service['IPv6']['ConfigMethod']
                    if 'Interface' in service:
                        interface = service['Interface']
                        data[3] = interface['DeviceName']
                        data[4] = interface['Hardware']
                        data[5] = interface['Type']
                        if 'SubType' in interface:
                            data[6] = interface['SubType']
                        data[7] = interface['UserDefinedName']
                    if 'Proxies' in service and 'ExceptionsList' in service[
                            'Proxies']:
                        data[8] = ",".join(
                            service['Proxies']['ExceptionsList'])
                    if 'SMB' in service:
                        try:
                            data[9] = service['SMB']['NetBIOSName']
                            data[10] = service['SMB']['Workgroup']
                        except Exception:
                            pass
                    if 'PPP' in service:
                        data[11] = str(service['PPP'])
                    if 'Modem' in service:
                        data[12] = str(service['Modem'])
                    writer.writerow(data)
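The positional data list must stay in lockstep with headers; a hedged alternative (a sketch, not this module's API) keys the row by header name so a reordered header cannot silently shift columns:

import csv
import sys

# Sketch with made-up data: keying rows by header name avoids index drift
headers = ["UUID", "IPv4.ConfigMethod"]
service = {'IPv4': {'ConfigMethod': 'DHCP'}}   # made-up plist fragment
row = {h: "" for h in headers}
row['UUID'] = 'example-uuid'
row['IPv4.ConfigMethod'] = service.get('IPv4', {}).get('ConfigMethod', "")
writer = csv.writer(sys.stdout, delimiter="|", quotechar='"')
writer.writerow([row[h] for h in headers])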
Example No. 24
    def GetNetworkInterfaceInfo(self):
        '''Read interface info from NetworkInterfaces.plist

        Modified from the networking plugin at https://github.com/ydkhatri/mac_apt
        '''

        search = GetFiles(self.config, vss=self.myflag("vss"))
        network = search.search(
            "/Library/Preferences/SystemConfiguration/NetworkInterfaces.plist$"
        )
        classes = [
            'Active', 'BSD Name', 'IOBuiltin', 'IOInterfaceNamePrefix',
            'IOInterfaceType', 'IOInterfaceUnit', 'IOPathMatch',
            'SCNetworkInterfaceType'
        ]

        out = open(
            os.path.join(self.myconfig('outdir'), 'Network_Interfaces.csv'),
            'w')
        writer = csv.writer(out, delimiter="|", quotechar='"')
        headers = [
            "Category", "Active", "BSD Name", "IOBuiltin",
            "IOInterfaceNamePrefix", "IOInterfaceType", "IOInterfaceUnit",
            "IOMACAddress", "IOPathMatch", "SCNetworkInterfaceInfo",
            "SCNetworkInterfaceType", "Source"
        ]
        writer.writerow(headers)

        for net in network:
            self.logger().debug("Trying to read {}".format(net))
            plist = biplist.readPlist(
                os.path.join(self.myconfig('casedir'), net))
            try:
                self.logger().info("Model = %s" % plist['Model'])
            except Exception:
                pass
            for category, cat_array in plist.items():  # each value is an array of interfaces
                if not category.startswith('Interface'):
                    if category != 'Model':
                        self.logger().debug('Skipping %s' % category)
                    continue
                for interface in cat_array:
                    interface_info = {'Category': category, 'Source': net}
                    for c in classes:
                        interface_info[c] = ""
                    for item, value in interface.items():
                        if item in classes:
                            interface_info[item] = value
                        elif item == 'IOMACAddress':  # convert binary blob to MAC address
                            data = value.hex().upper()
                            data = [data[2 * n:2 * n + 2] for n in range(6)]
                            interface_info[item] = ":".join(data)
                        elif item == 'SCNetworkInterfaceInfo':
                            try:
                                interface_info[
                                    'SCNetworkInterfaceInfo'] = value[
                                        'UserDefinedName']
                            except Exception:
                                pass
                        else:
                            self.logger().info(
                                "Found unknown item in plist: ITEM=" + item +
                                " VALUE=" + str(value))
                    # Use .get(): 'IOMACAddress' and 'SCNetworkInterfaceInfo' are
                    # only set when present in the plist, unlike the 'classes' keys.
                    writer.writerow(
                        [interface_info.get(c, "") for c in headers])
        out.close()
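biplist is used throughout these examples but is unmaintained; since Python 3.4 the standard library's plistlib reads binary plists as well. A minimal sketch, assuming a hypothetical local file:

import plistlib

# Sketch: plistlib.load() handles both XML and binary plists on Python 3.4+
with open('/tmp/NetworkInterfaces.plist', 'rb') as fh:  # hypothetical path
    plist = plistlib.load(fh)
print(plist.get('Model'))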
Example No. 25
    def run(self, path=""):
        if not os.path.isdir(self.myconfig('mountdir')):
            raise base.job.RVTError("Folder {} does not exist".format(
                self.myconfig('mountdir')))

        info_path = self.myconfig('outdir')
        check_folder(info_path)
        search = GetFiles(self.config, vss=self.myflag("vss"))
        asl_files = list(search.search(r"var/log/asl/.*\.asl$"))

        # asl dump
        with open(os.path.join(info_path, "asldump.csv"), "w") as out_asl:
            writer = csv.writer(out_asl, delimiter="|", quotechar='"')
            headers = [
                "Timestamp", "Host", "Sender", "PID", "Reference Process",
                "Reference PID", "Facility", "Level", "Message",
                "Other details"
            ]
            writer.writerow(headers)
            for file in asl_files:
                self.logger().info("Processing: {}".format(file))
                try:
                    f = open(os.path.join(self.myconfig('casedir'), file),
                             "rb")
                except IOError as e:
                    self.logger().error(
                        "Could not open file '{}' ({}): Skipping this file".
                        format(file, e))
                    continue

                try:
                    db = ccl_asldb.AslDb(f)
                except ccl_asldb.AslDbError as e:
                    self.logger().error(
                        "Could not read file as ASL DB '{}' ({}): Skipping this file"
                        .format(file, e))
                    f.close()
                    continue

                for record in db:
                    writer.writerow([
                        record.timestamp.isoformat(), record.host,
                        record.sender,
                        str(record.pid),
                        str(record.refproc),
                        str(record.refpid), record.facility, record.level_str,
                        record.message.replace("\n",
                                               " ").replace("\t", "    "),
                        "; ".join([
                            "{0}='{1}'".format(key, record.key_value_dict[key])
                            for key in record.key_value_dict
                        ]).replace("\n", " ").replace("\t", "    ")
                    ])
                f.close()

        asl_path = list(set(os.path.dirname(asl) for asl in asl_files))

        for asl_dir in asl_path:  # renamed to avoid shadowing the 'path' argument
            self.logger().info(
                "Processing files from folder: {}".format(asl_dir))
            OSX_asl_login_timeline.__dowork__(
                (os.path.join(self.myconfig('casedir'), asl_dir), ),
                (os.path.join(self.myconfig('outdir'), "login_power.md"), ))
        self.logger().info("Done ASL")
        return []
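All of these writers emit pipe-delimited CSV, so reading the output back requires the same dialect; a short sketch, assuming the asldump.csv produced above sits in the working directory:

import csv

# Sketch: read asldump.csv back with the same '|' dialect it was written with
with open('asldump.csv', newline='') as fh:
    reader = csv.reader(fh, delimiter="|", quotechar='"')
    headers = next(reader)
    for row in reader:
        print(row[0])  # Timestamp column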
Example No. 26
    def get_hive_files(self, path):
        """ Retrieves all hives found in source if path is not specified.

            Arguments:
                path (str): path to a specific registry hive
        """
        if path:
            if os.path.exists(path):
                return path
            else:
                raise base.job.RVTError('path {} does not exist'.format(path))

        check_directory(self.myconfig('mountdir'), error_missing=True)

        regfiles = {}

        Find = GetFiles(self.config, vss=self.myflag("vss"))

        for main_hive in ['SYSTEM', 'SOFTWARE', 'SAM', 'SECURITY']:
            for item in Find.search(
                    "/Windows/System32/config/{}$".format(main_hive)):
                hive = item.split('/')[-1].lower()
                if hive not in regfiles:  # Get only the first hit
                    regfiles[hive] = os.path.join(self.myconfig('casedir'),
                                                  item)

        if "software" not in regfiles.keys():
            self.logger().warning('No SOFTWARE hive found in source')
            return {}

        NTUSER = Find.search(
            r"/(Documents and settings|users)/.*/(NTUSER|UsrClass)\.dat$")

        usr = defaultdict(list)
        regfiles["ntuser"] = {}
        regfiles["usrclass"] = {}

        for item in NTUSER:
            aux = re.search("(Documents and settings|Users)/([^/]*)/", item,
                            re.I)
            user = aux.group(2)
            hive_name = 'ntuser' if item.lower().endswith(
                "ntuser.dat") else 'usrclass'
            if user not in usr[hive_name]:
                usr[hive_name].append(user)
            else:  # Get only the first hit
                continue
            if hive_name == "ntuser":
                regfiles["ntuser"][user] = os.path.join(
                    self.myconfig('casedir'), item)
            else:
                regfiles["usrclass"][user] = os.path.join(
                    self.myconfig('casedir'), item)

        amcache = list(
            Find.search(r"/Windows/AppCompat/Programs/Amcache\.hve$"))
        if amcache:
            regfiles["amcache"] = os.path.join(self.myconfig('casedir'),
                                               amcache[0])
        syscache = list(Find.search(r"/syscache\.hve$"))
        if syscache:
            regfiles["syscache"] = os.path.join(self.myconfig('casedir'),
                                                syscache[0])

        return regfiles
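The returned mapping mixes flat paths for the system hives with per-user sub-dicts for NTUSER.DAT and UsrClass.dat; an entirely hypothetical illustration of its shape:

# Hypothetical example of what get_hive_files() returns; real paths
# depend on the mounted image.
regfiles = {
    'system':   '/case/mnt/p01/Windows/System32/config/SYSTEM',
    'software': '/case/mnt/p01/Windows/System32/config/SOFTWARE',
    'sam':      '/case/mnt/p01/Windows/System32/config/SAM',
    'security': '/case/mnt/p01/Windows/System32/config/SECURITY',
    'ntuser':   {'alice': '/case/mnt/p01/Users/alice/NTUSER.DAT'},
    'usrclass': {'alice': '/case/mnt/p01/Users/alice/AppData/Local/Microsoft/Windows/UsrClass.dat'},
    'amcache':  '/case/mnt/p01/Windows/AppCompat/Programs/Amcache.hve',
}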
Example No. 27
    def run(self, path=""):
        if not os.path.isdir(self.myconfig('mountdir')):
            raise base.job.RVTError("Folder {} does not exist".format(
                self.myconfig('mountdir')))

        search = GetFiles(self.config, vss=self.myflag("vss"))
        knowledgec = search.search("/knowledgec.db$")

        knowledgec_path = self.myconfig('outdir')
        check_folder(knowledgec_path)

        for k in knowledgec:
            self.logger().info("Processing file {}".format(k))
            if k.find('/Users/') < 0:
                output = os.path.join(knowledgec_path, "private.txt")
            else:
                aux = re.search("/Users/([^/]+)", k)
                output = os.path.join(knowledgec_path,
                                      "{}.txt".format(aux.group(1)))

            with open(output, "w") as out:
                with sqlite3.connect('file://{}?mode=ro'.format(
                        os.path.join(self.myconfig('casedir'), k)),
                                     uri=True) as conn:
                    conn.text_factory = str

                    c = conn.cursor()
                    c.execute(
                        'SELECT DISTINCT ZOBJECT.ZSTREAMNAME FROM ZOBJECT ORDER BY ZSTREAMNAME;'
                    )

                    for i in c.fetchall():
                        out.write("{}\n".format(i[0]))

                    c.execute(
                        '''SELECT datetime(ZOBJECT.ZCREATIONDATE+978307200,'UNIXEPOCH', 'LOCALTIME') as "ENTRY CREATION", CASE ZOBJECT.ZSTARTDAYOFWEEK
    WHEN "1" THEN "Sunday"
    WHEN "2" THEN "Monday"
    WHEN "3" THEN "Tuesday"
    WHEN "4" THEN "Wednesday"
    WHEN "5" THEN "Thursday"
    WHEN "6" THEN "Friday"
    WHEN "7" THEN "Saturday"
END "DAY OF WEEK",ZOBJECT.ZSECONDSFROMGMT/3600 AS "GMT OFFSET", datetime(ZOBJECT.ZSTARTDATE+978307200,'UNIXEPOCH', 'LOCALTIME') as "START",
datetime(ZOBJECT.ZENDDATE+978307200,'UNIXEPOCH', 'LOCALTIME') as "END", (ZOBJECT.ZENDDATE-ZOBJECT.ZSTARTDATE) as "USAGE IN SECONDS",
ZOBJECT.ZSTREAMNAME,ZOBJECT.ZVALUESTRING FROM ZOBJECT WHERE ZSTREAMNAME IS "/app/inFocus" ORDER BY "START";'''
                    )

                    out.write(
                        "\n\nENTRY CREATION|DAY OF WEEK|GMT OFFSET|START|END|USAGE IN SECONDS|ZSTREAMNAME|ZVALUESTRING\n"
                    )
                    for i in c.fetchall():
                        out.write("{}|{}|{}|{}|{}|{}|{}|{}\n".format(
                            i[0], i[1], i[2], i[3], i[4], i[5], i[6], i[7]))

                    c.execute('''SELECT
datetime(ZOBJECT.ZCREATIONDATE+978307200,'UNIXEPOCH', 'LOCALTIME') as "ENTRY CREATION", ZOBJECT.ZSECONDSFROMGMT/3600 AS "GMT OFFSET",
CASE ZOBJECT.ZSTARTDAYOFWEEK
    WHEN "1" THEN "Sunday"
    WHEN "2" THEN "Monday"
    WHEN "3" THEN "Tuesday"
    WHEN "4" THEN "Wednesday"
    WHEN "5" THEN "Thursday"
    WHEN "6" THEN "Friday"
    WHEN "7" THEN "Saturday"
END "DAY OF WEEK", datetime(ZOBJECT.ZSTARTDATE+978307200,'UNIXEPOCH', 'LOCALTIME') as "START",
datetime(ZOBJECT.ZENDDATE+978307200,'UNIXEPOCH', 'LOCALTIME') as "END", (ZOBJECT.ZENDDATE-ZOBJECT.ZSTARTDATE) as "USAGE IN SECONDS", ZOBJECT.ZSTREAMNAME,
ZOBJECT.ZVALUESTRING, ZSTRUCTUREDMETADATA.Z_DKAPPLICATIONACTIVITYMETADATAKEY__ACTIVITYTYPE AS "ACTIVITY TYPE",
ZSTRUCTUREDMETADATA.Z_DKAPPLICATIONACTIVITYMETADATAKEY__TITLE as "TITLE", ZSTRUCTUREDMETADATA.Z_DKAPPLICATIONACTIVITYMETADATAKEY__USERACTIVITYREQUIREDSTRING as "ACTIVITY STRING",
datetime(ZSTRUCTUREDMETADATA.Z_DKAPPLICATIONACTIVITYMETADATAKEY__EXPIRATIONDATE+978307200,'UNIXEPOCH', 'LOCALTIME') as "EXPIRATION DATE"
FROM ZOBJECT left join ZSTRUCTUREDMETADATA on ZOBJECT.ZSTRUCTUREDMETADATA = ZSTRUCTUREDMETADATA.Z_PK WHERE ZSTREAMNAME is "/app/activity" or ZSTREAMNAME is "/app/inFocus"
ORDER BY "START";''')

                    out.write(
                        "\n\nENTRY CREATION|GMT OFFSET|DAY OF WEEK|START|END|USAGE IN SECONDS|ZSTREAMNAME|ZVALUESTRING|ACTIVITY TYPE|TITLE|ACTIVITY STRING|EXPIRATION DATE\n"
                    )
                    for i in c.fetchall():
                        out.write(
                            "{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}\n".format(
                                i[0], i[1], i[2], i[3], i[4], i[5], i[6], i[7],
                                i[8], i[9], i[10], i[11]))

                    c.execute('''SELECT
datetime(ZOBJECT.ZCREATIONDATE+978307200,'UNIXEPOCH', 'LOCALTIME') as "ENTRY CREATION", CASE ZOBJECT.ZSTARTDAYOFWEEK
    WHEN "1" THEN "Sunday"
    WHEN "2" THEN "Monday"
    WHEN "3" THEN "Tuesday"
    WHEN "4" THEN "Wednesday"
    WHEN "5" THEN "Thursday"
    WHEN "6" THEN "Friday"
    WHEN "7" THEN "Saturday"
END "DAY OF WEEK", datetime(ZOBJECT.ZSTARTDATE+978307200,'UNIXEPOCH', 'LOCALTIME') as "START", datetime(ZOBJECT.ZENDDATE+978307200,'UNIXEPOCH', 'LOCALTIME') as "END",
(ZOBJECT.ZENDDATE-ZOBJECT.ZSTARTDATE) as "USAGE IN SECONDS", ZOBJECT.ZSTREAMNAME, ZOBJECT.ZVALUESTRING, ZSTRUCTUREDMETADATA.Z_DKAPPLICATIONACTIVITYMETADATAKEY__ACTIVITYTYPE AS "ACTIVITY TYPE",
ZSTRUCTUREDMETADATA.Z_DKAPPLICATIONACTIVITYMETADATAKEY__TITLE as "TITLE", ZSTRUCTUREDMETADATA.Z_DKAPPLICATIONACTIVITYMETADATAKEY__USERACTIVITYREQUIREDSTRING as "ACTIVITY STRING",
datetime(ZSTRUCTUREDMETADATA.Z_DKAPPLICATIONACTIVITYMETADATAKEY__EXPIRATIONDATE+978307200,'UNIXEPOCH', 'LOCALTIME') as "EXPIRATION DATE",
ZSTRUCTUREDMETADATA.Z_DKINTENTMETADATAKEY__INTENTCLASS as "INTENT CLASS", ZSTRUCTUREDMETADATA.Z_DKINTENTMETADATAKEY__INTENTVERB as "INTENT VERB",
ZSTRUCTUREDMETADATA.Z_DKINTENTMETADATAKEY__SERIALIZEDINTERACTION as "SERIALIZED INTERACTION", ZSOURCE.ZBUNDLEID FROM ZOBJECT
left join ZSTRUCTUREDMETADATA on ZOBJECT.ZSTRUCTUREDMETADATA = ZSTRUCTUREDMETADATA.Z_PK left join ZSOURCE on ZOBJECT.ZSOURCE = ZSOURCE.Z_PK
WHERE ZSTREAMNAME is "/app/activity" or ZSTREAMNAME is "/app/inFocus" or ZSTREAMNAME is "/app/intents" ORDER BY "START";'''
                              )

                    # The query above returns 15 columns; keep the header and
                    # the row format in sync with it.
                    out.write(
                        "\n\nENTRY CREATION|DAY OF WEEK|START|END|USAGE IN SECONDS|ZSTREAMNAME|ZVALUESTRING|ACTIVITY TYPE|TITLE|ACTIVITY STRING|EXPIRATION DATE|INTENT CLASS|INTENT VERB|SERIALIZED INTERACTION|ZBUNDLEID\n"
                    )
                    for i in c.fetchall():
                        out.write("|".join(str(v) for v in i) + "\n")

        self.logger().info("Done parsing KnowledgeC")
        return []
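The recurring +978307200 in these queries converts Core Data timestamps, which count seconds from 2001-01-01 00:00:00 UTC, to the Unix epoch expected by SQLite's datetime(); a quick check with a made-up value:

from datetime import datetime, timezone

COCOA_EPOCH_OFFSET = 978307200  # seconds between 1970-01-01 and 2001-01-01 (UTC)
cocoa_ts = 600000000            # made-up ZSTARTDATE value
unix_ts = cocoa_ts + COCOA_EPOCH_OFFSET
print(datetime.fromtimestamp(unix_ts, tz=timezone.utc))  # 2020-01-06 10:40:00+00:00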
Example No. 28
class ScheduledTasks(base.job.BaseModule):
    """ Parses job files and schedlgu.txt. """
    def run(self, path=""):
        self.vss = self.myflag('vss')
        self.search = GetFiles(self.config, vss=self.myflag("vss"))
        self.outfolder = self.myconfig(
            'voutdir') if self.vss else self.myconfig('outdir')
        check_directory(self.outfolder, create=True)

        self.logger().info(
            "Parsing artifacts from scheduled tasks files (.job)")
        self.parse_Task()
        self.logger().info(
            "Parsing artifacts from Task Scheduler Service log files (schedlgu.txt)"
        )
        self.parse_schedlgu()
        return []

    def parse_Task(self):
        jobs_files = list(self.search.search(r"\.job$"))
        partition_list = set()
        for f in jobs_files:
            partition_list.add(f.split("/")[2])

        csv_files = {}
        writers = {}

        for p in partition_list:
            csv_files[p] = open(
                os.path.join(self.outfolder, "jobs_files_%s.csv" % p), "w")
            writers[p] = csv.writer(csv_files[p], delimiter=";", quotechar='"')
            writers[p].writerow([
                "Product Info", "File Version", "UUID", "Maximum Run Time",
                "Exit Code", "Status", "Flags", "Date Run",
                "Running Instances", "Application", "Working Directory",
                "User", "Comment", "Scheduled Date"
            ])

        for file in jobs_files:
            partition = file.split("/")[2]
            with open(os.path.join(self.myconfig('casedir'), file), "rb") as f:
                data = f.read()
            job = jobparser.Job(data)
            writers[partition].writerow([
                jobparser.products.get(job.ProductInfo), job.FileVersion,
                job.UUID, job.MaxRunTime, job.ExitCode,
                jobparser.task_status.get(job.Status, "Unknown Status"),
                job.Flags_verbose, job.RunDate, job.RunningInstanceCount,
                "{} {}".format(job.Name, job.Parameter), job.WorkingDirectory,
                job.User, job.Comment, job.ScheduledDate
            ])
        for csv_file in csv_files.values():
            csv_file.close()

        self.logger().info("Finished extraction from scheduled tasks .job")

    def parse_schedlgu(self):
        sched_files = list(self.search.search(r"schedlgu\.txt$"))
        for file in sched_files:
            partition = file.split("/")[2]
            save_csv(self._parse_schedlgu(
                os.path.join(self.myconfig('casedir'), file)),
                     outfile=os.path.join(self.outfolder,
                                          'schedlgu_{}.csv'.format(partition)),
                     file_exists='OVERWRITE',
                     quoting=0)
        self.logger().info("Finished extraction from schedlgu.txt")

    def _parse_schedlgu(self, file):
        with open(file, 'r', encoding='utf16') as sched:
            dates = {
                'start': WINDOWS_TIMESTAMP_ZERO,
                'end': WINDOWS_TIMESTAMP_ZERO
            }
            parsed_entry = False
            for line in sched:
                if line == '\n':
                    continue
                elif line.startswith('"'):
                    service = line.rstrip('\n').strip('"')
                    if parsed_entry:
                        yield OrderedDict([('Service', service),
                                           ('Started', dates['start']),
                                           ('Finished', dates['end'])])
                    parsed_entry = False
                    dates = {
                        'start': WINDOWS_TIMESTAMP_ZERO,
                        'end': WINDOWS_TIMESTAMP_ZERO
                    }
                    continue
                for state, words in {
                        'start': ['Started', 'Iniciado'],
                        'end': ['Finished', 'Finalizado']
                }.items():
                    for word in words:
                        if line.startswith('\t{}'.format(word)):
                            try:
                                # Parse from the first digit (the timestamp)
                                start = re.search(r'\d', line).start()
                                dates[state] = dateutil.parser.parse(
                                    line[start:].rstrip('\n')).strftime(
                                        "%Y-%m-%d %H:%M:%S")
                                parsed_entry = True
                            except Exception:
                                pass
                            break