Example #1
    def run(self, path=""):
        self.search = GetFiles(self.config, vss=self.myflag("vss"))
        self.vss = self.myflag('vss')
        self.logger().info("Parsing ShimCache from registry")

        outfolder = self.myconfig('voutdir') if self.vss else self.myconfig(
            'outdir')
        SYSTEM = list(self.search.search(r"windows/System32/config/SYSTEM$"))
        check_directory(outfolder, create=True)

        partition_list = set()
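        # Collect the partition identifier (third path component, e.g. p01, or vNpNN for a volume shadow copy) of each SYSTEM hive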
        for f in SYSTEM:
            aux = re.search(r"([vp\d]*)/windows/System32/config", f, re.I)
            partition_list.add(aux.group(1))

        output_files = {
            p: os.path.join(outfolder, "shimcache_%s.csv" % p)
            for p in partition_list
        }

        for f in SYSTEM:
            save_csv(self.parse_ShimCache_hive(f),
                     outfile=output_files[f.split("/")[2]],
                     file_exists='OVERWRITE',
                     quoting=0)

        self.logger().info("Finished extraction from ShimCache")
        return []
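A note on the partition identifier used throughout these examples: the modules take the third component of the artifact path relative to the case directory (the pXX mount folder, or vNpXX for a volume shadow copy), which is also what the regular expression above captures. A minimal, self-contained sketch of that derivation; the paths below are illustrative, not taken from a real case:

import re

# Illustrative paths: rvt2 mounts partitions under <source>/mnt/pXX (or vNpXX for shadow copies)
paths = [
    "source/mnt/p03/windows/System32/config/SYSTEM",
    "source/mnt/v1p03/windows/System32/config/SYSTEM",
]
for f in paths:
    aux = re.search(r"([vp\d]*)/windows/System32/config", f, re.I)
    # Both expressions yield the same identifier: 'p03' and 'v1p03'
    print(f.split("/")[2], aux.group(1) if aux else None)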
Example #2
    def run(self, path=""):
        vss = self.myflag('vss')
        self.search = GetFiles(self.config, vss=vss)

        outfolder = self.myconfig('voutdir') if vss else self.myconfig(
            'outdir')
        check_directory(outfolder, create=True)

        amcache_hives = [path] if path else self.search.search("Amcache.hve$")
        for am_file in amcache_hives:
            self.amcache_path = os.path.join(self.myconfig('casedir'), am_file)
            partition = am_file.split("/")[2]
            self.logger().info("Parsing {}".format(am_file))
            self.outfile = os.path.join(outfolder,
                                        "amcache_{}.csv".format(partition))

            try:
                reg = Registry.Registry(
                    os.path.join(self.myconfig('casedir'), am_file))
                entries = self.parse_amcache_entries(reg)
                save_csv(entries,
                         outfile=self.outfile,
                         file_exists='OVERWRITE',
                         quoting=0)
            except KeyError:
                self.logger().warning(
                    "Expected subkeys not found in hive file: {}".format(
                        am_file))
            except Exception as exc:
                self.logger().warning("Problems parsing: {}. Error: {}".format(
                    am_file, exc))

        self.logger().info("Amcache.hve parsing finished")
        return []
Example #3
File: __init__.py Project: XorgX304/rvt2
    def run(self, path):
        self.check_params(path, check_path=True)
        output_dir = self.myconfig('outdir')
        check_directory(output_dir, create=True)

        if not os.path.isabs(path):
            path = os.path.join(self.myconfig('casedir'), path)

        try:
            frame_rate = self._snapshowFrequency(path)
            export_path = os.path.join(
                output_dir, relative_path(path, self.myconfig('casedir')))
            self.logger().debug('path=%s frame_rate=%s export_path=%s', path,
                                frame_rate, export_path)

            check_directory(export_path, create=True)
            base.commands.run_command(
                r'ffmpeg -loglevel quiet -i "{}" -vf fps={} "{}"'.format(
                    path, frame_rate, os.path.join(export_path,
                                                   'img%03d.jpg')))
            # subprocess.call(['ffmpeg', '-i', filename, '-vf', 'fps=%s' % frame_rate, r'img%03d.jpg'])
            return [
                dict(path=relative_path(path, self.myconfig('casedir')),
                     preview=list(
                         map(
                             lambda p: relative_path(
                                 os.path.join(export_path, p),
                                 self.myconfig('casedir')),
                             os.listdir(export_path))))
            ]
        except Exception as exc:
            self.logger().warning(
                'Cannot create snapshots from path=%s exc=%s', path, exc)
            return []
Example #4
File: RVT_srum.py Project: XorgX304/rvt2
    def run(self, path=""):
        """ Extracts SRUM artifacts of a disk """
        vss = self.myflag('vss')
        SRUM_TEMPLATE = os.path.join(self.myconfig('rvthome'), "plugins/external/srum-dump/SRUM_TEMPLATE2.xlsx")
        srum = os.path.join(self.myconfig('rvthome'), "plugins/external/srum-dump/srum_dump2.py")
        check_file(SRUM_TEMPLATE, error_missing=True)

        Search = GetFiles(self.config, vss=self.myflag("vss"))
        SOFTWARE = list(Search.search('windows/system32/config/SOFTWARE$'))
        SRUDB = list(Search.search('/windows/system32/sru/SRUDB.dat$'))
        python3 = os.path.join(self.myconfig('rvthome'), ".venv/bin/python3")

        out_folder = self.myconfig('voutdir') if vss else self.myconfig('outdir')
        check_directory(out_folder, create=True)

        if not SRUDB:
            self.logger().info("SRUDB.dat not found in any partition of the disk")
            return []

        for soft in SOFTWARE:
            partition = soft.split('/')[2]
            for srudb in SRUDB:
                if srudb.split('/')[2] == partition:
                    self.logger().info("Parsing SRUDB from partition {}".format(partition))
                    out_file = os.path.join(out_folder, 'srum_{}.xlsx'.format(partition))
                    run_command([python3, srum, "-i", os.path.join(self.myconfig('casedir'), srudb), "-t", SRUM_TEMPLATE,
                                "-r", os.path.join(self.myconfig('casedir'), soft), "-o", out_file], logger=self.logger())

                    self.convert_to_csv(out_folder, partition)
                    os.remove(out_file)
                    break
            else:
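                # for/else: this branch runs only when the inner loop found no matching SRUDB.dat for this partition (no break)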
                self.logger().info("SRUDB.dat not found in partition: {}".format(partition))

        return []
Example #5
    def __init__(self, *args, disk=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.disk = disk
        if disk is None:
            self.disk = getSourceImage(self.myconfig)
        self.string_path = self.myconfig('outdir')
        check_directory(self.string_path, create=True)
Example #6
    def parse_BITS(self):
        if self.vss:
            base_path = self.myconfig('voutdir')
            bitsdb = self.search.search(
                r"v\d+p\d+/ProgramData/Microsoft/Network/Downloader/qmgr0.dat$"
            )
        else:
            base_path = self.myconfig('outdir')
            bitsdb = self.search.search(
                r"p\d+/ProgramData/Microsoft/Network/Downloader/qmgr0.dat$")
        check_directory(base_path, create=True)

        fields = OrderedDict([('job_id', None), ('name', None), ('desc', None),
                              ('type', None), ('priority', None),
                              ('sid', None), ('state', None), ('cmd', None),
                              ('args', None), ('file_count', 0),
                              ('file_id', 0), ('dest_fn', None),
                              ('src_fn', None), ('tmp_fn', None),
                              ('download_size', -1), ('transfer_size', -1),
                              ('drive', None), ('vol_guid', None),
                              ('ctime', None), ('mtime', None),
                              ('other_time0', None), ('other_time1', None),
                              ('other_time2', None), ('carved', False)])

        for f in bitsdb:
            analyzer = bits.Bits.load_file(
                os.path.join(self.myconfig('casedir'), f))
            jobs = analyzer.parse()
            res_generator = (OrderedDict([(field, j.get(field, fields[field]))
                                          for field in fields]) for j in jobs)
            output_file = os.path.join(base_path,
                                       "bitsdb_%s.csv" % f.split("/")[2])
            save_csv(res_generator,
                     outfile=output_file,
                     file_exists='OVERWRITE')
Example #7
    def parse_SysCache_hive(self):
        outfolder = self.myconfig('voutdir') if self.vss else self.myconfig(
            'outdir')
        # self.tl_file = os.path.join(self.myconfig('timelinesdir'), "%s_BODY.csv" % self.myconfig('source'))
        check_directory(outfolder, create=True)
        SYSC = self.search.search(r"/System Volume Information/SysCache.hve$")

        ripcmd = self.config.get('plugins.common', 'rip',
                                 '/opt/regripper/rip.pl')

        for f in SYSC:
            p = f.split('/')[2]
            output_text = run_command([
                ripcmd, "-r",
                os.path.join(self.myconfig('casedir'), f), "-p", "syscache_csv"
            ],
                                      logger=self.logger())
            output_file = os.path.join(outfolder, "syscache_%s.csv" % p)

            self.path_from_inode = FileSystem(
                config=self.config).load_path_from_inode(self.myconfig,
                                                         p,
                                                         vss=self.vss)

            save_csv(self.parse_syscache_csv(p, output_text),
                     outfile=output_file,
                     file_exists='OVERWRITE')

        self.logger().info("Finished extraction from SysCache")
Example #8
    def run(self, path=""):
        """ Main function to generate report files """
        vss = self.myflag('vss')
        check_directory(self.myconfig('mountdir'), error_missing=True)

        for p in os.listdir(self.myconfig('mountdir')):
            # parse only partition directories
            if (p.startswith('p') and not vss) or (p.startswith('v') and vss):
                regfiles = self.get_hives(p)
                self.generate_registry_output(p, regfiles)
        return []
Example #9
    def run(self, path=""):
        """ Main function to extract $Recycle.bin files. """
        if self.vss:
            output_path = self.myconfig('voutdir')
        else:
            output_path = self.myconfig('outdir')
            try:
                check_file(self.timeline_file, error_missing=True)
            except base.job.RVTError:
                return []

        check_directory(output_path, create=True)
        self.filesystem = FileSystem(self.config)

        # Get the users associated with each SID for every partition
        self.sid_user = {}
        if self.vss:
            for p in self.vss_partitions:
                self.sid_user[p] = self.generate_SID_user(p)
        else:
            for p in self.partitions:
                self.sid_user[p] = self.generate_SID_user(p)

        self.logger().info('Starting to parse RecycleBin')
        # RB_codes relates a six-digit recycle bin code to a file path. Is it updated for each partition or VSS?
        self.RB_codes = {}
        if self.vss:
            for partition in self.vss_partitions:
                self.logger().info(
                    'Processing Recycle Bin in partition {}'.format(partition))
                try:
                    self.parse_RecycleBin(partition)
                except Exception as exc:
                    if self.myflag('stop_on_error'):
                        raise exc
                    continue
                output_file = os.path.join(
                    output_path, "{}_recycle_bin.csv".format(partition))
                self.save_recycle_files(output_file, partition, sorting=True)
        else:
            try:
                self.parse_RecycleBin()
            except Exception as exc:
                if self.myflag('stop_on_error'):
                    raise exc
                return []
            output_file = os.path.join(output_path, "recycle_bin.csv")
            self.save_recycle_files(output_file, sorting=True)
        self.logger().info("Done parsing Recycle Bin!")

        return []
Example #10
    def run(self, path=""):
        vss = self.myflag('vss')

        self.indir = self.config.get('plugins.windows.RVT_evtx.Evtx', 'voutdir') if vss else self.config.get('plugins.windows.RVT_evtx.Evtx', 'outdir')
        self.outdir = self.myconfig('voutdir') if vss else self.myconfig('outdir')
        check_directory(self.indir, error_missing=True)
        check_directory(self.outdir, create=True)

        if not vss:
            self.main()
        else:
            for subdir in os.listdir(self.indir):
                self.main(subdir)
        return []
Example #11
    def run(self, path=""):
        self.vss = self.myflag('vss')
        self.search = GetFiles(self.config, vss=self.myflag("vss"))
        self.outfolder = self.myconfig(
            'voutdir') if self.vss else self.myconfig('outdir')
        check_directory(self.outfolder, create=True)

        self.logger().info(
            "Parsing artifacts from scheduled tasks files (.job)")
        self.parse_Task()
        self.logger().info(
            "Parsing artifacts from Task Scheduler Service log files (schedlgu.txt)"
        )
        self.parse_schedlgu()
        return []
Example #12
    def run(self, keyfile=""):
        """
        Search the output directory for the configured regexes, skipping the strings, searches and parser folders
        """
        self.logger().info("Searching at output folder")
        if not keyfile:
            keyfile = self.myconfig('keyfile')
        check_file(keyfile, error_missing=True)

        grep = self.config.get('plugins.common', 'grep', '/bin/grep')

        skip_folders = ("strings", "parser", "searches")

        self.logger().info("Getting key list from {}".format(keyfile))
        keywords = getSearchItems(keyfile)

        temp_dir = tempfile.mkdtemp('outsearch')
        outdir = self.myconfig('outdir')
        check_directory(outdir, create=True)

        for kw, srch in keywords.items():
            output_file = os.path.join(temp_dir, "outsearch_{}.txt".format(kw))
            with open(output_file, "w") as f:
                f.write(
                    "\nKeyword: {}\n-----------------------------\n\n".format(
                        srch))
                f.flush()

                for item in os.listdir(self.myconfig('outputdir')):
                    folder = os.path.join(self.myconfig('outputdir'), item)
                    if os.path.isdir(folder) and item not in skip_folders:
                        run_command([grep, "-ilR", srch, item],
                                    stdout=f,
                                    from_dir=self.myconfig('outputdir'),
                                    logger=self.logger())

        try:
            for file in os.listdir(temp_dir):
                shutil.copy(os.path.join(temp_dir, file),
                            os.path.join(outdir, file))
        finally:
            shutil.rmtree(temp_dir)

        self.logger().info("OutSearch done")
        return []
Example #13
    def run(self, path=""):
        self.disk = getSourceImage(self.myconfig)

        keyfile = path
        self.logger().debug('Testing existence of {}'.format(keyfile))
        if not keyfile:
            keyfile = self.myconfig('keyfile')
        check_file(keyfile, error_missing=True)

        # Get string files or generate them if not found
        self.string_path = self.myconfig('strings_dir')
        if not (check_directory(self.string_path)
                and os.listdir(self.string_path)):
            self.logger().debug("No string files found. Generating them")
            StringGenerate(config=self.config,
                           disk=self.disk).generate_strings()

        self.search_path = self.myconfig('outdir')
        check_directory(self.search_path, create=True)

        self.keywords = getSearchItems(keyfile)  # Get kw:regex dictionary reading keyfile
        self.blocks = {}  # Store set of blocks for kw and partition. Ex: {'my_kw': {'p02': set(1234, 1235, ...)}}
        # Store status for blocks with search hits in a partition. Ex: {'03': {4547: 'Allocated', 1354536: 'Not Allocated'}}
        self.block_status = defaultdict(dict)

        self.fs_object = FileSystem(self.config, disk=self.disk)

        # Generate or load 'hits_' and 'blocks_' files
        for kname in tqdm(self.keywords,
                          total=len(self.keywords),
                          desc='Searching keywords in strings'):
            kw = kname.strip()
            self.get_blocks(kw, self.keywords[kname])

        # Generate 'all_' files
        self.get_cluster()

        self.logger().info("StringSearch done")
        return []
Example #14
    def generate(self, evtx_path):
        """ Auxiliary function """

        check_directory(evtx_path, create=True)
        evtx = self.config.get('plugins.common', 'evtxdump',
                               '/usr/local/bin/evtxdump.pl')

        alloc_files = GetFiles(self.config, vss=self.myflag("vss"))
        if self.vss:
            evtx_files = alloc_files.search(r"{}.*\.evtx$".format(
                evtx_path.split('/')[-1]))
        else:
            evtx_files = alloc_files.search(r"\.evtx$")

        errorlog = self.myconfig(
            'errorlog',
            os.path.join(self.myconfig('sourcedir'),
                         "{}_aux.log".format(self.myconfig('source'))))

        for i in evtx_files:
            evtx_file = os.path.join(self.myconfig('casedir'), i)
            if not check_file(evtx_file):
                self.logger().warning('File %s does not exist', evtx_file)
                continue
            self.logger().info("Parsing {}".format(i))
            name = os.path.join(evtx_path, os.path.basename(i))[:-4] + "txt"

            # if the output already exists, continue
            if check_file(name):
                self.logger().debug(
                    'The output file %s already exists. Skipping', name)
                continue

            with open(name, "wb") as f:
                with open(errorlog, 'a') as logfile:
                    run_command([evtx, evtx_file],
                                stdout=f,
                                stderr=logfile,
                                logger=self.logger())
Example #15
    def run(self, path=""):
        """ Creates a report based on the output of LnkExtract.

        """
        vss = self.myflag('vss')
        self.logger().info("Generating lnk files report")

        self.mountdir = self.myconfig('mountdir')

        lnk_path = self.config.get('plugins.windows.RVT_lnk.LnkExtract',
                                   '{}outdir'.format('v' * vss))
        report_lnk_path = self.myconfig('{}outdir'.format('v' * vss))
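        # In the two lookups above, 'v' * vss evaluates to 'v' when the vss flag is set and '' otherwise, selecting 'voutdir' or 'outdir'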

        check_directory(lnk_path, error_missing=True)
        check_folder(report_lnk_path)

        outfile = os.path.join(report_lnk_path, 'recentfiles.csv')
        save_csv(self.report_recent(lnk_path),
                 config=self.config,
                 outfile=outfile,
                 quoting=0)

        return []
Example #16
    def report_search_kw(self, keyword, regex):
        """ Creates a pdf file from 'all_kw' file, using LaTex.

        Parameters:
            keyword (str): keyword name
            regex (str): regular expression associated to keyword
        """

        # TODO: do not break lines. Use lstlisting or something else
        pdflatex = self.myconfig('pdflatex', '/usr/bin/pdflatex')

        search_path = self.myconfig('search_dir')
        check_directory(search_path, error_missing=True)
        report_path = self.myconfig('outdir')
        check_directory(report_path, create=True)

        kw_utf8 = ''.join([i + '.' for i in keyword])
        # Avoid LaTeX special characters
        replaces = [(u'\ufffd', "."), ("\\", "/"), (r"{", "("), (r"]", ")"),
                    (r"$", "\\$"), (r"_", "\\_"), (r"%", "\\%"), (r"}", ")"),
                    (r"^", "."), (r"#", "\\#"), (r"~", "."), ("&", "\\&"),
                    ('"', "'"), (r"€", "euro")]
        line_width = 68  # number of characters per line in tex file

        for file in os.listdir(search_path):
            if not file.startswith("all_{}".format(keyword)):
                continue
            self.logger().info('Creating file {}'.format(file + '.pdf'))

            with open(os.path.join(report_path, file + ".tex"),
                      "w") as foutput:

                foutput.write(
                    "\\documentclass[a4paper,11pt,oneside]{report}\n\\usepackage[spanish]{babel}\n"
                )
                foutput.write("\\usepackage[utf8]{inputenc}\n")
                foutput.write("\\usepackage[pdftex]{color,graphicx}\n")
                foutput.write("\\usepackage[pdftex,colorlinks]{hyperref}\n")
                foutput.write("\\usepackage{fancyvrb}\n")
                foutput.write("\\usepackage{eurosym}\n")
                foutput.write("\\usepackage{listings}\n")
                foutput.write(
                    "\\lstset{breakatwhitespace=false,breaklines=true,frame=single}\n"
                )
                foutput.write("\\UseRawInputEncoding\n")
                foutput.write("\\begin{document}\n\n")
                foutput.write(
                    "\\section*{blindsearches in disk. Keyword:  \\emph{" +
                    keyword + "}}\n")
                initial = True

                if os.path.getsize(os.path.join(search_path, file)) == 0:
                    foutput.write("\\end{document}\n")
                    continue

                with open(os.path.join(search_path, file), "rb") as finput:
                    for line in finput:
                        line = line.decode("iso8859-15", "replace")
                        for r in replaces:
                            line = line.replace(r[0], r[1])

                        if line.startswith('Pt: p'):  # Block information
                            foutput.write(
                                "\\end{Verbatim}\n\n" if not initial else "")
                            foutput.write("\\newpage\n" if not initial else "")
                            initial = False
                            foutput.write("\\begin{lstlisting}\n")
                            foutput.write(line)
                            foutput.write("\\end{lstlisting}\n")
                            foutput.write(
                                "\\begin{Verbatim}[commandchars=\\\\\\{\\}]\n")
                            continue

                        line = re.sub("[\x00-\x09\x0B-\x1F\x7F-\xFF]", ".",
                                      line)
                        # Write in chunks. Note: some hits may be missed this way
                        for chunk_line in [
                                line[i:i + line_width]
                                for i in range(0, len(line), line_width)
                        ]:
                            chunk_line = re.sub('({})'.format(regex),
                                                r"\\colorbox{green}{" + r'\1' +
                                                r"}",
                                                chunk_line,
                                                flags=re.I | re.M)
                            chunk_line = re.sub('({})'.format(kw_utf8),
                                                r"\\colorbox{green}{" + r'\1' +
                                                r"}",
                                                chunk_line,
                                                flags=re.I | re.M)
                            foutput.write(chunk_line + "\n")

                foutput.write("\\end{Verbatim}\n")
                foutput.write("\\end{document}\n")

            run_command(
                [pdflatex, "-output-directory", report_path, file + ".tex"],
                logger=self.logger())
            break

        else:
            self.logger().warning(
                'No file: all_{}. Perhaps there is no match for the keyword'.
                format(keyword))

        for file in os.listdir(report_path):
            if file.endswith((".log", ".tex", ".aux", ".toc", ".out", ".synctex.gz")):
                os.remove(os.path.join(report_path, file))
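The chunk-and-highlight step above can be exercised on its own. A minimal, self-contained sketch of the same fixed-width split and re.sub highlighting; the sample line, keyword and width are made up:

import re

line = "invoice sent to ACME, acme-backup copied to usb"
regex = "acme"          # hypothetical keyword regex
line_width = 16         # report_search_kw uses 68 characters per line

for chunk_line in [line[i:i + line_width] for i in range(0, len(line), line_width)]:
    # Wrap every case-insensitive match in a LaTeX \colorbox, as report_search_kw does
    chunk_line = re.sub('({})'.format(regex),
                        r"\\colorbox{green}{" + r'\1' + r"}",
                        chunk_line,
                        flags=re.I | re.M)
    print(chunk_line)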
Example #17
    def run(self, path=""):
        """ Generator of INDX entries as dictionaries. Also writes to csv files"""
        self.disk = getSourceImage(self.myconfig)
        self.sector_size = self.disk.sectorsize

        self.parseINDX_ROOTFiles = self.myflag(
            'root', False)  # Parse also INDX_ROOT records if set
        self.skip_short_filenames = self.myflag('skip_short', False)
        self.only_slack = self.myflag('only_slack', False)

        outdir = self.myconfig('outdir')
        check_directory(outdir, create=True)

        for p in self.disk.partitions:
            if not p.isMountable:
                continue

            # Get a dictionary {inode: list of names} from 'fls' to later relate inodes to a path. 'inode' keys are strings, not int.
            part_name = ''.join(['p', p.partition])
            try:
                self.inode_fls = FileSystem(
                    self.config).load_path_from_inode(partition=part_name)
                self.logger().debug(
                    'Correctly loaded inode-name relation file for partition {}'
                    .format(part_name))
            except Exception as e:
                self.logger().error(e)
                continue

            # Start the carving at next to last execution block parsed
            outfile = os.path.join(
                outdir, '{}{}_INDX_timeline.csv'.format(
                    part_name, '_slack' if self.only_slack else ''))
            self.lastParsedBlk = 0
            if self.myflag('use_localstore'):
                self.lastParsedBlk = int(
                    self.config.store_get(
                        'last_{}_block_parsed'.format(part_name), 0))
            self.logger().debug('lastParsedBlk: {}'.format(self.lastParsedBlk))

            csv_args = {'file_exists': 'APPEND', 'write_header': True}
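            # Append without repeating the header when resuming from a stored block; overwrite when starting again from block 0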
            if self.lastParsedBlk:
                if not os.path.exists(outfile):
                    self.logger().warning(
                        'Starting new file {0} at an advanced offset. Set "last_{0}_block_parsed" at 0 in "store.ini" if a fresh start is desired'
                        .format(outfile))
                else:
                    csv_args['write_header'] = False
            else:
                if os.path.exists(outfile):
                    self.logger().warning(
                        'Overwriting file {}'.format(outfile))
                    csv_args['file_exists'] = 'OVERWRITE'

            # Write the parsed entries to a csv file for each partition.
            save_csv(self.parse_INDX(p),
                     config=self.config,
                     outfile=outfile,
                     quoting=0,
                     **csv_args)
        return []
Example #18
    def characterize_Linux(self):
        """ Characterize Linux partitions from release files, system logs and login records. """

        self.outfile = self.myconfig('outfile')
        check_directory(os.path.dirname(self.outfile), create=True)

        for p in self.disk.partitions:
            part_path = os.path.join(self.myconfig('mountdir'),
                                     "p%s" % p.partition)
            if not os.path.isdir(os.path.join(part_path, "etc")):
                continue
            releas_f = ""
            if os.path.isfile(os.path.join(
                    part_path, "etc/lsb-release")) or os.path.islink(
                        os.path.join(part_path, "etc/lsb-release")):
                releas_f = os.path.join(part_path, "etc/lsb-release")
                if os.path.islink(releas_f):
                    releas_f = os.path.join(part_path,
                                            os.path.realpath(releas_f)[1:])
            else:
                for f in os.listdir(os.path.join(part_path, "etc")):
                    if f.endswith("-release"):
                        releas_f = os.path.join(part_path, "etc", f)

            with open(self.outfile, 'w') as out_f:
                # Default values, in case no *-release file was found for this partition
                dist_id = dist_rel = dist_coden = dist_desc = kernel_v = hostname = ""
                if releas_f != "":
                    out_f.write("Information of partition {}\n\n".format(
                        p.partition))
                    f_rel = open(releas_f, "r")
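                    # Assumes the *-release file lists ID, release, codename and description on its first four lines (lsb-release layout)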
                    dist_id = f_rel.readline().split("=")[-1].rstrip()
                    dist_rel = f_rel.readline().split("=")[-1].rstrip()
                    dist_coden = f_rel.readline().split("=")[-1].rstrip()
                    dist_desc = f_rel.readline().split("=")[-1].rstrip()
                    kernel_v = ""
                    f_hostname = open(os.path.join(part_path, "etc/hostname"),
                                      "r")
                    hostname = f_hostname.read().rstrip()
                    f_hostname.close()
                    f_rel.close()
                    if os.path.isfile(os.path.join(part_path,
                                                   "var/log/dmesg")):
                        f_dmesg = open(
                            os.path.join(part_path, "var/log/dmesg"), "r")
                        for linea in f_dmesg:
                            aux = re.search(r"(Linux version [^\s]*)", linea)
                            if aux:
                                kernel_v = aux.group(1)
                                break
                        f_dmesg.close()
                out_f.write(
                    "Distribution ID:\t\t{}\nDistribution Release:\t\t{}\nDistribution codename:\t\t{}\nDistribution description:\t{}\nKernel version:\t{}\nHostname:\t{}\n"
                    .format(dist_id, dist_rel, dist_coden, dist_desc, kernel_v,
                            hostname))

                install_date = ""

                if os.path.isdir(
                        os.path.join(self.myconfig('mountdir'),
                                     "p%s" % p.partition, "root")):
                    item = os.path.join(self.myconfig('source'), 'mnt',
                                        "p%s" % p.partition, "root")
                    install_date = self.filesystem.get_macb([item])[item][3]

                for f in [
                        "root/install.log", "var/log/installer/syslog",
                        "root/anaconda-ks.cfg"
                ]:
                    if os.path.isfile(
                            os.path.join(self.myconfig('mountdir'),
                                         "p%s" % p.partition, f)):
                        item = os.path.join(self.myconfig('source'), 'mnt',
                                            "p%s" % p.partition, f)
                        install_date = self.filesystem.get_macb([item
                                                                 ])[item][3]
                        break

                if install_date != "":
                    out_f.write("Install date:\t{}\n\n".format(install_date))

            # users
            self.get_linux_lastlog(p.partition)

            temp = self.get_linux_wtmp(os.path.join(part_path, "var/log"))

            # temp = subprocess.check_output('last -f {} --time-format iso'.format(os.path.join(part_path, "var/log/wtmp")), shell=True).decode("utf-8")
            with open(self.outfile, 'a') as out_f:
                out_f.write("\nLogins:\n\n{}".format(temp))
Example #19
    def get_hive_files(self, path):
        """ Retrieves all hives found in source if path is not specified.

            Args:
                path: path to registry hive
        """
        if path:
            if os.path.exists(path):
                return path
            else:
                raise base.job.RVTError('path {} does not exist'.format(path))

        check_directory(self.myconfig('mountdir'), error_missing=True)

        regfiles = {}

        Find = GetFiles(self.config, vss=self.myflag("vss"))

        for main_hive in ['SYSTEM', 'SOFTWARE', 'SAM', 'SECURITY']:
            for item in Find.search(
                    "/Windows/System32/config/{}$".format(main_hive)):
                hive = item.split('/')[-1].lower()
                if hive not in regfiles:  # Get only the first hit
                    regfiles[hive] = os.path.join(self.myconfig('casedir'),
                                                  item)

        if "software" not in regfiles.keys():
            self.logger().warning('No SOFTWARE hive found in source')
            return {}

        NTUSER = Find.search(
            r"/(Documents and settings|users)/.*/(NTUSER|UsrClass)\.dat$")

        usr = defaultdict(list)
        regfiles["ntuser"] = {}
        regfiles["usrclass"] = {}

        for item in NTUSER:
            aux = re.search("(Documents and settings|Users)/([^/]*)/", item,
                            re.I)
            user = aux.group(2)
            hive_name = 'ntuser' if item.lower().endswith(
                "ntuser.dat") else 'usrclass'
            if user not in usr[hive_name]:
                usr[hive_name].append(user)
            else:  # Get only the first hit
                continue
            if hive_name == "ntuser":
                regfiles["ntuser"][user] = os.path.join(
                    self.myconfig('casedir'), item)
            else:
                regfiles["usrclass"][user] = os.path.join(
                    self.myconfig('casedir'), item)

        amcache = list(Find.search("/Windows/AppCompat/Programs/Amcache.hve"))
        if len(amcache) != 0:
            regfiles["amcache"] = os.path.join(self.myconfig('casedir'),
                                               amcache[0])
        syscache = list(Find.search(r"/syscache.hve$"))
        if len(syscache) != 0:
            regfiles["syscache"] = os.path.join(self.myconfig('casedir'),
                                                syscache[0])

        return regfiles
Example #20
    def characterize_Windows(self):
        """ Characterize Windows partitions from registry files and timeline. """

        hives_dir = self.myconfig('hivesdir')

        # Check registry is parsed. Generate the minimum files needed otherwise
        ripplugins_file = self.myconfig('ripplugins')
        if not check_directory(hives_dir):
            module = base.job.load_module(
                self.config,
                'plugins.windows.RVT_autorip.Autorip',
                extra_config=dict(ripplugins=ripplugins_file))
            list(module.run())

        # Get the autorip outputfile associated with each necessary plugin
        with open(ripplugins_file) as rf:
            ripplugins = json.load(rf)
        used_plugins = [
            'winnt_cv', 'shutdown', 'timezone', 'lastloggedon',
            'processor_architecture', 'compname', 'samparse', 'profilelist'
        ]
        os_plugins = [
            'winnt_cv', 'shutdown', 'timezone', 'lastloggedon',
            'processor_architecture', 'compname'
        ]
        plugin_files = {
            plug: p['file']
            for plug in used_plugins for p in ripplugins
            if plug in p['plugins']
        }

        plugin_fields = {
            'winnt_cv': [
                '  ProductName', '  CurrentVersion', '  InstallationType',
                '  EditionID', '  CurrentBuild', '  ProductId',
                '  RegisteredOwner', '  RegisteredOrganization',
                '  InstallDate'
            ],
            'shutdown': ['  ShutdownTime'],
            'processor_architecture': ['PROCESSOR_ARCHITECTURE'],
            'compname': ['ComputerName']
        }

        field_names = {
            '  ProductName': 'ProductName',
            '  CurrentVersion': 'CurrentVersion',
            '  InstallationType': 'InstallationType',
            '  EditionID': 'EditionID',
            '  CurrentBuild': 'CurrentBuild',
            '  ProductId': 'ProductId',
            '  RegisteredOwner': 'RegisteredOwner',
            '  RegisteredOrganization': 'RegisteredOrganization',
            '  InstallDate': 'InstallDate',
            '  ShutdownTime': 'ShutdownTime',
            '  TimeZoneKeyName': 'TimeZone',
            'PROCESSOR_ARCHITECTURE': 'ProcessorArchitecture',
            'ComputerName': 'ComputerName'
        }

        partitions = [
            folder for folder in sorted(os.listdir(self.myconfig('mountdir')))
            if folder.startswith('p')
        ]

        # Define self.ntusers, that gets the creation date of NTUSER.DAT for every user and partition
        self.make_ntuser_timeline()

        # Main loop to populate os_info
        os_info = defaultdict(dict)
        for part in partitions:
            for plug in os_plugins:
                hivefile = os.path.join(
                    hives_dir, '{}_{}.txt'.format(plugin_files[plug], part))
                if not check_file(hivefile):
                    continue
                with open(hivefile) as f_in:
                    if plug == 'lastloggedon':
                        for line in f_in:
                            if line.startswith('LastLoggedOn'):
                                f_in.readline()
                                last_write = f_in.readline()[11:].rstrip('\n')
                                f_in.readline()
                                last_user = f_in.readline()[22:].rstrip('\n')
                                os_info[part][
                                    'LastLoggedOn'] = '{} ({})'.format(
                                        last_write, last_user)
                                break
                        continue
                    elif plug == 'timezone':
                        for line in f_in:
                            if line.startswith('TimeZoneInformation'):
                                bias, tz_name = '', ''
                                while not line.startswith(
                                        '....................') and line != "":
                                    line = f_in.readline()
                                    if line.startswith('  Bias'):
                                        bias = line[line.find('('):].rstrip(
                                            '\n')
                                    if line.startswith('  TimeZoneKeyName'):
                                        line = line[len('  TimeZoneKeyName') +
                                                    3:].rstrip('\n')
                                        tz_name = line[:line.find('Time') + 4]
                                os_info[part]['TimeZone'] = '{} {}'.format(
                                    tz_name, bias)
                                break
                        continue

                    for field in plugin_fields[plug]:
                        f_in.seek(0)
                        for line in f_in:
                            if line.startswith(field):
                                os_info[part][
                                    field_names[field]] = line[len(field) +
                                                               3:].rstrip('\n')
                                break

            # Skip displaying partition info if it does not contain an OS
            if not os_info.get(part, None):
                self.logger().debug(
                    'No OS information for partition {}'.format(part))
                continue

            # Users Info
            hivefile = os.path.join(
                hives_dir, '{}_{}.txt'.format(plugin_files['samparse'], part))
            line = '  '
            users = []
            user_profiles = []
            if check_file(hivefile):
                with open(hivefile) as f_in:
                    # Parse samparse
                    while not line.startswith('profilelist') and line != "":
                        line = f_in.readline()

                        aux = re.search(r"Username\s*:\s*(.*)\n", line)
                        if aux:
                            user = [aux.group(1), "", ""]
                            while line != "\n":
                                line = f_in.readline()
                                aux = re.search(
                                    r"Account Created\s*:\s*(.*)\n", line)
                                if aux:
                                    aux1 = aux.group(1).replace("  ", " ")
                                    date = datetime.datetime.strptime(
                                        aux1, '%a %b %d %H:%M:%S %Y Z')
                                    user[1] = date.strftime(
                                        '%d-%m-%Y %H:%M:%S UTC')
                                    continue
                                aux = re.search(
                                    r"Last Login Date\s*:\s*(.*)\n",
                                    line)  # TODO: check this field is reliable
                                if aux:
                                    if aux.group(1).find("Never") == -1:
                                        aux1 = aux.group(1).replace("  ", " ")
                                        date = datetime.datetime.strptime(
                                            aux1, '%a %b %d %H:%M:%S %Y Z')
                                        user[2] = date.strftime(
                                            '%d-%m-%Y %H:%M:%S UTC')
                                    else:
                                        user[2] = "Never"
                                    users.append(user)
                                    break

                    # Parse profilelist
                    line = '  '
                    while not line.startswith(
                            '....................') and line != "":
                        line = f_in.readline()
                        aux = re.match(r"Path\s*:\s*.:.Users.(.*)",
                                       line.strip())
                        if aux:
                            # import pudb; pudb.set_trace()
                            user = [aux.group(1), "", ""]
                            while line != "\n":
                                line = f_in.readline()
                                aux = re.search(r"LastWrite\s*:\s*(.*)",
                                                line.strip())
                                if aux:
                                    aux1 = aux.group(1).replace("  ", " ")
                                    date = datetime.datetime.strptime(
                                        aux1, '%a %b %d %H:%M:%S %Y (UTC)')
                                    user[2] = date.strftime(
                                        "%d-%m-%Y %H:%M:%S UTC")
                                    user_profiles.append(user)

            # Get creation date from NTUSER.DAT if not found in profilelist
            for i in user_profiles:
                for j in self.ntusers[part]:
                    if i[0] == j[0] and i[1] == "":
                        i[1] = j[1].strftime('%d-%m-%Y %H:%M:%S UTC')
            os_info[part]["users"] = users
            os_info[part]["user_profiles"] = user_profiles
        return os_info
Example #21
    def run(self, path=""):
        self.disk = getSourceImage(self.myconfig)
        if not self.disk.exists():
            self.logger().error(self.disk)
            return

        self.source = self.myconfig('source')
        self.outFolder = self.myconfig('deleteddir')
        check_directory(self.outFolder, create=True)

        # Set maximal dates for later update
        self.firstDate = datetime.date.today() + datetime.timedelta(days=365)
        self.lastDate = datetime.date(1970, 1, 1)

        # Process Timeline deleted files
        self.timelineBodyFile = os.path.join(self.myconfig('timelinesdir'), '{}_BODY.csv'.format(self.source))
        check_file(self.timelineBodyFile, error_missing=True)
        # cmd = r"grep '(deleted' {} | grep -v FILE_NAME | cut -d'|' -f2 | sed 's_^[0-9-][0-9-]*/mnt/\(.*\) (deleted.*$_\1_' | sort -u".format(self.timelineBodyFile)
        cmd = r"grep '(deleted' {} | grep -v '\$FILE_NAME' | cut -d'|' -f2,3,7".format(self.timelineBodyFile)
        deletedTimelineFiles = shell_command(cmd)
        df_timeline = self.get_dataframe(deletedTimelineFiles, 'timeline')

        # Process Recycle
        self.recycleFile = os.path.join(self.myconfig('recycledir'), 'recycle_bin.csv')
        check_file(self.recycleFile, error_missing=True)
        df_recycle = self.get_dataframe(self.recycleFile, 'recycle')

        # Process UsnJrnl and INDX
        df_usnjrnl = pd.DataFrame()
        df_indx = pd.DataFrame()
        for p in self.disk.partitions:
            self.partName = ''.join(['p', p.partition])
            if p.isMountable:

                self.usnJrnlFile = os.path.join(self.myconfig('journaldir'), 'UsnJrnl_{}.csv'.format(p.partition))
                check_file(self.usnJrnlFile, error_missing=True)
                df_u = self.get_dataframe(shell_command(r"grep 'DELETE CLOSE' {} | cut -d',' -f 1,2,4".format(self.usnJrnlFile)), 'usnjrnl')

                self.indxFile = os.path.join(self.myconfig('timelinesdir'), '{}_INDX_timeline.csv'.format(p.partition))
                if not check_file(self.indxFile):
                    df_i = pd.DataFrame()
                else:
                    # cmd = "grep -v 'SHORT FILENAME FORMAT' {} | grep -v 'NOT OBTAINED' | grep -v 'invalid MFTReference' | cut -d ';' -f 3,4,5,7".format(self.indxFile)   # real
                    # cmd = r"tail -n +2 {} | grep -va 'SHORT FILENAME FORMAT' | grep -va 'NOT OBTAINED' | grep -va 'invalid MFTReference' | cut -d ';' -f 2,5,9,14 ".format(self.indxFile)  # unsorted
                    cmd = r"tail -n +2 {} | grep -va 'SHORT FILENAME FORMAT' | grep -va 'NOT OBTAINED' | cut -d ';' -f 3,4,6,7,9 ".format(self.indxFile)  # real
                    df_i = self.get_dataframe(shell_command(cmd), 'indx')

                df_usnjrnl = self.join_dataframes(df_usnjrnl, df_u)
                df_indx = self.join_dataframes(df_indx, df_i)

        # TODO: timeline_all does not need columns source or reliable
        # Compare Timeline against INDX to extract unique (assuming deleted) files in INDX
        cmd = r"cut -d'|' -f2 {} | grep -v '\$FILE_NAME'".format(self.timelineBodyFile)
        df_all_timeline = self.get_dataframe(shell_command(cmd), 'timeline_all')
        self.logger().debug('Obtaining unique files in INDX')
        df_indx = self.get_deleted_in_INDX(df_all_timeline, df_indx)

        # Create a global dataframe with all artifacts
        self.logger().info('Combining artifacts to create a full list of deleted files')
        df_global = self.combine_artifacts([df_usnjrnl, df_recycle, df_timeline, df_indx])
        print(df_global.shape, df_global.columns)
        duplicated_bin = df_global.duplicated('Filename', keep='first')  # First sources have precedence
        self.logger().info('Found {} duplicated files merging sources'.format(duplicated_bin.sum()))
        print('before dropping', df_global.shape)
        df_global = df_global[~duplicated_bin]
        # df_global.drop_duplicates('Filename', keep='first', inplace=True)
        print('after dropping', df_global.shape)
        print(df_global.columns)
        print(df_global.head())

        # Save global DataFrame
        outfile = os.path.join(self.outFolder, '{}_deleted.csv'.format(self.source))
        with open(outfile, 'w') as f:
            f.write(df_global.to_csv(index=False))

        # exit()
        # Create number of files summary based on day, hour and partition
        self.get_stats(self.join_dataframes(df_usnjrnl, df_recycle), 'all')