Example #1
    def run(self, path=""):
        """ Extracts SRUM artifacts of a disk """
        vss = self.myflag('vss')
        SRUM_TEMPLATE = os.path.join(self.myconfig('rvthome'), "plugins/external/srum-dump/SRUM_TEMPLATE2.xlsx")
        srum = os.path.join(self.myconfig('rvthome'), "plugins/external/srum-dump/srum_dump2.py")
        check_file(SRUM_TEMPLATE, error_missing=True)

        Search = GetFiles(self.config, vss=self.myflag("vss"))
        SOFTWARE = list(Search.search('windows/system32/config/SOFTWARE$'))
        SRUDB = list(Search.search('/windows/system32/sru/SRUDB.dat$'))
        python3 = os.path.join(self.myconfig('rvthome'), ".venv/bin/python3")

        out_folder = self.myconfig('voutdir') if vss else self.myconfig('outdir')
        check_directory(out_folder, create=True)

        if not SRUDB:
            self.logger().info("SRUDB.dat not found in any partition of the disk")
            return []

        for soft in SOFTWARE:
            partition = soft.split('/')[2]
            for srudb in SRUDB:
                if srudb.split('/')[2] == partition:
                    self.logger().info("Parsing SRUDB from partition {}".format(partition))
                    out_file = os.path.join(out_folder, 'srum_{}.xlsx'.format(partition))
                    run_command([python3, srum, "-i", os.path.join(self.myconfig('casedir'), srudb), "-t", SRUM_TEMPLATE,
                                "-r", os.path.join(self.myconfig('casedir'), soft), "-o", out_file], logger=self.logger())

                    self.convert_to_csv(out_folder, partition)
                    os.remove(out_file)
                    break
            else:
                self.logger().info("SRUDB.dat not found in partition: {}".format(partition))

        return []
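Note: every example on this page relies on the check_file / check_directory helpers. Below is a minimal sketch of their apparent contract, inferred only from the calls shown on this page; the exception stand-in and the behaviour of delete_exists are assumptions, not the project's real implementation.

import os

class RVTError(Exception):
    """ Stand-in for base.job.RVTError, which callers on this page catch """

def check_file(path, error_missing=False, delete_exists=False):
    """ Return True if 'path' is an existing regular file.
    With error_missing=True, raise instead of returning False (assumption).
    With delete_exists=True, remove a pre-existing file first (assumption). """
    exists = bool(path) and os.path.isfile(path)
    if not exists and error_missing:
        raise RVTError('{} does not exist or is not a file'.format(path))
    if exists and delete_exists:
        os.remove(path)
        return False
    return exists

def check_directory(path, create=False):
    """ Return True if 'path' is an existing directory, creating it first when create=True """
    if create:
        os.makedirs(path, exist_ok=True)
    return os.path.isdir(path)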
Example #2
    def get_blocks(self, kw, regex):
        """ Updates variable self.blocks, that stores set of blocks for kw and partition, creating new 'block' and 'hits' files """
        self.blocks_file_path = os.path.join(self.search_path,
                                             "blocks_{}".format(kw))
        hits_file = os.path.join(self.search_path, "hits_%s" % kw)

        # Create the hits file if it is missing or empty
        if not check_file(hits_file) or os.path.getsize(hits_file) == 0:
            self.logger().debug('Creating {} file'.format("hits_%s" % kw))
            extra_args = {'write_header': True, 'file_exists': 'OVERWRITE'}
            save_csv(self.search_strings(kw, regex),
                     config=self.config,
                     outfile=hits_file,
                     **extra_args)

        # Create the blocks file if it is missing or empty; load it otherwise
        if not check_file(self.blocks_file_path) or os.path.getsize(
                self.blocks_file_path) == 0:
            self.blocks[kw] = defaultdict(list)
            cmd = "sed -n '1!p' {} | cut -d ';' -f1,3 | sort | uniq".format(
                hits_file)
            for line in yield_command(cmd, logger=self.logger()):
                part, blk = line.split(';')
                part = part.strip('"')
                self.blocks[kw][part].append(int(blk.strip('"').rstrip('\n')))
            self.save_blocks_file(self.blocks[kw], kw)
        else:
            self.logger().info('Loading {} file'.format("blocks_%s" % kw))
            try:
                with open(self.blocks_file_path, "r") as block_file:
                    self.blocks[kw] = json.load(block_file)
            except Exception as exc:
                self.logger().error('Cannot load {}'.format(
                    self.blocks_file_path))
                raise exc
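Note: the sed | cut | sort | uniq pipeline above can be written in pure Python. A sketch assuming, as the code above does, that hits_<kw> is a ';'-delimited CSV with a header row, the quoted partition in column 1 and the quoted block number in column 3 (the csv module strips the quotes that the shell version removes with strip('"')):

import csv
from collections import defaultdict

def blocks_from_hits(hits_file):
    """ Equivalent of: sed -n '1!p' hits_file | cut -d ';' -f1,3 | sort | uniq """
    blocks = defaultdict(list)
    seen = set()
    with open(hits_file, newline='') as f:
        reader = csv.reader(f, delimiter=';')
        next(reader, None)  # sed -n '1!p': skip the header line
        for row in reader:
            part, blk = row[0], int(row[2])
            if (part, blk) not in seen:  # uniq: keep each (partition, block) pair once
                seen.add((part, blk))
                blocks[part].append(blk)
    return blocks  # blocks appear in first-seen order rather than sorted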
Example #3
def getSourceImage(myconfig):
    # Try to scan 'imagefile' first if supplied
    imagefile = myconfig('imagefile')
    if imagefile:
        check_file(imagefile, error_missing=True)
        try:
            ext = os.path.basename(imagefile).split('.')[-1]
            return KNOWN_IMAGETYPES[ext]['imgclass'](
                imagefile=imagefile,
                imagetype=KNOWN_IMAGETYPES[ext]['type'],
                params=myconfig)
        except KeyError:
            raise base.job.RVTError(
                'Image file {} has unrecognized image extension format: {}'.
                format(imagefile, ext))

    source = myconfig('source')
    imagedir = myconfig('imagedir')
    for ext in KNOWN_IMAGETYPES.keys():
        ifile = os.path.join(imagedir, "%s.%s" % (source, ext))
        if check_file(ifile):
            return KNOWN_IMAGETYPES[ext]['imgclass'](
                imagefile=ifile,
                imagetype=KNOWN_IMAGETYPES[ext]['type'],
                params=myconfig)
    logging.warning('Image file not found for source=%s in imagedir=%s',
                    source, imagedir)
    return DummyImage(imagefile=None, imagetype='dummy', params=myconfig)
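Note: KNOWN_IMAGETYPES is used but never defined on this page. The two lookups above imply it maps an image-file extension to a dict with 'type' and 'imgclass' keys; the sketch below is hypothetical, with invented extensions, type names and stand-in classes.

class RawImage:
    """ Stand-in image class; the constructor signature is taken from the calls above """
    def __init__(self, imagefile=None, imagetype=None, params=None):
        self.imagefile = imagefile
        self.imagetype = imagetype
        self.params = params

class EWFImage(RawImage):
    pass

KNOWN_IMAGETYPES = {
    'dd': {'type': 'raw', 'imgclass': RawImage},   # hypothetical entries
    'raw': {'type': 'raw', 'imgclass': RawImage},
    'E01': {'type': 'ewf', 'imgclass': EWFImage},
}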
Example #4
    def run(self, path=""):
        keyfile = path
        if not keyfile:
            keyfile = self.myconfig('keyfile')
        check_file(keyfile, error_missing=True)
        keywords = getSearchItems(keyfile)

        for kname, regex in keywords.items():
            self.report_search_kw(kname, regex)
        return []
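Note: getSearchItems is only called, never defined, on this page. A hypothetical sketch of its job, assuming a plain-text keyfile with one 'name<TAB>regex' entry per line; the file format is an assumption, and the fallback of the regex to the keyword itself is suggested by Example #15 ('regex_search = [regex] if regex else [kw]').

from collections import OrderedDict

def getSearchItems(keyfile):
    """ Hypothetical: parse 'keyfile' into an OrderedDict of keyword name -> regex """
    items = OrderedDict()
    with open(keyfile) as kf:
        for line in kf:
            line = line.strip()
            if not line or line.startswith('#'):  # assumed comment/blank handling
                continue
            name, _, regex = line.partition('\t')
            items[name.strip()] = regex.strip() or name.strip()  # fall back to the literal keyword
    return items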
Example #5
    def parse_RecycleBin(self, partition=None):
        """ Search all Recycle.Bin files found on the timeline. Both allocated and deleted. """
        # Find the $I files first so a list of codes associated with RecycleBin files can be created.
        # Then use that list to assign names and data to $R files found later.
        self.i_files = {}
        self.r_files = []

        if self.vss:
            self.timeline_file = os.path.join(self.myconfig('vtimelinesdir'),
                                              '{}_BODY.csv'.format(partition))
            try:
                check_file(self.timeline_file, error_missing=True)
            except base.job.RVTError as e:
                self.logger().warning('{}. Skipping vss {}'.format(
                    e, partition))
                return
        self.logger().debug('Timeline file: {}'.format(self.timeline_file))

        search_command = 'grep -P "{regex}" "{path}"'

        # Parse $I files in RecycleBin:
        self.logger().info('Searching RecycleBin $I files')
        # Realloc files have metadata pointing to newly allocated data that does not match the filename.
        # They cannot be recovered, but the reference to an older name can give some useful information, so they are included
        regex = [r'\$Recycle\.Bin.*\$I', r'\$RECYCLE\.BIN.*\$I']
        module = base.job.load_module(self.config,
                                      'base.commands.RegexFilter',
                                      extra_config=dict(cmd=search_command,
                                                        keyword_list=regex))

        if not os.path.exists(self.timeline_file) or os.path.getsize(
                self.timeline_file) == 0:
            msg = ('Timeline BODY file not found or empty for partition {}. '
                   'Run fs_timeline job before executing winRecycle'.format(partition))
            self.logger().error(msg)
            raise base.job.RVTError(msg)

        for line in module.run(self.timeline_file):
            self._process_I_file(line['match'], partition)

        # Parse $R files in RecycleBin:
        self.logger().info('Searching RecycleBin $R files')
        regex = [r'\$Recycle\.Bin.*\$R', r'\$RECYCLE\.BIN.*\$R']
        module = base.job.load_module(self.config,
                                      'base.commands.RegexFilter',
                                      extra_config=dict(cmd=search_command,
                                                        keyword_list=regex))

        for line in module.run(self.timeline_file):
            self._process_R_file(line['match'], partition)
Example #6
    def run(self, path=""):
        """ Main function to extract $Recycle.bin files. """
        if self.vss:
            output_path = self.myconfig('voutdir')
        else:
            output_path = self.myconfig('outdir')
            try:
                check_file(self.timeline_file, error_missing=True)
            except base.job.RVTError:
                return []

        check_directory(output_path, create=True)
        self.filesystem = FileSystem(self.config)

        # Get the users associated with each SID for every partition
        self.sid_user = {}
        partitions = self.vss_partitions if self.vss else self.partitions
        for p in partitions:
            self.sid_user[p] = self.generate_SID_user(p)

        self.logger().info('Starting to parse RecycleBin')
        # RB_codes relates a six-digit RecycleBin code to a file path. Updated for each partition or vss?
        self.RB_codes = {}
        if self.vss:
            for partition in self.vss_partitions:
                self.logger().info(
                    'Processing Recycle Bin in partition {}'.format(partition))
                try:
                    self.parse_RecycleBin(partition)
                except Exception as exc:
                    if self.myflag('stop_on_error'):
                        raise exc
                    continue
                output_file = os.path.join(
                    output_path, "{}_recycle_bin.csv".format(partition))
                self.save_recycle_files(output_file, partition, sorting=True)
        else:
            try:
                self.parse_RecycleBin()
            except Exception as exc:
                if self.myflag('stop_on_error'):
                    raise exc
                return []
            output_file = os.path.join(output_path, "recycle_bin.csv")
            self.save_recycle_files(output_file, sorting=True)
        self.logger().info("Done parsing Recycle Bin!")

        return []
Example #7
    def make_ntuser_timeline(self):
        """ Get user creation date from the birth time of NTUSER.dat """

        timeline_file = os.path.join(
            self.config.get('plugins.common', 'timelinesdir'),
            '{}_TL.csv'.format(self.myconfig('source')))
        if not check_file(timeline_file):
            self.logger().warning(
                'Timeline file not found: {}'.format(timeline_file))
            self.ntusers = {}
            return
        ntusers = defaultdict(list)
        with open(timeline_file, "r", encoding="iso8859-15") as tl_f:
            for line in tl_f:
                mo = re.search(
                    r"mnt/(p\d+)/(?:Documents and settings|Users)/([^/]*)/(?:NTUSER|UsrClass)\.dat\"",
                    line, re.IGNORECASE)
                if mo is not None:
                    part, user = mo.group(1), mo.group(2)
                    line = line.split(',')
                    if line[2][3] != 'b':  # keep only entries with a birth ('b') timestamp
                        continue
                    if line[0].endswith("Z"):
                        date = datetime.datetime.strptime(
                            line[0], '%Y-%m-%dT%H:%M:%SZ')
                    else:
                        date = datetime.datetime.strptime(
                            line[0], '%Y %m %d %a %H:%M:%S')
                    if user not in ntusers[part]:
                        ntusers[part].append((user, date))

        self.ntusers = ntusers
Example #8
    def save_recycle_files(self, output_file, partition=None, sorting=True):
        """ Sort recycle bin files by date and save to 'output_file' csv. """
        if not (len(self.i_files) or len(self.r_files)):
            self.logger().info('No RecycleBin files found{}.'.format(
                ' in partition {}'.format(partition) if partition else ''))
            return
        if sorting:
            self.RB_files = list(self.i_files.values()) + self.r_files
            self.RB_files = sorted(self.RB_files, key=lambda it: it['Date'])
        else:
            self.RB_files = chain(self.i_files.values(), self.r_files)

        check_file(output_file, delete_exists=True)
        save_csv(self.RB_files,
                 outfile=output_file,
                 quoting=0,
                 file_exists='OVERWRITE')
Example #9
    def run(self, keyfile=""):
        """
        Searche contents of regex in output dir except in strings, searches and parser folders
        """
        self.logger().info("Searching at output folder")
        if not keyfile:
            keyfile = self.myconfig('keyfile')
        check_file(keyfile, error_missing=True)

        grep = self.config.get('plugins.common', 'grep', '/bin/grep')

        skip_folders = ("strings", "parser", "searches")

        self.logger().info("Getting key list from {}".format(keyfile))
        keywords = getSearchItems(keyfile)

        temp_dir = tempfile.mkdtemp('outsearch')
        outdir = self.myconfig('outdir')
        check_directory(outdir, create=True)

        for kw, srch in keywords.items():
            output_file = os.path.join(temp_dir, "outsearch_{}.txt".format(kw))
            with open(output_file, "w") as f:
                f.write(
                    "\nKeyword: {}\n-----------------------------\n\n".format(
                        srch))
                f.flush()

                for item in os.listdir(self.myconfig('outputdir')):
                    folder = os.path.join(self.myconfig('outputdir'), item)
                    if os.path.isdir(folder) and item not in skip_folders:
                        run_command([grep, "-ilR", srch, item],
                                    stdout=f,
                                    from_dir=self.myconfig('outputdir'),
                                    logger=self.logger())

        try:
            for file in os.listdir(temp_dir):
                shutil.copy(os.path.join(temp_dir, file),
                            os.path.join(outdir, file))
        finally:
            shutil.rmtree(temp_dir)

        self.logger().info("OutSearch done")
        return []
Example #10
    def run(self, path=""):
        self.disk = getSourceImage(self.myconfig)

        keyfile = path
        if not keyfile:
            keyfile = self.myconfig('keyfile')
        self.logger().debug('Testing existence of {}'.format(keyfile))
        check_file(keyfile, error_missing=True)

        # Get string files or generate them if not found
        self.string_path = self.myconfig('strings_dir')
        if not (check_directory(self.string_path)
                and os.listdir(self.string_path)):
            self.logger().debug("No string files found. Generating them")
            StringGenerate(config=self.config,
                           disk=self.disk).generate_strings()

        self.search_path = self.myconfig('outdir')
        check_directory(self.search_path, create=True)

        # Get kw:regex dictionary reading keyfile
        self.keywords = getSearchItems(keyfile)
        # Store sets of blocks per kw and partition. Ex: {'my_kw': {'p02': {1234, 1235, ...}}}
        self.blocks = {}
        # Store status for blocks with search hits in a partition. Ex: {'03': {4547: 'Allocated', 1354536: 'Not Allocated'}}
        self.block_status = defaultdict(dict)

        self.fs_object = FileSystem(self.config, disk=self.disk)

        # Generate or load 'hits_' and 'blocks_' files
        for kname in tqdm(self.keywords,
                          total=len(self.keywords),
                          desc='Searching keywords in strings'):
            kw = kname.strip()
            self.get_blocks(kw, self.keywords[kname])

        # Generate 'all_' files
        self.get_cluster()

        self.logger().info("StringSearch done")
        return []
Example #11
    def load_partition(self):
        """ Load partition variables from a JSON file. Avoids running mmls every time """
        infile = os.path.join(self.myconfig('auxdir'),
                              'p{}_info.json'.format(self.partition))
        if check_file(infile) and os.path.getsize(infile) != 0:
            with open(infile) as inputfile:
                try:
                    return json.load(inputfile)
                except Exception:
                    self.logger().warning(
                        'JSON file {} malformed'.format(infile))
                    return False
        return False
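Note: load_partition implies a writer counterpart somewhere in the same class that caches the mmls output. A hypothetical sketch, reusing the helpers shown on this page; the method name and the shape of 'data' are assumptions.

    def save_partition(self, data):
        """ Hypothetical counterpart: cache the partition variables as JSON in auxdir """
        check_directory(self.myconfig('auxdir'), create=True)
        outfile = os.path.join(self.myconfig('auxdir'),
                               'p{}_info.json'.format(self.partition))
        with open(outfile, 'w') as out:
            json.dump(data, out)  # read back by load_partition above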
Example #12
    def generate(self, evtx_path):
        """ Auxiliary function """

        check_directory(evtx_path, create=True)
        evtx = self.config.get('plugins.common', 'evtxdump',
                               '/usr/local/bin/evtxdump.pl')

        alloc_files = GetFiles(self.config, vss=self.myflag("vss"))
        if self.vss:
            evtx_files = alloc_files.search(r"{}.*\.evtx$".format(
                evtx_path.split('/')[-1]))
        else:
            evtx_files = alloc_files.search(r"\.evtx$")

        errorlog = self.myconfig(
            'errorlog',
            os.path.join(self.myconfig('sourcedir'),
                         "{}_aux.log".format(self.myconfig('source'))))

        for i in evtx_files:
            evtx_file = os.path.join(self.myconfig('casedir'), i)
            if not check_file(evtx_file):
                self.logger().warning('File %s does not exist', evtx_file)
                continue
            self.logger().info("Parsing {}".format(i))
            name = os.path.join(evtx_path, os.path.basename(i))[:-4] + "txt"  # swap the .evtx extension for .txt

            # if the output already exists, continue
            if check_file(name):
                self.logger().debug(
                    'The output file %s already exists. Skipping', name)
                continue

            with open(name, "wb") as f:
                with open(errorlog, 'a') as logfile:
                    run_command([evtx, evtx_file],
                                stdout=f,
                                stderr=logfile,
                                logger=self.logger())
Example #13
    def get_cluster(self):
        """ Generates report files containing information about the block where a hit is found, along with the contents of the block itself. """
        self.inode_from_block = {}
        self.inode_status = {}
        self.path_from_inode = {}
        self.path_from_inode_del = {}

        # Creating the relation between every inode and its blocks takes a long time.
        # Searching only the required blocks, although slower one by one, could be faster if the list is short
        blocks_threshold = 20000  # it takes about an hour
        sum_blocks = 0
        for kw, parts in self.blocks.items():
            for p in parts:
                sum_blocks += len(parts[p])
        if sum_blocks > blocks_threshold:
            for p in self.disk.partitions:
                if not p.isMountable or p.filesystem == "NoName":
                    continue
                self.inode_from_block['p{}'.format(
                    p.partition)] = self.fs_object.load_inode_from_block(
                        partition='p{}'.format(p.partition))

        # Get the necessary files relating inodes with paths and status
        for p in self.disk.partitions:
            if not p.isMountable or p.filesystem == "NoName":
                continue
            part_name = 'p{}'.format(p.partition)
            self.inode_status[part_name] = self.fs_object.load_inode_status(
                partition=part_name)
            self.path_from_inode[
                part_name] = self.fs_object.load_path_from_inode(
                    partition=part_name)
            self.path_from_inode_del[
                part_name] = self.fs_object.load_path_from_inode(
                    partition=part_name, deleted=True)

        self.used_blocks = defaultdict(set)
        self.block_inodes = defaultdict(dict)

        for kw in self.blocks:
            all_file = os.path.join(self.search_path, "all_{}".format(kw))
            if check_file(all_file) and os.path.getsize(all_file) != 0:
                self.logger().info(
                    'File {} already generated'.format(all_file))
                continue
            with open(all_file, "wb") as all_stream:
                for entry in self.all_info(self.blocks[kw], kw):
                    all_stream.write(entry)
Example #14
    def run(self, path=""):
        self.disk = getSourceImage(self.myconfig)
        if not self.disk.exists():
            self.logger().error(self.disk)
            return []

        self.source = self.myconfig('source')
        self.outFolder = self.myconfig('deleteddir')
        check_directory(self.outFolder, create=True)

        # Set sentinel first/last dates, narrowed down as artifacts are processed
        self.firstDate = datetime.date.today() + datetime.timedelta(days=365)
        self.lastDate = datetime.date(1970, 1, 1)

        # Process Timeline deleted files
        self.timelineBodyFile = os.path.join(self.myconfig('timelinesdir'), '{}_BODY.csv'.format(self.source))
        check_file(self.timelineBodyFile, error_missing=True)
        # cmd = r"grep '(deleted' {} | grep -v FILE_NAME | cut -d'|' -f2 | sed 's_^[0-9-][0-9-]*/mnt/\(.*\) (deleted.*$_\1_' | sort -u".format(self.timelineBodyFile)
        cmd = r"grep '(deleted' {} | grep -v '\$FILE_NAME' | cut -d'|' -f2,3,7".format(self.timelineBodyFile)
        deletedTimelineFiles = shell_command(cmd)
        df_timeline = self.get_dataframe(deletedTimelineFiles, 'timeline')

        # Process Recycle
        self.recycleFile = os.path.join(self.myconfig('recycledir'), 'recycle_bin.csv')
        check_file(self.recycleFile, error_missing=True)
        df_recycle = self.get_dataframe(self.recycleFile, 'recycle')

        # Process UsnJrnl and INDX
        df_usnjrnl = pd.DataFrame()
        df_indx = pd.DataFrame()
        for p in self.disk.partitions:
            self.partName = ''.join(['p', p.partition])
            if p.isMountable:

                self.usnJrnlFile = os.path.join(self.myconfig('journaldir'), 'UsnJrnl_{}.csv'.format(p.partition))
                check_file(self.usnJrnlFile, error_missing=True)
                df_u = self.get_dataframe(shell_command(r"grep 'DELETE CLOSE' {} | cut -d',' -f 1,2,4".format(self.usnJrnlFile)), 'usnjrnl')

                self.indxFile = os.path.join(self.myconfig('timelinesdir'), '{}_INDX_timeline.csv'.format(p.partition))
                if not check_file(self.indxFile):
                    df_i = pd.DataFrame()
                else:
                    # cmd = "grep -v 'SHORT FILENAME FORMAT' {} | grep -v 'NOT OBTAINED' | grep -v 'invalid MFTReference' | cut -d ';' -f 3,4,5,7".format(self.indxFile)   # real
                    # cmd = r"tail -n +2 {} | grep -va 'SHORT FILENAME FORMAT' | grep -va 'NOT OBTAINED' | grep -va 'invalid MFTReference' | cut -d ';' -f 2,5,9,14 ".format(self.indxFile)  # unsorted
                    # cmd = r"tail -n +2 {} | grep -va 'SHORT FILENAME FORMAT' | grep -va 'NOT OBTAINED' | cut -d ';' -f 2,5,9,14 ".format(self.indxFile)  # unsorted
                    cmd = r"tail -n +2 {} | grep -va 'SHORT FILENAME FORMAT' | grep -va 'NOT OBTAINED' | cut -d ';' -f 3,4,6,7,9 ".format(self.indxFile)  # real
                    df_i = self.get_dataframe(shell_command(cmd), 'indx')

                df_usnjrnl = self.join_dataframes(df_usnjrnl, df_u)
                df_indx = self.join_dataframes(df_indx, df_i)

        # TODO: timeline_all does not need columns source or reliable
        # Compare Timeline against INDX to extract unique (assuming deleted) files in INDX
        cmd = r"cut -d'|' -f2 {} | grep -v '\$FILE_NAME'".format(self.timelineBodyFile)
        df_all_timeline = self.get_dataframe(shell_command(cmd), 'timeline_all')
        self.logger().debug('Obtaining unique files in INDX')
        df_indx = self.get_deleted_in_INDX(df_all_timeline, df_indx)

        # Create a global dataframe with all artifacts
        self.logger().info('Combining artifacts to create a full list of deleted files')
        df_global = self.combine_artifacts([df_usnjrnl, df_recycle, df_timeline, df_indx])
        duplicated_bin = df_global.duplicated('Filename', keep='first')  # First sources have precedence
        self.logger().info('Found {} duplicated files merging sources'.format(duplicated_bin.sum()))
        df_global = df_global[~duplicated_bin]
        self.logger().debug('Deleted files dataframe shape after deduplication: {}'.format(df_global.shape))

        # Save global DataFrame
        outfile = os.path.join(self.outFolder, '{}_deleted.csv'.format(self.source))
        with open(outfile, 'w') as f:
            f.write(df_global.to_csv(index=False))

        # Create number of files summary based on day, hour and partition
        self.get_stats(self.join_dataframes(df_usnjrnl, df_recycle), 'all')
Example #15
    def search_strings(self, kw, regex):
        """ Generates a string search and yields hits. Also stores blocks where there's a match for the keyword 'kw'.

        Parameters:
            kw (str): keyword name
            regex (str): regular expression associated to keyword

        Yields:
            Dictionaries containing partition, block, offset and string match
        """
        self.logger().info('Searching keyword {} with regex {}'.format(
            kw, regex))

        partitions = {
            p.partition: [p.loop if p.loop != "" else "", p.clustersize]
            for p in self.disk.partitions
        }
        blocks = {}
        for p in self.disk.partitions:
            blocks.update({''.join(['p', p.partition]): set()})

        # In the string files to search, all characters are lowercase, so the '-i' option is not needed
        grep = self.myconfig('grep', '/bin/grep')
        args = "-H" if kw == regex else "-HP"
        regex_search = [regex] if regex else [kw]
        search_command = '{} {} '.format(grep, args) + '"{regex}" "{path}"'
        module = base.job.load_module(self.config,
                                      'base.commands.RegexFilter',
                                      extra_config=dict(
                                          cmd=search_command,
                                          keyword_list=regex_search,
                                          from_dir=self.string_path))

        srch = re.compile(r"(p\d{1,2})_strings_?[\w.]+:\s*(\d+)\s+(.*)")
        for f in os.listdir(self.string_path):
            for match in module.run(os.path.join(self.string_path, f)):
                line = match['match']
                aux = srch.match(line)
                if not aux:
                    continue

                pname, offset, string = aux.group(1), aux.group(2), aux.group(3)
                pt = pname[1:]
                bsize = int(partitions[pt][1])

                try:
                    blk = int(offset) // bsize
                    if blk not in self.block_status[pt]:
                        self.block_status[pt][
                            blk] = self.fs_object.cluster_allocation_status(
                                pname, str(blk))
                    status = self.block_status[pt].get(blk)
                except Exception as exc:
                    self.logger().error('Error searching {} in line {}'.format(
                        srch.pattern, line))
                    raise exc

                if blk not in blocks[pname]:  # new block
                    blocks[pname].add(blk)

                yield OrderedDict([('Partition', pname),
                                   ('Offset', int(offset)), ('Block', blk),
                                   ('Status', status), ('String', string)])

        # Save blocks where a kw has been found
        if not check_file(self.blocks_file_path):
            self.save_blocks_file(blocks, kw)
Example #16
    def characterize_Windows(self):
        """ Characterize Windows partitions from registry files and timeline. """

        hives_dir = self.myconfig('hivesdir')

        # Check registry is parsed. Generate the minimum files needed otherwise
        ripplugins_file = self.myconfig('ripplugins')
        if not check_directory(hives_dir):
            module = base.job.load_module(
                self.config,
                'plugins.windows.RVT_autorip.Autorip',
                extra_config=dict(ripplugins=ripplugins_file))
            list(module.run())

        # Get the autorip outputfile associated with each necessary plugin
        with open(ripplugins_file) as rf:
            ripplugins = json.load(rf)
        used_plugins = [
            'winnt_cv', 'shutdown', 'timezone', 'lastloggedon',
            'processor_architecture', 'compname', 'samparse', 'profilelist'
        ]
        os_plugins = [
            'winnt_cv', 'shutdown', 'timezone', 'lastloggedon',
            'processor_architecture', 'compname'
        ]
        plugin_files = {
            plug: p['file']
            for plug in used_plugins for p in ripplugins
            if plug in p['plugins']
        }

        plugin_fields = {
            'winnt_cv': [
                '  ProductName', '  CurrentVersion', '  InstallationType',
                '  EditionID', '  CurrentBuild', '  ProductId',
                '  RegisteredOwner', '  RegisteredOrganization',
                '  InstallDate'
            ],
            'shutdown': ['  ShutdownTime'],
            'processor_architecture': ['PROCESSOR_ARCHITECTURE'],
            'compname': ['ComputerName']
        }

        field_names = {
            '  ProductName': 'ProductName',
            '  CurrentVersion': 'CurrentVersion',
            '  InstallationType': 'InstallationType',
            '  EditionID': 'EditionID',
            '  CurrentBuild': 'CurrentBuild',
            '  ProductId': 'ProductId',
            '  RegisteredOwner': 'RegisteredOwner',
            '  RegisteredOrganization': 'RegisteredOrganization',
            '  InstallDate': 'InstallDate',
            '  ShutdownTime': 'ShutdownTime',
            '  TimeZoneKeyName': 'TimeZone',
            'PROCESSOR_ARCHITECTURE': 'ProcessorArchitecture',
            'ComputerName': 'ComputerName'
        }

        partitions = [
            folder for folder in sorted(os.listdir(self.myconfig('mountdir')))
            if folder.startswith('p')
        ]

        # Define self.ntusers, that gets the creation date of NTUSER.DAT for every user and partition
        self.make_ntuser_timeline()

        # Main loop to populate os_info
        os_info = defaultdict(dict)
        for part in partitions:
            for plug in os_plugins:
                hivefile = os.path.join(
                    hives_dir, '{}_{}.txt'.format(plugin_files[plug], part))
                if not check_file(hivefile):
                    continue
                with open(hivefile) as f_in:
                    if plug == 'lastloggedon':
                        for line in f_in:
                            if line.startswith('LastLoggedOn'):
                                f_in.readline()
                                last_write = f_in.readline()[11:].rstrip('\n')
                                f_in.readline()
                                last_user = f_in.readline()[22:].rstrip('\n')
                                os_info[part][
                                    'LastLoggedOn'] = '{} ({})'.format(
                                        last_write, last_user)
                                break
                        continue
                    elif plug == 'timezone':
                        for line in f_in:
                            if line.startswith('TimeZoneInformation'):
                                bias, tz_name = '', ''
                                while not line.startswith(
                                        '....................') and line != "":
                                    line = f_in.readline()
                                    if line.startswith('  Bias'):
                                        bias = line[line.find('('):].rstrip(
                                            '\n')
                                    if line.startswith('  TimeZoneKeyName'):
                                        line = line[len('  TimeZoneKeyName') +
                                                    3:].rstrip('\n')
                                        tz_name = line[:line.find('Time') + 4]
                                os_info[part]['TimeZone'] = '{} {}'.format(
                                    tz_name, bias)
                                break
                        continue

                    for field in plugin_fields[plug]:
                        f_in.seek(0)
                        for line in f_in:
                            if line.startswith(field):
                                os_info[part][
                                    field_names[field]] = line[len(field) +
                                                               3:].rstrip('\n')
                                break

            # Skip displaying partition info if it does not contain an OS
            if not os_info.get(part, None):
                self.logger().debug(
                    'No OS information for partition {}'.format(part))
                continue

            # Users Info
            hivefile = os.path.join(
                hives_dir, '{}_{}.txt'.format(plugin_files['samparse'], part))
            line = '  '
            users = []
            user_profiles = []
            if check_file(hivefile):
                with open(hivefile) as f_in:
                    # Parse samparse
                    while not line.startswith('profilelist') and line != "":
                        line = f_in.readline()

                        aux = re.search(r"Username\s*:\s*(.*)\n", line)
                        if aux:
                            user = [aux.group(1), "", ""]
                            while line != "\n":
                                line = f_in.readline()
                                aux = re.search(
                                    r"Account Created\s*:\s*(.*)\n", line)
                                if aux:
                                    aux1 = aux.group(1).replace("  ", " ")
                                    date = datetime.datetime.strptime(
                                        aux1, '%a %b %d %H:%M:%S %Y Z')
                                    user[1] = date.strftime(
                                        '%d-%m-%Y %H:%M:%S UTC')
                                    continue
                                aux = re.search(
                                    r"Last Login Date\s*:\s*(.*)\n",
                                    line)  # TODO: check this field is reliable
                                if aux:
                                    if aux.group(1).find("Never") == -1:
                                        aux1 = aux.group(1).replace("  ", " ")
                                        date = datetime.datetime.strptime(
                                            aux1, '%a %b %d %H:%M:%S %Y Z')
                                        user[2] = date.strftime(
                                            '%d-%m-%Y %H:%M:%S UTC')
                                    else:
                                        user[2] = "Never"
                                    users.append(user)
                                    break

                    # Parse profilelist
                    line = '  '
                    while not line.startswith(
                            '....................') and line != "":
                        line = f_in.readline()
                        aux = re.match(r"Path\s*:\s*.:.Users.(.*)",
                                       line.strip())
                        if aux:
                            user = [aux.group(1), "", ""]
                            while line != "\n":
                                line = f_in.readline()
                                aux = re.search(r"LastWrite\s*:\s*(.*)",
                                                line.strip())
                                if aux:
                                    aux1 = aux.group(1).replace("  ", " ")
                                    date = datetime.datetime.strptime(
                                        aux1, '%a %b %d %H:%M:%S %Y (UTC)')
                                    user[2] = date.strftime(
                                        "%d-%m-%Y %H:%M:%S UTC")
                                    user_profiles.append(user)

            # Get creation date from NTUSER.DAT if not found in profilelist
            for i in user_profiles:
                for j in self.ntusers.get(part, []):  # may be empty if the timeline file was missing
                    if i[0] == j[0] and i[1] == "":
                        i[1] = j[1].strftime('%d-%m-%Y %H:%M:%S UTC')
            os_info[part]["users"] = users
            os_info[part]["user_profiles"] = user_profiles
        return os_info
Example #17
    def exists(self):
        """ Returns True if the disk was found in the morgue. """
        return check_file(self.imagefile)