Example #1
    def pehash(self):
        if not HAVE_PEHASH:
            self.log('error', "PEhash is missing. Please copy PEhash to the modules directory of Viper")
            return

        current_pehash = None
        if __sessions__.is_set():
            current_pehash = calculate_pehash(__sessions__.current.file.path)
            self.log('info', "PEhash: {0}".format(bold(current_pehash)))

        if self.args.all or self.args.cluster or self.args.scan:
            db = Database()
            samples = db.find(key='all')

            rows = []
            for sample in samples:
                sample_path = get_sample_path(sample.sha256)
                pe_hash = calculate_pehash(sample_path)
                if pe_hash:
                    rows.append((sample.name, sample.md5, pe_hash))

        if self.args.all:
            self.log('info', "PEhash for all files:")
            header = ['Name', 'MD5', 'PEhash']
            self.log('table', dict(header=header, rows=rows))

        elif self.args.cluster:
            self.log('info', "Clustering files by PEhash...")

            cluster = {}
            for sample_name, sample_md5, pe_hash in rows:
                cluster.setdefault(pe_hash, []).append([sample_name, sample_md5])

            for item in cluster.items():
                if len(item[1]) > 1:
                    self.log('info', "PEhash cluster {0}:".format(bold(item[0])))
                    self.log('table', dict(header=['Name', 'MD5'], rows=item[1]))

        elif self.args.scan:
            if __sessions__.is_set() and current_pehash:
                self.log('info', "Finding matching samples...")

                matches = []
                for row in rows:
                    if row[1] == __sessions__.current.file.md5:
                        continue

                    if row[2] == current_pehash:
                        matches.append([row[0], row[1]])

                if matches:
                    self.log('table', dict(header=['Name', 'MD5'], rows=matches))
                else:
                    self.log('info', "No matches found")
Example #2
    def compiletime(self):
        def get_compiletime(pe):
            return datetime.datetime.fromtimestamp(
                pe.FILE_HEADER.TimeDateStamp)

        if not self.__check_session():
            return

        compile_time = get_compiletime(self.pe)
        self.log('info', "Compile Time: {0}".format(bold(compile_time)))

        if self.args.scan:
            self.log('info', "Scanning the repository for matching samples...")

            db = Database()
            samples = db.find(key='all')

            matches = []
            for sample in samples:
                if sample.sha256 == __sessions__.current.file.sha256:
                    continue

                sample_path = get_sample_path(sample.sha256)
                if not os.path.exists(sample_path):
                    continue

                try:
                    cur_pe = pefile.PE(sample_path)
                    cur_compile_time = get_compiletime(cur_pe)
                except:
                    continue

                if compile_time == cur_compile_time:
                    matches.append([sample.name, sample.md5, cur_compile_time])
                else:
                    if self.args.window:
                        if cur_compile_time > compile_time:
                            delta = (cur_compile_time - compile_time)
                        elif cur_compile_time < compile_time:
                            delta = (compile_time - cur_compile_time)

                        delta_minutes = int(delta.total_seconds()) / 60
                        if delta_minutes <= self.args.window:
                            matches.append(
                                [sample.name, sample.md5, cur_compile_time])

            self.log('info',
                     "{0} relevant matches found".format(bold(len(matches))))

            if len(matches) > 0:
                self.log(
                    'table',
                    dict(header=['Name', 'MD5', 'Compile Time'], rows=matches))
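
The nested get_compiletime() helper boils down to one pefile attribute lookup. A minimal sketch of the same lookup outside the module, assuming a hypothetical sample.exe on disk:

import datetime

import pefile

pe = pefile.PE('sample.exe')  # hypothetical path
# TimeDateStamp is the linker timestamp in seconds since the Unix epoch;
# fromtimestamp() converts it to local time, as the module above does.
compile_time = datetime.datetime.fromtimestamp(pe.FILE_HEADER.TimeDateStamp)
print('Compile Time: {0}'.format(compile_time))
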
Example #3
    def compiletime(self):

        def get_compiletime(pe):
            return datetime.datetime.fromtimestamp(pe.FILE_HEADER.TimeDateStamp)

        if not self.__check_session():
            return

        compile_time = get_compiletime(self.pe)
        self.log('info', "Compile Time: {0}".format(bold(compile_time)))

        if self.args.scan:
            self.log('info', "Scanning the repository for matching samples...")

            db = Database()
            samples = db.find(key='all')

            matches = []
            for sample in samples:
                if sample.sha256 == __sessions__.current.file.sha256:
                    continue

                sample_path = get_sample_path(sample.sha256)
                if not os.path.exists(sample_path):
                    continue

                try:
                    cur_pe = pefile.PE(sample_path)
                    cur_compile_time = get_compiletime(cur_pe)
                except:
                    continue

                if compile_time == cur_compile_time:
                    matches.append([sample.name, sample.md5, cur_compile_time])
                else:
                    if self.args.window:
                        if cur_compile_time > compile_time:
                            delta = (cur_compile_time - compile_time)
                        elif cur_compile_time < compile_time:
                            delta = (compile_time - cur_compile_time)

                        delta_minutes = int(delta.total_seconds()) / 60
                        if delta_minutes <= self.args.window:
                            matches.append([sample.name, sample.md5, cur_compile_time])

            self.log('info', "{0} relevant matches found".format(bold(len(matches))))

            if len(matches) > 0:
                self.log('table', dict(header=['Name', 'MD5', 'Compile Time'], rows=matches))
Example #4
    def ghiro(self):
        if not HAVE_REQUESTS:
            self.log(
                'error',
                "Missing dependency, install requests (`pip install requests`)"
            )
            return

        payload = dict(private='true', json='true')
        files = dict(image=open(__sessions__.current.file.path, 'rb'))

        response = requests.post('http://www.imageforensic.org/api/submit/',
                                 data=payload,
                                 files=files)
        results = response.json()

        if results['success']:
            report = results['report']

            if len(report['signatures']) > 0:
                self.log('', bold("Signatures:"))

                for signature in report['signatures']:
                    self.log('item', signature['description'])
        else:
            self.log('error', "The analysis failed")
Example #5
    def get_config(self, family):
        if not __sessions__.is_set():
            self.log('error', "No session opened")
            return

        try:
            module = importlib.import_module('modules.rats.{0}'.format(family))
        except ImportError:
            self.log('error',
                     "There is no module for family {0}".format(bold(family)))
            return

        config = module.config(__sessions__.current.file.data)
        if not config:
            self.log('error', "No Configuration Detected")
            return

        rows = []
        for key, value in config.items():
            rows.append([key, value])

        rows = sorted(rows, key=lambda entry: entry[0])

        self.log('info', "Configuration:")
        self.log('table', dict(header=['Key', 'Value'], rows=rows))
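
The family argument is resolved to a Python module at runtime through importlib. A minimal sketch of that pattern, with a hypothetical family name and sample path:

import importlib

family = 'darkcomet'  # hypothetical family name

try:
    # Equivalent to: import modules.rats.darkcomet
    module = importlib.import_module('modules.rats.{0}'.format(family))
except ImportError:
    module = None
    print('There is no module for family {0}'.format(family))

if module is not None:
    # Each family module is expected to expose a config(data) function.
    config = module.config(open('sample.bin', 'rb').read())  # hypothetical path
    if not config:
        print('No configuration detected')
    else:
        for key, value in sorted(config.items()):
            print('{0}: {1}'.format(key, value))
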
Example #6
    def peid(self):
        def get_signatures():
            with open(os.path.join(CIRTKIT_ROOT, 'data/peid/UserDB.TXT'),
                      'rt') as f:
                sig_data = f.read()

            signatures = peutils.SignatureDatabase(data=sig_data)

            return signatures

        def get_matches(pe, signatures):
            matches = signatures.match_all(pe, ep_only=True)
            return matches

        if not self.__check_session():
            return

        signatures = get_signatures()
        peid_matches = get_matches(self.pe, signatures)

        if peid_matches:
            self.log('info', "PEiD Signatures:")
            for sig in peid_matches:
                if type(sig) is list:
                    self.log('item', sig[0])
                else:
                    self.log('item', sig)
        else:
            self.log('info', "No PEiD signatures matched.")

        if self.args.scan and peid_matches:
            self.log('info', "Scanning the repository for matching samples...")

            db = Database()
            samples = db.find(key='all')

            matches = []
            for sample in samples:
                if sample.sha256 == __sessions__.current.file.sha256:
                    continue

                sample_path = get_sample_path(sample.sha256)
                if not os.path.exists(sample_path):
                    continue

                try:
                    cur_pe = pefile.PE(sample_path)
                    cur_peid_matches = get_matches(cur_pe, signatures)
                except:
                    continue

                if peid_matches == cur_peid_matches:
                    matches.append([sample.name, sample.sha256])

            self.log('info',
                     "{0} relevant matches found".format(bold(len(matches))))

            if len(matches) > 0:
                self.log('table', dict(header=['Name', 'SHA256'],
                                       rows=matches))
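
peutils ships with pefile and does the actual signature matching; the command only feeds it the PEiD UserDB.TXT rules. A minimal sketch of the same matching step, assuming both the database file and a sample exist locally:

import pefile
import peutils

pe = pefile.PE('sample.exe')                          # hypothetical path
signatures = peutils.SignatureDatabase('UserDB.TXT')  # hypothetical path

# ep_only=True restricts matching to the entry point, as in the module above.
matches = signatures.match_all(pe, ep_only=True)

if matches:
    for sig in matches:
        # match_all() may return nested lists of signature names.
        print(sig[0] if isinstance(sig, list) else sig)
else:
    print('No PEiD signatures matched.')
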
Example #7
    def peid(self):

        def get_signatures():
            with open(os.path.join(CIRTKIT_ROOT, 'data/peid/UserDB.TXT'), 'rt') as f:
                sig_data = f.read()

            signatures = peutils.SignatureDatabase(data=sig_data)

            return signatures

        def get_matches(pe, signatures):
            matches = signatures.match_all(pe, ep_only=True)
            return matches

        if not self.__check_session():
            return

        signatures = get_signatures()
        peid_matches = get_matches(self.pe, signatures)

        if peid_matches:
            self.log('info', "PEiD Signatures:")
            for sig in peid_matches:
                if type(sig) is list:
                    self.log('item', sig[0])
                else:
                    self.log('item', sig)
        else:
            self.log('info', "No PEiD signatures matched.")

        if self.args.scan and peid_matches:
            self.log('info', "Scanning the repository for matching samples...")

            db = Database()
            samples = db.find(key='all')

            matches = []
            for sample in samples:
                if sample.sha256 == __sessions__.current.file.sha256:
                    continue

                sample_path = get_sample_path(sample.sha256)
                if not os.path.exists(sample_path):
                    continue

                try:
                    cur_pe = pefile.PE(sample_path)
                    cur_peid_matches = get_matches(cur_pe, signatures)
                except:
                    continue

                if peid_matches == cur_peid_matches:
                    matches.append([sample.name, sample.sha256])

            self.log('info', "{0} relevant matches found".format(bold(len(matches))))

            if len(matches) > 0:
                self.log('table', dict(header=['Name', 'SHA256'], rows=matches))
Example #8
    def ghiro(self):
        if not HAVE_REQUESTS:
            self.log('error', "Missing dependency, install requests (`pip install requests`)")
            return

        payload = dict(private='true', json='true')
        files = dict(image=open(__sessions__.current.file.path, 'rb'))

        response = requests.post('http://www.imageforensic.org/api/submit/', data=payload, files=files)
        results = response.json()

        if results['success']:
            report = results['report']

            if len(report['signatures']) > 0:
                self.log('', bold("Signatures:"))

                for signature in report['signatures']:
                    self.log('item', signature['description'])
        else:
            self.log('error', "The analysis failed")
Example #9
    def get_config(self, family):
        if not __sessions__.is_set():
            self.log('error', "No session opened")
            return

        try:
            module = importlib.import_module('modules.rats.{0}'.format(family))
        except ImportError:
            self.log('error', "There is no module for family {0}".format(bold(family)))
            return

        config = module.config(__sessions__.current.file.data)
        if not config:
            self.log('error', "No Configuration Detected")
            return

        rows = []
        for key, value in config.items():
            rows.append([key, value])

        rows = sorted(rows, key=lambda entry: entry[0])

        self.log('info', "Configuration:")
        self.log('table', dict(header=['Key', 'Value'], rows=rows))
Example #10
    def language(self):
        def get_iat(pe):
            iat = []
            if hasattr(pe, 'DIRECTORY_ENTRY_IMPORT'):
                for peimport in pe.DIRECTORY_ENTRY_IMPORT:
                    iat.append(peimport.dll)

            return iat

        def check_module(iat, match):
            for imp in iat:
                if imp.find(match) != -1:
                    return True

            return False

        def is_cpp(data, cpp_count):
            for line in data:
                if 'type_info' in line or 'RTTI' in line:
                    cpp_count += 1
                    break

            if cpp_count == 2:
                return True

            return False

        def is_delphi(data):
            for line in data:
                if 'Borland' in line:
                    path = line.split('\\')
                    for p in path:
                        if 'Delphi' in p:
                            return True
            return False

        def is_vbdotnet(data):
            for line in data:
                if 'Compiler' in line:
                    stuff = line.split('.')
                    if 'VisualBasic' in stuff:
                        return True

            return False

        def is_autoit(data):
            for line in data:
                if 'AU3!' in line:
                    return True

            return False

        def is_packed(pe):
            for section in pe.sections:
                if section.get_entropy() > 7:
                    return True

            return False

        def get_strings(content):
            regexp = '[\x30-\x39\x41-\x5f\x61-\x7a\-\.:]{4,}'
            return re.findall(regexp, content)

        def find_language(iat, sample, content):
            dotnet = False
            cpp_count = 0
            found = None

            # VB check
            if check_module(iat, 'VB'):
                self.log(
                    'info', "{0} - Possible language: Visual Basic".format(
                        sample.name))
                return 'Visual Basic'

            # .NET check
            if check_module(iat, 'mscoree.dll') and not found:
                dotnet = True
                found = '.NET'

            # C DLL check
            if not found and (check_module(iat, 'msvcr') or check_module(
                    iat, 'MSVCR') or check_module(iat, 'c++')):
                cpp_count += 1

            if not found:
                data = get_strings(content)

                if is_cpp(data, cpp_count) and not found:
                    found = 'CPP'
                if not found and cpp_count == 1:
                    found = 'C'
                if not dotnet and is_delphi(data) and not found:
                    found = 'Delphi'
                if dotnet and is_vbdotnet(data):
                    found = 'Visual Basic .NET'
                if is_autoit(data) and not found:
                    found = 'AutoIt'

            return found

        if not self.__check_session():
            return

        if is_packed(self.pe):
            self.log(
                'warning',
                "Probably packed, the language guess might be unreliable")

        language = find_language(get_iat(self.pe), __sessions__.current.file,
                                 __sessions__.current.file.data)

        if language:
            self.log('info', "Probable language: {0}".format(bold(language)))
        else:
            self.log('error', "Programming language not identified")
            return

        if self.args.scan:
            self.log('info', "Scanning the repository for matching samples...")

            db = Database()
            samples = db.find(key='all')

            matches = []
            for sample in samples:
                if sample.sha256 == __sessions__.current.file.sha256:
                    continue

                sample_path = get_sample_path(sample.sha256)

                if not os.path.exists(sample_path):
                    continue

                try:
                    cur_pe = pefile.PE(sample_path)
                except pefile.PEFormatError as e:
                    continue

                cur_packed = ''
                if is_packed(cur_pe):
                    cur_packed = 'Yes'

                cur_language = find_language(get_iat(cur_pe), sample,
                                             open(sample_path, 'rb').read())

                if not cur_language:
                    continue

                if cur_language == language:
                    matches.append([sample.name, sample.md5, cur_packed])

            if matches:
                self.log(
                    'table',
                    dict(header=['Name', 'MD5', 'Is Packed'], rows=matches))
            else:
                self.log('info', "No matches found")
Example #11
    def security(self):
        def get_certificate(pe):
            # TODO: this only extract the raw list of certificate data.
            # I need to parse them, extract single certificates and perhaps return
            # the PEM data of the first certificate only.
            pe_security_dir = pefile.DIRECTORY_ENTRY[
                'IMAGE_DIRECTORY_ENTRY_SECURITY']
            address = pe.OPTIONAL_HEADER.DATA_DIRECTORY[
                pe_security_dir].VirtualAddress
            #  size = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pe_security_dir].Size

            if address:
                return pe.write()[address + 8:]
            else:
                return None

        def get_signed_samples(current=None, cert_filter=None):
            db = Database()
            samples = db.find(key='all')

            results = []
            for sample in samples:
                # Skip if it's the same file.
                if current:
                    if sample.sha256 == current:
                        continue

                # Obtain path to the binary.
                sample_path = get_sample_path(sample.sha256)
                if not os.path.exists(sample_path):
                    continue

                # Open PE instance.
                try:
                    cur_pe = pefile.PE(sample_path)
                except:
                    continue

                cur_cert_data = get_certificate(cur_pe)

                if not cur_cert_data:
                    continue

                cur_cert_md5 = get_md5(cur_cert_data)

                if cert_filter:
                    if cur_cert_md5 == cert_filter:
                        results.append([sample.name, sample.md5])
                else:
                    results.append([sample.name, sample.md5, cur_cert_md5])

            return results

        if self.args.all:
            self.log('info',
                     "Scanning the repository for all signed samples...")

            all_of_them = get_signed_samples()

            self.log('info',
                     "{0} signed samples found".format(bold(len(all_of_them))))

            if len(all_of_them) > 0:
                self.log(
                    'table',
                    dict(header=['Name', 'MD5', 'Cert MD5'], rows=all_of_them))

            return

        if not self.__check_session():
            return

        cert_data = get_certificate(self.pe)

        if not cert_data:
            self.log('warning', "No certificate found")
            return

        cert_md5 = get_md5(cert_data)

        self.log('info',
                 "Found certificate with MD5 {0}".format(bold(cert_md5)))

        if self.args.dump:
            cert_path = os.path.join(
                self.args.dump,
                '{0}.crt'.format(__sessions__.current.file.sha256))
            with open(cert_path, 'wb+') as cert_handle:
                cert_handle.write(cert_data)

            self.log('info', "Dumped certificate to {0}".format(cert_path))
            self.log(
                'info', "You can parse it using the following command:\n\t" +
                bold("openssl pkcs7 -inform DER -print_certs -text -in {0}".
                     format(cert_path)))

        # TODO: do scan for certificate's serial number.
        if self.args.scan:
            self.log('info',
                     "Scanning the repository for matching signed samples...")

            matches = get_signed_samples(
                current=__sessions__.current.file.sha256, cert_filter=cert_md5)

            self.log('info',
                     "{0} relevant matches found".format(bold(len(matches))))

            if len(matches) > 0:
                self.log('table', dict(header=['Name', 'MD5'],
                                       rows=matches))

        # TODO: this function needs to be better integrated with the rest of the command.
        # TODO: need to add more error handling and figure out why so many samples are failing.
        if self.args.check:
            if not HAVE_VERIFYSIGS:
                self.log(
                    'error',
                    "Dependencies missing for authenticode validation. Please install M2Crypto and pyasn1 (`pip install pyasn1 M2Crypto`)"
                )
                return

            try:
                auth, computed_content_hash = get_auth_data(
                    __sessions__.current.file.path)
            except Exception as e:
                self.log('error',
                         "Unable to parse PE certificate: {0}".format(str(e)))
                return

            try:
                auth.ValidateAsn1()
                auth.ValidateHashes(computed_content_hash)
                auth.ValidateSignatures()
                auth.ValidateCertChains(time.gmtime())
            except Exception as e:
                self.log(
                    'error',
                    "Unable to validate PE certificate: {0}".format(str(e)))
                return

            self.log('info', bold('Signature metadata:'))
            self.log('info', 'Program name: {0}'.format(auth.program_name))
            self.log('info', 'URL: {0}'.format(auth.program_url))

            if auth.has_countersignature:
                self.log(
                    'info',
                    bold('Countersignature is present. Timestamp: {0} UTC'.
                         format(
                             time.asctime(time.gmtime(
                                 auth.counter_timestamp)))))
            else:
                self.log('info', bold('Countersignature is not present.'))

            self.log('info', bold('Binary is signed with cert issued by:'))
            self.log('info', '{0}'.format(auth.signing_cert_id[0]))

            self.log('info', '{0}'.format(auth.cert_chain_head[2][0]))
            self.log(
                'info', 'Chain not before: {0} UTC'.format(
                    time.asctime(time.gmtime(auth.cert_chain_head[0]))))
            self.log(
                'info', 'Chain not after: {0} UTC'.format(
                    time.asctime(time.gmtime(auth.cert_chain_head[1]))))

            if auth.has_countersignature:
                self.log('info', bold('Countersig chain head issued by:'))
                self.log('info', '{0}'.format(auth.counter_chain_head[2]))
                self.log(
                    'info', 'Countersig not before: {0} UTC'.format(
                        time.asctime(time.gmtime(auth.counter_chain_head[0]))))
                self.log(
                    'info', 'Countersig not after: {0} UTC'.format(
                        time.asctime(time.gmtime(auth.counter_chain_head[1]))))

            self.log('info', bold('Certificates:'))
            for (issuer, serial), cert in auth.certificates.items():
                self.log('info', 'Issuer: {0}'.format(issuer))
                self.log('info', 'Serial: {0}'.format(serial))
                subject = cert[0][0]['subject']
                subject_dn = str(dn.DistinguishedName.TraverseRdn(subject[0]))
                self.log('info', 'Subject: {0}'.format(subject_dn))
                not_before = cert[0][0]['validity']['notBefore']
                not_after = cert[0][0]['validity']['notAfter']
                not_before_time = not_before.ToPythonEpochTime()
                not_after_time = not_after.ToPythonEpochTime()
                self.log(
                    'info', 'Not Before: {0} UTC ({1})'.format(
                        time.asctime(time.gmtime(not_before_time)),
                        not_before[0]))
                self.log(
                    'info', 'Not After: {0} UTC ({1})'.format(
                        time.asctime(time.gmtime(not_after_time)),
                        not_after[0]))

            if auth.trailing_data:
                self.log(
                    'info',
                    'Signature Blob had trailing (unvalidated) data ({0} bytes): {1}'
                    .format(len(auth.trailing_data),
                            auth.trailing_data.encode('hex')))
Example #12
    def imphash(self):
        if self.args.scan and self.args.cluster:
            self.log('error', "You selected two exclusive options, pick one")
            return

        if self.args.cluster:
            self.log('info', "Clustering all samples by imphash...")

            db = Database()
            samples = db.find(key='all')

            cluster = {}
            for sample in samples:
                sample_path = get_sample_path(sample.sha256)
                if not os.path.exists(sample_path):
                    continue

                try:
                    cur_imphash = pefile.PE(sample_path).get_imphash()
                except:
                    continue

                if cur_imphash not in cluster:
                    cluster[cur_imphash] = []

                cluster[cur_imphash].append([sample.sha256, sample.name])

            for cluster_name, cluster_members in cluster.items():
                # Skipping clusters with only one entry.
                if len(cluster_members) == 1:
                    continue

                self.log('info',
                         "Imphash cluster {0}".format(bold(cluster_name)))

                self.log('table',
                         dict(header=['SHA256', 'Name'], rows=cluster_members))

            return

        if self.__check_session():
            try:
                imphash = self.pe.get_imphash()
            except AttributeError:
                self.log(
                    'error',
                    "No imphash support, upgrade pefile to a version >= 1.2.10-139 (`pip install --upgrade pefile`)"
                )
                return

            self.log('info', "Imphash: {0}".format(bold(imphash)))

            if self.args.scan:
                self.log('info',
                         "Scanning the repository for matching samples...")

                db = Database()
                samples = db.find(key='all')

                matches = []
                for sample in samples:
                    if sample.sha256 == __sessions__.current.file.sha256:
                        continue

                    sample_path = get_sample_path(sample.sha256)
                    if not os.path.exists(sample_path):
                        continue

                    try:
                        cur_imphash = pefile.PE(sample_path).get_imphash()
                    except:
                        continue

                    if imphash == cur_imphash:
                        matches.append([sample.name, sample.sha256])

                self.log(
                    'info',
                    "{0} relevant matches found".format(bold(len(matches))))

                if len(matches) > 0:
                    self.log('table',
                             dict(header=['Name', 'SHA256'], rows=matches))
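
The imphash itself comes straight from pefile's get_imphash(). A minimal sketch computing and comparing it for two hypothetical samples:

import pefile

# Hypothetical paths.
imphash_a = pefile.PE('sample_a.exe').get_imphash()
imphash_b = pefile.PE('sample_b.exe').get_imphash()

print('Imphash A: {0}'.format(imphash_a))
print('Imphash B: {0}'.format(imphash_b))

# The import hash is an MD5 over the normalized import table, so equality
# suggests the two binaries share a very similar import layout.
if imphash_a == imphash_b:
    print('Samples share the same imphash')
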
Example #13
    def resources(self):

        # Use this function to retrieve resources for the given PE instance.
        # Returns all the identified resources with indicators and attributes.
        def get_resources(pe):
            resources = []
            if hasattr(pe, 'DIRECTORY_ENTRY_RESOURCE'):
                count = 1
                for resource_type in pe.DIRECTORY_ENTRY_RESOURCE.entries:
                    try:
                        resource = {}

                        if resource_type.name is not None:
                            name = str(resource_type.name)
                        else:
                            name = str(
                                pefile.RESOURCE_TYPE.get(
                                    resource_type.struct.Id))

                        if name is None:
                            name = str(resource_type.struct.Id)

                        if hasattr(resource_type, 'directory'):
                            for resource_id in resource_type.directory.entries:
                                if hasattr(resource_id, 'directory'):
                                    for resource_lang in resource_id.directory.entries:
                                        data = pe.get_data(
                                            resource_lang.data.struct.
                                            OffsetToData,
                                            resource_lang.data.struct.Size)
                                        filetype = get_type(data)
                                        md5 = get_md5(data)
                                        language = pefile.LANG.get(
                                            resource_lang.data.lang, None)
                                        sublanguage = pefile.get_sublang_name_for_lang(
                                            resource_lang.data.lang,
                                            resource_lang.data.sublang)
                                        offset = ('%-8s' %
                                                  hex(resource_lang.data.struct
                                                      .OffsetToData)).strip()
                                        size = (
                                            '%-8s' %
                                            hex(resource_lang.data.struct.Size)
                                        ).strip()

                                        resource = [
                                            count, name, offset, md5, size,
                                            filetype, language, sublanguage
                                        ]

                                        # Dump resources if requested to and if the file currently being
                                        # processed is the opened session file.
                                        # This is to avoid that during a --scan all the resources being
                                        # scanned are dumped as well.
                                        if (self.args.open or self.args.dump
                                            ) and pe == self.pe:
                                            if self.args.dump:
                                                folder = self.args.dump
                                            else:
                                                folder = tempfile.mkdtemp()

                                            resource_path = os.path.join(
                                                folder, '{0}_{1}_{2}'.format(
                                                    __sessions__.current.file.
                                                    md5, offset, name))
                                            resource.append(resource_path)

                                            with open(resource_path,
                                                      'wb') as resource_handle:
                                                resource_handle.write(data)

                                        resources.append(resource)

                                        count += 1
                    except Exception as e:
                        self.log('error', e)
                        continue

            return resources

        if not self.__check_session():
            return

        # Obtain resources for the currently opened file.
        resources = get_resources(self.pe)

        if not resources:
            self.log('warning', "No resources found")
            return

        headers = [
            '#', 'Name', 'Offset', 'MD5', 'Size', 'File Type', 'Language',
            'Sublanguage'
        ]
        if self.args.dump or self.args.open:
            headers.append('Dumped To')

        self.log('table', dict(header=headers, rows=resources))

        # If instructed, open a session on the given resource.
        if self.args.open:
            for resource in resources:
                if resource[0] == self.args.open:
                    __sessions__.new(resource[8])
                    return
        # If instructed to perform a scan across the repository, start looping
        # through all available files.
        elif self.args.scan:
            self.log('info', "Scanning the repository for matching samples...")

            # Retrieve list of samples stored locally and available in the
            # database.
            db = Database()
            samples = db.find(key='all')

            matches = []
            for sample in samples:
                # Skip if it's the same file.
                if sample.sha256 == __sessions__.current.file.sha256:
                    continue

                # Obtain path to the binary.
                sample_path = get_sample_path(sample.sha256)
                if not os.path.exists(sample_path):
                    continue

                # Open PE instance.
                try:
                    cur_pe = pefile.PE(sample_path)
                except:
                    continue

                # Obtain the list of resources for the current iteration.
                cur_resources = get_resources(cur_pe)
                matched_resources = []
                # Loop through entry's resources.
                for cur_resource in cur_resources:
                    # Loop through opened file's resources.
                    for resource in resources:
                        # If there is a common resource, add it to the list.
                        if cur_resource[3] == resource[3]:
                            matched_resources.append(resource[3])

                # If there are any common resources, add the entry to the list
                # of matched samples.
                if len(matched_resources) > 0:
                    matches.append([
                        sample.name, sample.md5,
                        '\n'.join(r for r in matched_resources)
                    ])

            self.log('info',
                     "{0} relevant matches found".format(bold(len(matches))))

            if len(matches) > 0:
                self.log(
                    'table',
                    dict(header=['Name', 'MD5', 'Resource MD5'], rows=matches))
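
At its core get_resources() just walks DIRECTORY_ENTRY_RESOURCE. A much smaller sketch that only lists the top-level resource type names for a hypothetical sample:

import pefile

pe = pefile.PE('sample.exe')  # hypothetical path

if hasattr(pe, 'DIRECTORY_ENTRY_RESOURCE'):
    for entry in pe.DIRECTORY_ENTRY_RESOURCE.entries:
        # Named resources expose .name; numeric IDs map to standard type
        # names (RT_ICON, RT_VERSION, ...) through pefile.RESOURCE_TYPE.
        if entry.name is not None:
            print(str(entry.name))
        else:
            print(pefile.RESOURCE_TYPE.get(entry.struct.Id, entry.struct.Id))
else:
    print('No resources found')
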
Example #14
    def entrypoint(self):
        if self.args.scan and self.args.cluster:
            self.log('error', "You selected two exclusive options, pick one")
            return

        if self.args.all:
            db = Database()
            samples = db.find(key='all')

            rows = []
            for sample in samples:
                sample_path = get_sample_path(sample.sha256)
                if not os.path.exists(sample_path):
                    continue

                try:
                    cur_ep = pefile.PE(
                        sample_path).OPTIONAL_HEADER.AddressOfEntryPoint
                except:
                    continue

                rows.append([sample.md5, sample.name, cur_ep])

            self.log(
                'table',
                dict(header=['MD5', 'Name', 'AddressOfEntryPoint'], rows=rows))

            return

        if self.args.cluster:
            db = Database()
            samples = db.find(key='all')

            cluster = {}
            for sample in samples:
                sample_path = get_sample_path(sample.sha256)
                if not os.path.exists(sample_path):
                    continue

                try:
                    cur_ep = pefile.PE(
                        sample_path).OPTIONAL_HEADER.AddressOfEntryPoint
                except:
                    continue

                if cur_ep not in cluster:
                    cluster[cur_ep] = []

                cluster[cur_ep].append([sample.md5, sample.name])

            for cluster_name, cluster_members in cluster.items():
                # Skipping clusters with only one entry.
                if len(cluster_members) == 1:
                    continue

                self.log(
                    'info', "AddressOfEntryPoint cluster {0}".format(
                        bold(cluster_name)))

                self.log('table',
                         dict(header=['MD5', 'Name'], rows=cluster_members))

            return

        if not self.__check_session():
            return

        ep = self.pe.OPTIONAL_HEADER.AddressOfEntryPoint

        self.log('info', "AddressOfEntryPoint: {0}".format(ep))

        if self.args.scan:
            db = Database()
            samples = db.find(key='all')

            rows = []
            for sample in samples:
                if sample.sha256 == __sessions__.current.file.sha256:
                    continue

                sample_path = get_sample_path(sample.sha256)
                if not os.path.exists(sample_path):
                    continue

                try:
                    cur_ep = pefile.PE(
                        sample_path).OPTIONAL_HEADER.AddressOfEntryPoint
                except:
                    continue

                if ep == cur_ep:
                    rows.append([sample.md5, sample.name])

            self.log(
                'info',
                "Following are samples with AddressOfEntryPoint {0}".format(
                    bold(ep)))

            self.log('table', dict(header=['MD5', 'Name'], rows=rows))
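
AddressOfEntryPoint is read directly from the optional header. A minimal sketch, with a hypothetical sample path, that prints the same value the tables above use:

import pefile

pe = pefile.PE('sample.exe')  # hypothetical path

# AddressOfEntryPoint is an RVA, i.e. relative to the image base once loaded.
ep = pe.OPTIONAL_HEADER.AddressOfEntryPoint
print('AddressOfEntryPoint: {0} (0x{0:x})'.format(ep))
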
Example #15
    def pehash(self):
        if not HAVE_PEHASH:
            self.log(
                'error',
                "PEhash is missing. Please copy PEhash to the modules directory of Viper"
            )
            return

        current_pehash = None
        if __sessions__.is_set():
            current_pehash = calculate_pehash(__sessions__.current.file.path)
            self.log('info', "PEhash: {0}".format(bold(current_pehash)))

        if self.args.all or self.args.cluster or self.args.scan:
            db = Database()
            samples = db.find(key='all')

            rows = []
            for sample in samples:
                sample_path = get_sample_path(sample.sha256)
                pe_hash = calculate_pehash(sample_path)
                if pe_hash:
                    rows.append((sample.name, sample.md5, pe_hash))

        if self.args.all:
            self.log('info', "PEhash for all files:")
            header = ['Name', 'MD5', 'PEhash']
            self.log('table', dict(header=header, rows=rows))

        elif self.args.cluster:
            self.log('info', "Clustering files by PEhash...")

            cluster = {}
            for sample_name, sample_md5, pe_hash in rows:
                cluster.setdefault(pe_hash,
                                   []).append([sample_name, sample_md5])

            for item in cluster.items():
                if len(item[1]) > 1:
                    self.log('info',
                             "PEhash cluster {0}:".format(bold(item[0])))
                    self.log('table', dict(header=['Name', 'MD5'],
                                           rows=item[1]))

        elif self.args.scan:
            if __sessions__.is_set() and current_pehash:
                self.log('info', "Finding matching samples...")

                matches = []
                for row in rows:
                    if row[1] == __sessions__.current.file.md5:
                        continue

                    if row[2] == current_pehash:
                        matches.append([row[0], row[1]])

                if matches:
                    self.log('table', dict(header=['Name', 'MD5'],
                                           rows=matches))
                else:
                    self.log('info', "No matches found")
Example #16
    def entrypoint(self):
        if self.args.scan and self.args.cluster:
            self.log('error', "You selected two exclusive options, pick one")
            return

        if self.args.all:
            db = Database()
            samples = db.find(key='all')

            rows = []
            for sample in samples:
                sample_path = get_sample_path(sample.sha256)
                if not os.path.exists(sample_path):
                    continue

                try:
                    cur_ep = pefile.PE(sample_path).OPTIONAL_HEADER.AddressOfEntryPoint
                except:
                    continue

                rows.append([sample.md5, sample.name, cur_ep])

            self.log('table', dict(header=['MD5', 'Name', 'AddressOfEntryPoint'], rows=rows))

            return

        if self.args.cluster:
            db = Database()
            samples = db.find(key='all')

            cluster = {}
            for sample in samples:
                sample_path = get_sample_path(sample.sha256)
                if not os.path.exists(sample_path):
                    continue

                try:
                    cur_ep = pefile.PE(sample_path).OPTIONAL_HEADER.AddressOfEntryPoint
                except:
                    continue

                if cur_ep not in cluster:
                    cluster[cur_ep] = []

                cluster[cur_ep].append([sample.md5, sample.name])

            for cluster_name, cluster_members in cluster.items():
                # Skipping clusters with only one entry.
                if len(cluster_members) == 1:
                    continue

                self.log('info', "AddressOfEntryPoint cluster {0}".format(bold(cluster_name)))

                self.log('table', dict(header=['MD5', 'Name'],
                    rows=cluster_members))

            return

        if not self.__check_session():
            return

        ep = self.pe.OPTIONAL_HEADER.AddressOfEntryPoint

        self.log('info', "AddressOfEntryPoint: {0}".format(ep))

        if self.args.scan:
            db = Database()
            samples = db.find(key='all')

            rows = []
            for sample in samples:
                if sample.sha256 == __sessions__.current.file.sha256:
                    continue

                sample_path = get_sample_path(sample.sha256)
                if not os.path.exists(sample_path):
                    continue

                try:
                    cur_ep = pefile.PE(sample_path).OPTIONAL_HEADER.AddressOfEntryPoint
                except:
                    continue

                if ep == cur_ep:
                    rows.append([sample.md5, sample.name])

            self.log('info', "Following are samples with AddressOfEntryPoint {0}".format(bold(ep)))

            self.log('table', dict(header=['MD5', 'Name'],
                rows=rows))
Example #17
    def run(self):
        super(Fuzzy, self).run()

        if not HAVE_PYDEEP:
            self.log('error', "Missing dependency, install pydeep (`pip install pydeep`)")
            return

        arg_verbose = False
        arg_cluster = False
        if self.args:
            if self.args.verbose:
                arg_verbose = self.args.verbose
            if self.args.cluster:
                arg_cluster = self.args.cluster

            db = Database()
            samples = db.find(key='all')

            # Check if we're operating in cluster mode, otherwise we run on the
            # currently opened file.
            if arg_cluster:
                self.log('info', "Generating clusters, this might take a while...")

                clusters = dict()
                for sample in samples:
                    if not sample.ssdeep:
                        continue

                    if arg_verbose:
                        self.log('info', "Testing file {0} with ssdeep {1}".format(
                            sample.md5, sample.ssdeep))

                    clustered = False
                    for cluster_name, cluster_members in clusters.items():
                        # Check if sample is already in the cluster.
                        if sample.md5 in cluster_members:
                            continue

                        if arg_verbose:
                            self.log('info', "Testing {0} in cluser {1}".format(
                                sample.md5, cluster_name))
                        
                        for member in cluster_members:
                            if sample.md5 == member[0]:
                                continue

                            member_hash = member[0]
                            member_name = member[1]

                            member_ssdeep = db.find(key='md5', value=member_hash)[0].ssdeep
                            if pydeep.compare(sample.ssdeep, member_ssdeep) > 40:
                                if arg_verbose:
                                    self.log('info', "Found home for {0} in cluster {1}".format(
                                        sample.md5, cluster_name))

                                clusters[cluster_name].append([sample.md5, sample.name])
                                clustered = True
                                break

                    if not clustered:
                        cluster_id = len(clusters) + 1
                        clusters[cluster_id] = [[sample.md5, sample.name],]

                ordered_clusters = collections.OrderedDict(sorted(clusters.items()))

                self.log('info', "Following are the identified clusters with more than one member")

                for cluster_name, cluster_members in ordered_clusters.items():
                    # We include in the results only clusters with more than just
                    # one member.
                    if len(cluster_members) <= 1:
                        continue

                    self.log('info', "Ssdeep cluster {0}".format(bold(cluster_name)))

                    self.log('table', dict(header=['MD5', 'Name'],
                        rows=cluster_members))

            # We're running against the already opened file.
            else:
                if not __sessions__.is_set():
                    self.log('error', "No session opened")
                    return

                if not __sessions__.current.file.ssdeep:
                    self.log('error', "No ssdeep hash available for opened file")
                    return

                matches = []
                for sample in samples:
                    if sample.sha256 == __sessions__.current.file.sha256:
                        continue

                    if not sample.ssdeep:
                        continue

                    score = pydeep.compare(__sessions__.current.file.ssdeep,
                        sample.ssdeep)

                    if score > 40:
                        matches.append(['{0}%'.format(score), sample.name,
                            sample.sha256])

                    if arg_verbose:
                        self.log('info', "Match {0}%: {2} [{1}]".format(score,
                            sample.name, sample.sha256))

                self.log('info', "{0} relevant matches found".format(bold(len(matches))))

                if len(matches) > 0:
                    self.log('table', dict(header=['Score', 'Name', 'SHA256'],
                        rows=matches))
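
pydeep exposes the ssdeep primitives used above. A minimal sketch comparing two hypothetical files with the same 40-point threshold as the module:

import pydeep

# Hypothetical paths.
hash_a = pydeep.hash_file('sample_a.bin')
hash_b = pydeep.hash_file('sample_b.bin')

# compare() returns a 0-100 match score between two ssdeep hashes.
score = pydeep.compare(hash_a, hash_b)
print('Match {0}%'.format(score))

if score > 40:
    print('Samples are considered relevant matches')
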
Example #18
    def resources(self):

        # Use this function to retrieve resources for the given PE instance.
        # Returns all the identified resources with indicators and attributes.
        def get_resources(pe):
            resources = []
            if hasattr(pe, 'DIRECTORY_ENTRY_RESOURCE'):
                count = 1
                for resource_type in pe.DIRECTORY_ENTRY_RESOURCE.entries:
                    try:
                        resource = {}

                        if resource_type.name is not None:
                            name = str(resource_type.name)
                        else:
                            name = str(pefile.RESOURCE_TYPE.get(resource_type.struct.Id))

                        if name is None:
                            name = str(resource_type.struct.Id)

                        if hasattr(resource_type, 'directory'):
                            for resource_id in resource_type.directory.entries:
                                if hasattr(resource_id, 'directory'):
                                    for resource_lang in resource_id.directory.entries:
                                        data = pe.get_data(resource_lang.data.struct.OffsetToData, resource_lang.data.struct.Size)
                                        filetype = get_type(data)
                                        md5 = get_md5(data)
                                        language = pefile.LANG.get(resource_lang.data.lang, None)
                                        sublanguage = pefile.get_sublang_name_for_lang(resource_lang.data.lang, resource_lang.data.sublang)
                                        offset = ('%-8s' % hex(resource_lang.data.struct.OffsetToData)).strip()
                                        size = ('%-8s' % hex(resource_lang.data.struct.Size)).strip()

                                        resource = [count, name, offset, md5, size, filetype, language, sublanguage]

                                        # Dump resources if requested to and if the file currently being
                                        # processed is the opened session file.
                                        # This is to avoid that during a --scan all the resources being
                                        # scanned are dumped as well.
                                        if (self.args.open or self.args.dump) and pe == self.pe:
                                            if self.args.dump:
                                                folder = self.args.dump
                                            else:
                                                folder = tempfile.mkdtemp()

                                            resource_path = os.path.join(folder, '{0}_{1}_{2}'.format(__sessions__.current.file.md5, offset, name))
                                            resource.append(resource_path)

                                            with open(resource_path, 'wb') as resource_handle:
                                                resource_handle.write(data)

                                        resources.append(resource)

                                        count += 1
                    except Exception as e:
                        self.log('error', e)
                        continue

            return resources

        if not self.__check_session():
            return

        # Obtain resources for the currently opened file.
        resources = get_resources(self.pe)

        if not resources:
            self.log('warning', "No resources found")
            return

        headers = ['#', 'Name', 'Offset', 'MD5', 'Size', 'File Type', 'Language', 'Sublanguage']
        if self.args.dump or self.args.open:
            headers.append('Dumped To')

        self.log('table', dict(header=headers, rows=resources))

        # If instructed, open a session on the given resource.
        if self.args.open:
            for resource in resources:
                if resource[0] == self.args.open:
                    __sessions__.new(resource[8])
                    return
        # If instructed to perform a scan across the repository, start looping
        # through all available files.
        elif self.args.scan:
            self.log('info', "Scanning the repository for matching samples...")

            # Retrieve list of samples stored locally and available in the
            # database.
            db = Database()
            samples = db.find(key='all')

            matches = []
            for sample in samples:
                # Skip if it's the same file.
                if sample.sha256 == __sessions__.current.file.sha256:
                    continue

                # Obtain path to the binary.
                sample_path = get_sample_path(sample.sha256)
                if not os.path.exists(sample_path):
                    continue

                # Open PE instance.
                try:
                    cur_pe = pefile.PE(sample_path)
                except:
                    continue

                # Obtain the list of resources for the current iteration.
                cur_resources = get_resources(cur_pe)
                matched_resources = []
                # Loop through entry's resources.
                for cur_resource in cur_resources:
                    # Loop through opened file's resources.
                    for resource in resources:
                        # If there is a common resource, add it to the list.
                        if cur_resource[3] == resource[3]:
                            matched_resources.append(resource[3])

                # If there are any common resources, add the entry to the list
                # of matched samples.
                if len(matched_resources) > 0:
                    matches.append([sample.name, sample.md5, '\n'.join(r for r in matched_resources)])

            self.log('info', "{0} relevant matches found".format(bold(len(matches))))

            if len(matches) > 0:
                self.log('table', dict(header=['Name', 'MD5', 'Resource MD5'], rows=matches))
Example #19
    def language(self):

        def get_iat(pe):
            iat = []
            if hasattr(pe, 'DIRECTORY_ENTRY_IMPORT'):
                for peimport in pe.DIRECTORY_ENTRY_IMPORT:
                    iat.append(peimport.dll)

            return iat

        def check_module(iat, match):
            for imp in iat:
                if imp.find(match) != -1:
                    return True

            return False

        def is_cpp(data, cpp_count):
            for line in data:
                if 'type_info' in line or 'RTTI' in line:
                    cpp_count += 1
                    break

            if cpp_count == 2:
                return True

            return False

        def is_delphi(data):
            for line in data:
                if 'Borland' in line:
                    path = line.split('\\')
                    for p in path:
                        if 'Delphi' in p:
                            return True
            return False

        def is_vbdotnet(data):
            for line in data:
                if 'Compiler' in line:
                    stuff = line.split('.')
                    if 'VisualBasic' in stuff:
                        return True

            return False

        def is_autoit(data):
            for line in data:
                if 'AU3!' in line:
                    return True

            return False

        def is_packed(pe):
            for section in pe.sections:
                if section.get_entropy() > 7:
                    return True

            return False

        def get_strings(content):
            regexp = r'[\x30-\x39\x41-\x5f\x61-\x7a\-\.:]{4,}'
            return re.findall(regexp, content)

        def find_language(iat, sample, content):
            dotnet = False
            cpp_count = 0
            found = None

            # VB check
            if check_module(iat, 'VB'):
                self.log('info', "{0} - Possible language: Visual Basic".format(sample.name))
                return 'Visual Basic'

            # .NET check
            if check_module(iat, 'mscoree.dll') and not found:
                dotnet = True
                found = '.NET'

            # C DLL check
            if not found and (check_module(iat, 'msvcr') or check_module(iat, 'MSVCR') or check_module(iat, 'c++')):
                cpp_count += 1

            if not found:
                data = get_strings(content)

                if is_cpp(data, cpp_count) and not found:
                    found = 'CPP'
                if not found and cpp_count == 1:
                    found = 'C'
                if not dotnet and is_delphi(data) and not found:
                    found = 'Delphi'
                if dotnet and is_vbdotnet(data):
                    found = 'Visual Basic .NET'
                if is_autoit(data) and not found:
                    found = 'AutoIt'

            return found

        if not self.__check_session():
            return

        if is_packed(self.pe):
            self.log('warning', "Probably packed, the language guess might be unreliable")

        language = find_language(
            get_iat(self.pe),
            __sessions__.current.file,
            __sessions__.current.file.data
        )

        if language:
            self.log('info', "Probable language: {0}".format(bold(language)))
        else:
            self.log('error', "Programming language not identified")
            return

        if self.args.scan:
            self.log('info', "Scanning the repository for matching samples...")

            db = Database()
            samples = db.find(key='all')

            matches = []
            for sample in samples:
                if sample.sha256 == __sessions__.current.file.sha256:
                    continue

                sample_path = get_sample_path(sample.sha256)

                if not os.path.exists(sample_path):
                    continue

                try:
                    cur_pe = pefile.PE(sample_path)
                except pefile.PEFormatError:
                    continue

                cur_packed = ''
                if is_packed(cur_pe):
                    cur_packed = 'Yes'

                cur_language = find_language(
                    get_iat(cur_pe),
                    sample,
                    open(sample_path, 'rb').read()
                )

                if not cur_language:
                    continue

                if cur_language == language:
                    matches.append([sample.name, sample.md5, cur_packed])

            if matches:
                self.log('table', dict(header=['Name', 'MD5', 'Is Packed'], rows=matches))
            else:
                self.log('info', "No matches found")
Ejemplo n.º 20
0
    def security(self):

        def get_certificate(pe):
            # TODO: this only extracts the raw certificate data. It still needs to
            # be parsed into individual certificates, perhaps returning only the
            # PEM data of the first one.
            pe_security_dir = pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']
            address = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pe_security_dir].VirtualAddress
            #  size = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pe_security_dir].Size

            if address:
                return pe.write()[address + 8:]
            else:
                return None

        def get_signed_samples(current=None, cert_filter=None):
            db = Database()
            samples = db.find(key='all')

            results = []
            for sample in samples:
                # Skip if it's the same file.
                if current:
                    if sample.sha256 == current:
                        continue

                # Obtain path to the binary.
                sample_path = get_sample_path(sample.sha256)
                if not os.path.exists(sample_path):
                    continue

                # Open PE instance.
                try:
                    cur_pe = pefile.PE(sample_path)
                except Exception:
                    continue

                cur_cert_data = get_certificate(cur_pe)

                if not cur_cert_data:
                    continue

                cur_cert_md5 = get_md5(cur_cert_data)

                if cert_filter:
                    if cur_cert_md5 == cert_filter:
                        results.append([sample.name, sample.md5])
                else:
                    results.append([sample.name, sample.md5, cur_cert_md5])

            return results

        if self.args.all:
            self.log('info', "Scanning the repository for all signed samples...")

            all_of_them = get_signed_samples()

            self.log('info', "{0} signed samples found".format(bold(len(all_of_them))))

            if len(all_of_them) > 0:
                self.log('table', dict(header=['Name', 'MD5', 'Cert MD5'], rows=all_of_them))

            return

        if not self.__check_session():
            return

        cert_data = get_certificate(self.pe)

        if not cert_data:
            self.log('warning', "No certificate found")
            return

        cert_md5 = get_md5(cert_data)

        self.log('info', "Found certificate with MD5 {0}".format(bold(cert_md5)))

        if self.args.dump:
            cert_path = os.path.join(self.args.dump, '{0}.crt'.format(__sessions__.current.file.sha256))
            with open(cert_path, 'wb+') as cert_handle:
                cert_handle.write(cert_data)

            self.log('info', "Dumped certificate to {0}".format(cert_path))
            self.log('info', "You can parse it using the following command:\n\t" +
                     bold("openssl pkcs7 -inform DER -print_certs -text -in {0}".format(cert_path)))

        # TODO: do scan for certificate's serial number.
        if self.args.scan:
            self.log('info', "Scanning the repository for matching signed samples...")

            matches = get_signed_samples(current=__sessions__.current.file.sha256, cert_filter=cert_md5)

            self.log('info', "{0} relevant matches found".format(bold(len(matches))))

            if len(matches) > 0:
                self.log('table', dict(header=['Name', 'MD5'], rows=matches))

        # TODO: this function needs to be better integrated with the rest of the command.
        # TODO: need to add more error handling and figure out why so many samples are failing.
        if self.args.check:
            if not HAVE_VERIFYSIGS:
                self.log('error', "Dependencies missing for authenticode validation. Please install M2Crypto and pyasn1 (`pip install pyasn1 M2Crypto`)")
                return

            try:
                auth, computed_content_hash = get_auth_data(__sessions__.current.file.path)
            except Exception as e:
                self.log('error', "Unable to parse PE certificate: {0}".format(str(e)))
                return

            try:
                auth.ValidateAsn1()
                auth.ValidateHashes(computed_content_hash)
                auth.ValidateSignatures()
                auth.ValidateCertChains(time.gmtime())
            except Exception as e:
                self.log('error', "Unable to validate PE certificate: {0}".format(str(e)))
                return

            self.log('info', bold('Signature metadata:'))
            self.log('info', 'Program name: {0}'.format(auth.program_name))
            self.log('info', 'URL: {0}'.format(auth.program_url))

            if auth.has_countersignature:
                self.log('info', bold('Countersignature is present. Timestamp: {0} UTC'.format(
                        time.asctime(time.gmtime(auth.counter_timestamp)))))
            else:
                self.log('info', bold('Countersignature is not present.'))

            self.log('info', bold('Binary is signed with cert issued by:'))
            self.log('info', '{0}'.format(auth.signing_cert_id[0]))

            self.log('info', '{0}'.format(auth.cert_chain_head[2][0]))
            self.log('info', 'Chain not before: {0} UTC'.format(
                    time.asctime(time.gmtime(auth.cert_chain_head[0]))))
            self.log('info', 'Chain not after: {0} UTC'.format(
                    time.asctime(time.gmtime(auth.cert_chain_head[1]))))

            if auth.has_countersignature:
                self.log('info', bold('Countersig chain head issued by:'))
                self.log('info', '{0}'.format(auth.counter_chain_head[2]))
                self.log('info', 'Countersig not before: {0} UTC'.format(
                        time.asctime(time.gmtime(auth.counter_chain_head[0]))))
                self.log('info', 'Countersig not after: {0} UTC'.format(
                        time.asctime(time.gmtime(auth.counter_chain_head[1]))))

            self.log('info', bold('Certificates:'))
            for (issuer, serial), cert in auth.certificates.items():
                self.log('info', 'Issuer: {0}'.format(issuer))
                self.log('info', 'Serial: {0}'.format(serial))
                subject = cert[0][0]['subject']
                subject_dn = str(dn.DistinguishedName.TraverseRdn(subject[0]))
                self.log('info', 'Subject: {0}'.format(subject_dn))
                not_before = cert[0][0]['validity']['notBefore']
                not_after = cert[0][0]['validity']['notAfter']
                not_before_time = not_before.ToPythonEpochTime()
                not_after_time = not_after.ToPythonEpochTime()
                self.log('info', 'Not Before: {0} UTC ({1})'.format(
                        time.asctime(time.gmtime(not_before_time)), not_before[0]))
                self.log('info', 'Not After: {0} UTC ({1})'.format(
                        time.asctime(time.gmtime(not_after_time)), not_after[0]))

            if auth.trailing_data:
                self.log('info', 'Signature Blob had trailing (unvalidated) data ({0} bytes): {1}'.format(
                        len(auth.trailing_data), auth.trailing_data.encode('hex')))
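
A minimal sketch of why get_certificate() above skips 8 bytes: the security data directory points at a WIN_CERTIFICATE structure whose 8-byte header (dwLength, wRevision, wCertificateType) precedes the DER-encoded PKCS#7 blob, and its VirtualAddress is a raw file offset rather than an RVA, which is why it is applied to the pe.write() output. The helper name below is illustrative only.

import struct
import pefile

def read_authenticode_blob(path):
    # Locate the security data directory and strip the WIN_CERTIFICATE header.
    pe = pefile.PE(path)
    sec_idx = pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']
    entry = pe.OPTIONAL_HEADER.DATA_DIRECTORY[sec_idx]
    if not entry.VirtualAddress:
        return None
    raw = bytes(pe.write()[entry.VirtualAddress:entry.VirtualAddress + entry.Size])
    length, revision, cert_type = struct.unpack_from('<IHH', raw, 0)
    return raw[8:length]  # DER-encoded PKCS#7 SignedData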
Ejemplo n.º 21
0
    def imphash(self):
        if self.args.scan and self.args.cluster:
            self.log('error', "You selected two exclusive options, pick one")
            return

        if self.args.cluster:
            self.log('info', "Clustering all samples by imphash...")

            db = Database()
            samples = db.find(key='all')

            cluster = {}
            for sample in samples:
                sample_path = get_sample_path(sample.sha256)
                if not os.path.exists(sample_path):
                    continue

                try:
                    cur_imphash = pefile.PE(sample_path).get_imphash()
                except Exception:
                    continue

                if cur_imphash not in cluster:
                    cluster[cur_imphash] = []

                cluster[cur_imphash].append([sample.md5, sample.name])

            for cluster_name, cluster_members in cluster.items():
                # Skipping clusters with only one entry.
                if len(cluster_members) == 1:
                    continue

                self.log('info', "Imphash cluster {0}".format(bold(cluster_name)))

                self.log('table', dict(header=['MD5', 'Name'], rows=cluster_members))

            return

        if self.__check_session():
            try:
                imphash = self.pe.get_imphash()
            except AttributeError:
                self.log('error', "No imphash support, upgrade pefile to a version >= 1.2.10-139 (`pip install --upgrade pefile`)")
                return

            self.log('info', "Imphash: {0}".format(bold(imphash)))

            if self.args.scan:
                self.log('info', "Scanning the repository for matching samples...")

                db = Database()
                samples = db.find(key='all')

                matches = []
                for sample in samples:
                    if sample.sha256 == __sessions__.current.file.sha256:
                        continue

                    sample_path = get_sample_path(sample.sha256)
                    if not os.path.exists(sample_path):
                        continue

                    try:
                        cur_imphash = pefile.PE(sample_path).get_imphash()
                    except Exception:
                        continue

                    if imphash == cur_imphash:
                        matches.append([sample.name, sample.sha256])

                self.log('info', "{0} relevant matches found".format(bold(len(matches))))

                if len(matches) > 0:
                    self.log('table', dict(header=['Name', 'SHA256'], rows=matches))
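
A compact sketch of the clustering step above, assuming `paths` is any iterable of sample file paths (a hypothetical stand-in for the database lookup): imphash is pefile's hash over the normalized import table, so grouping samples by its value is a simple dictionary bucket operation.

from collections import defaultdict

import pefile

def cluster_by_imphash(paths):
    clusters = defaultdict(list)
    for path in paths:
        try:
            clusters[pefile.PE(path).get_imphash()].append(path)
        except pefile.PEFormatError:
            continue
    # Keep only clusters with more than one member, as above.
    return {h: members for h, members in clusters.items() if len(members) > 1}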
Ejemplo n.º 22
0
    def run(self):
        super(Fuzzy, self).run()

        if not HAVE_PYDEEP:
            self.log(
                'error',
                "Missing dependency, install pydeep (`pip install pydeep`)")
            return

        arg_verbose = False
        arg_cluster = False
        if self.args:
            if self.args.verbose:
                arg_verbose = self.args.verbose
            if self.args.cluster:
                arg_cluster = self.args.cluster

            db = Database()
            samples = db.find(key='all')

            # Check if we're operating in cluster mode, otherwise we run on the
            # currently opened file.
            if arg_cluster:
                self.log('info',
                         "Generating clusters, this might take a while...")

                clusters = dict()
                for sample in samples:
                    if not sample.ssdeep:
                        continue

                    if arg_verbose:
                        self.log(
                            'info', "Testing file {0} with ssdeep {1}".format(
                                sample.md5, sample.ssdeep))

                    clustered = False
                    for cluster_name, cluster_members in clusters.items():
                        # Check if sample is already in the cluster.
                        if any(sample.md5 == member[0] for member in cluster_members):
                            continue

                        if arg_verbose:
                            self.log(
                                'info', "Testing {0} in cluser {1}".format(
                                    sample.md5, cluster_name))

                        for member in cluster_members:
                            if sample.md5 == member[0]:
                                continue

                            member_hash = member[0]
                            member_name = member[1]

                            member_ssdeep = db.find(
                                key='md5', value=member_hash)[0].ssdeep
                            if pydeep.compare(sample.ssdeep,
                                              member_ssdeep) > 40:
                                if arg_verbose:
                                    self.log(
                                        'info',
                                        "Found home for {0} in cluster {1}".
                                        format(sample.md5, cluster_name))

                                clusters[cluster_name].append(
                                    [sample.md5, sample.name])
                                clustered = True
                                break

                    if not clustered:
                        cluster_id = len(clusters) + 1
                        clusters[cluster_id] = [
                            [sample.md5, sample.name],
                        ]

                ordered_clusters = collections.OrderedDict(
                    sorted(clusters.items()))

                self.log(
                    'info',
                    "The following clusters have more than one member"
                )

                for cluster_name, cluster_members in ordered_clusters.items():
                    # We include in the results only clusters with more than just
                    # one member.
                    if len(cluster_members) <= 1:
                        continue

                    self.log('info',
                             "Ssdeep cluster {0}".format(bold(cluster_name)))

                    self.log(
                        'table',
                        dict(header=['MD5', 'Name'], rows=cluster_members))

            # We're running against the already opened file.
            else:
                if not __sessions__.is_set():
                    self.log('error', "No session opened")
                    return

                if not __sessions__.current.file.ssdeep:
                    self.log('error',
                             "No ssdeep hash available for opened file")
                    return

                matches = []
                for sample in samples:
                    if sample.sha256 == __sessions__.current.file.sha256:
                        continue

                    if not sample.ssdeep:
                        continue

                    score = pydeep.compare(__sessions__.current.file.ssdeep,
                                           sample.ssdeep)

                    if score > 40:
                        matches.append(
                            ['{0}%'.format(score), sample.name, sample.sha256])

                    if arg_verbose:
                        self.log(
                            'info', "Match {0}%: {2} [{1}]".format(
                                score, sample.name, sample.sha256))

                self.log(
                    'info',
                    "{0} relevant matches found".format(bold(len(matches))))

                if len(matches) > 0:
                    self.log(
                        'table',
                        dict(header=['Score', 'Name', 'SHA256'], rows=matches))
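
A minimal sketch of the ssdeep comparison driving the logic above: pydeep.compare() returns a similarity score from 0 to 100, and this module treats anything above 40 as a relevant match. The file paths are hypothetical placeholders.

import pydeep

hash_a = pydeep.hash_file('sample_a.bin')  # hypothetical paths
hash_b = pydeep.hash_file('sample_b.bin')

score = pydeep.compare(hash_a, hash_b)
if score > 40:
    print('Likely related ({0}% similarity)'.format(score))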
Ejemplo n.º 23
0
    def run(self):
        super(VirusTotal, self).run()
        if self.args is None:
            return

        # Check the requests dependency before any network activity.
        if not HAVE_REQUESTS:
            self.log('error', "Missing dependency, install requests (`pip install requests`)")
            return

        if self.args.hash:
            try:
                params = {'apikey': KEY, 'hash': self.args.hash}
                response = requests.get(VIRUSTOTAL_URL_DOWNLOAD, params=params)

                if response.status_code == 403:
                    self.log('error', 'This command requires a VirusTotal private API key')
                    self.log('error', 'Please check that your key has the right permissions')
                    return
                if response.status_code == 200:
                    tmp = tempfile.NamedTemporaryFile(delete=False)
                    tmp.write(response.content)
                    tmp.close()
                    return __sessions__.new(tmp.name)

            except Exception as e:
                self.log('error', "Failed to download file: {0}".format(e))

        if not __sessions__.is_set():
            self.log('error', "No session opened")
            return

        data = {'resource': __sessions__.current.file.md5, 'apikey': KEY}

        try:
            response = requests.post(VIRUSTOTAL_URL, data=data)
        except Exception as e:
            self.log('error', "Failed performing request: {0}".format(e))
            return

        try:
            virustotal = response.json()
            # Older versions of requests expose json as a property rather than a
            # method, which makes the call above raise "'dict' object is not callable".
        except Exception as e:
            # Fall back to property-style access for those older requests versions.
            if str(e) == "'dict' object is not callable":
                try:
                    virustotal = response.json
                except Exception as e:
                    self.log('error', "Failed parsing the response: {0}".format(e))
                    self.log('error', "Data:\n{}".format(response.content))
                    return
            else:
                self.log('error', "Failed parsing the response: {0}".format(e))
                self.log('error', "Data:\n{}".format(response.content))
                return

        rows = []
        if 'scans' in virustotal:
            for engine, signature in virustotal['scans'].items():
                if signature['detected']:
                    signature = signature['result']
                else:
                    signature = ''
                rows.append([engine, signature])

        rows.sort()
        if rows:
            self.log('info', "VirusTotal Report:")
            self.log('table', dict(header=['Antivirus', 'Signature'], rows=rows))

            if self.args.submit:
                self.log('', "")
                self.log('info', "The file is already available on VirusTotal, no need to submit")
        else:
            self.log('info', "The file does not appear to be on VirusTotal yet")

            if self.args.submit:
                try:
                    data = {'apikey': KEY}
                    files = {'file': open(__sessions__.current.file.path, 'rb').read()}
                    response = requests.post(VIRUSTOTAL_URL_SUBMIT, data=data, files=files)
                except Exception as e:
                    self.log('error', "Failed Submit: {0}".format(e))
                    return

                try:
                    virustotal = response.json()
                    # Older versions of requests expose json as a property rather than a
                    # method, which makes the call above raise "'dict' object is not callable".
                except Exception as e:
                    # Fall back to property-style access for those older requests versions.
                    if str(e) == "'dict' object is not callable":
                        try:
                            virustotal = response.json
                        except Exception as e:
                            self.log('error', "Failed parsing the response: {0}".format(e))
                            self.log('error', "Data:\n{}".format(response.content))
                            return
                    else:
                        self.log('error', "Failed parsing the response: {0}".format(e))
                        self.log('error', "Data:\n{}".format(response.content))
                        return

                if 'verbose_msg' in virustotal:
                    self.log('info', "{}: {}".format(bold("VirusTotal message"), virustotal['verbose_msg']))

        if self.args.comment:
            try:
                data = {'apikey': KEY, 'resource': __sessions__.current.file.md5, 'comment': ' '.join(self.args.comment)}
                response = requests.post(VIRUSTOTAL_URL_COMMENT, data=data)
            except Exception as e:
                self.log('error', "Failed to submit comment: {0}".format(e))
                return
            try:
                virustotal = response.json()
                # Older versions of requests expose json as a property rather than a
                # method, which makes the call above raise "'dict' object is not callable".
            except Exception as e:
                # Fall back to property-style access for those older requests versions.
                if str(e) == "'dict' object is not callable":
                    try:
                        virustotal = response.json
                    except Exception as e:
                        self.log('error',"Failed parsing the response: {0}".format(e))
                        self.log('error',"Data:\n{}".format(response.content))
                        return
                else:
                    self.log('error',"Failed parsing the response: {0}".format(e))
                    self.log('error',"Data:\n{}".format(response.content))
                    return

            if 'verbose_msg' in virustotal:
                self.log('info', "{}: {}".format(bold("VirusTotal message"), virustotal['verbose_msg']))
                return
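
A minimal sketch of the report lookup performed above, assuming KEY holds a valid API key and using the public VirusTotal API v2 report endpoint this module appears to target; the constant names and resource value below are illustrative placeholders.

import requests

VT_REPORT_URL = 'https://www.virustotal.com/vtapi/v2/file/report'
KEY = 'your-api-key'  # hypothetical key

response = requests.post(VT_REPORT_URL, data={'resource': 'file-md5-or-sha256', 'apikey': KEY})
report = response.json()

# Print one line per antivirus engine, empty when nothing was detected.
for engine, result in sorted(report.get('scans', {}).items()):
    print(engine, result['result'] if result['detected'] else '')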