def parse_results(self, response):
    """Convert a VirusTotal report dict into an AL Result.

    Only a completed report (response_code == 1) produces output: a
    permalink URL section plus one AvHitSection and VirusHitTag per
    engine that flagged the file.
    """
    result = Result()
    # The report may arrive wrapped under a 'results' key depending on the caller.
    report = response.get('results', response)
    if report is None or report.get('response_code') != 1:
        return result

    hits_section = ResultSection(title_text='Anti-Virus Detections')
    permalink_section = ResultSection(
        SCORE.NULL, 'Virus total report permalink',
        self.SERVICE_CLASSIFICATION,
        body_format=TEXT_FORMAT.URL,
        body=json.dumps({"url": report.get('permalink')}))
    result.add_section(permalink_section)

    scans = report.get('scans', report)
    hits_section.add_line('Found %d AV hit(s) from %d scans.'
                          % (report.get('positives'), report.get('total')))
    for engine, details in sorted(scans.iteritems()):
        if not details['detected']:
            continue
        virus_name = details['result']
        result.append_tag(
            VirusHitTag(virus_name, context="scanner:%s" % engine))
        hits_section.add_section(
            AvHitSection(engine, virus_name, SCORE.SURE))
    result.add_result(hits_section)
    return result
def parse_alerts(self, alerts):
    """Parse generated alert text into a Result.

    The alert stream is a sequence of blocks separated by blank lines:
    the first non-blank line of a block is the YML indicator name and
    the remaining lines are the matched content.

    Fixes vs. previous revision:
    - removed the dead ``newline_count`` counter (written, never read);
    - the final block is now flushed even when the stream does not end
      with a blank line (it was silently dropped before).
    """
    res = Result()
    line_count = 0
    content = ""
    yml_indicator = ""
    xml_hits = ResultSection(title_text='xml Malware Indicator Match')
    # NOTE(review): hard-coded path while the parsed data comes from the
    # `alerts` argument — presumably they refer to the same file; confirm.
    if os.stat("/opt/al/pkg/al_services/alsvc_beach/alerts_generated.txt"
               ).st_size == 0:
        # Result file is empty, nothing to report
        return res
    for line in alerts:
        if line != "\n":
            line_count += 1
            if line_count == 1:
                # First line of a block names the indicator.
                yml_indicator = line
            else:
                content += line + "\n"
        elif line_count != 0:
            # Blank line terminates the current block.
            xml_hits.add_section(
                XmlResultObject(yml_indicator, content, SCORE.VHIGH))
            content = ""
            line_count = 0
    if line_count != 0:
        # Flush a trailing block that was not followed by a blank line.
        xml_hits.add_section(
            XmlResultObject(yml_indicator, content, SCORE.VHIGH))
    res.add_result(xml_hits)
    return res
def parse_results(self, response):
    """Convert a VirusTotal report dict into an AL Result.

    Raises:
        VTException: on API throttling (response_code 204) or when a
            Private-API-only call was attempted (response_code 203).
    """
    result = Result()
    # The report may arrive wrapped under a 'results' key depending on the caller.
    report = response.get('results', response)
    if report is None:
        return result

    status = report.get('response_code')
    if status == 204:
        raise VTException("You exceeded the public API request rate limit (4 requests of any nature per minute)")
    if status == 203:
        raise VTException("You tried to perform calls to functions for which you require a Private API key.")
    if status != 1:
        return result

    hits_section = ResultSection(title_text='Anti-Virus Detections')
    permalink_section = ResultSection(
        SCORE.NULL, 'Virus total report permalink',
        self.SERVICE_CLASSIFICATION,
        body_format=TEXT_FORMAT.URL,
        body=json.dumps({"url": report.get('permalink')}))
    result.add_section(permalink_section)

    scans = report.get('scans', report)
    hits_section.add_line('Found %d AV hit(s) from %d scans.'
                          % (report.get('positives'), report.get('total')))
    for engine, details in sorted(scans.iteritems()):
        if not details['detected']:
            continue
        virus_name = details['result']
        result.append_tag(
            VirusHitTag(virus_name, context="scanner:%s" % engine))
        hits_section.add_section(
            AvHitSection(engine, virus_name, SCORE.SURE))
    result.add_result(hits_section)
    return result
def parse(self, output=None):
    """Parse Manalyze JSON output into a tree of ResultSections.

    The JSON's top level maps the analysed filename to its report; the
    filename itself is skipped and each second-level key becomes one
    subsection.
    """
    report = json.loads(str(output))
    root = ResultSection(SCORE.NULL, "Manalyze Results:")
    for _, file_report in report.iteritems():  # top level is the filename; skip it
        for heading, details in file_report.iteritems():
            subsection = ResultSection(SCORE.NULL, heading)
            self.recurse_dict(details, subsection)
            # Long bodies render better as a preformatted dump.
            if subsection.body.count("\n") > 25:
                subsection.body_format = TEXT_FORMAT.MEMORY_DUMP
            root.add_section(subsection)
    return root
def lookup_av_hits(response):
    """Extract AV detections from a response dict.

    Returns a (ResultSection, tags) pair, or (None, []) when the
    response carries no 'antivirus' entries.
    """
    detections = response.get('antivirus', None)
    if not detections:
        return None, []

    section = ResultSection(title_text='Anti-Virus Detections')
    section.add_line('Found %d AV hit(s).' % len(detections))
    tags = []
    for detection in detections:
        scanner = detection['scannerID']
        section.add_section(
            AvHitSection(scanner, detection['name'], SCORE.SURE))
        tags.append(
            VirusHitTag(detection['name'], context="scanner:%s" % scanner))
    return section, tags
def parse_results(self, response):
    """Convert a MetaDefender scan report into an AL Result.

    Only completed scans (progress_percentage == 100) are parsed.
    scan_result_i 1 (infected) scores SURE; 2 (suspicious) scores
    VHIGH; anything else is ignored.
    """
    res = Result()
    report = response.get('scan_results', response)
    virus_name = ""
    if report is None or report.get('progress_percentage') != 100:
        return res

    # Map engine verdict codes to AL scores.
    severity = {1: SCORE.SURE, 2: SCORE.VHIGH}
    av_hits = ResultSection(title_text='Anti-Virus Detections')
    found_hit = False
    scans = report.get('scan_details', report)
    for engine_name, details in sorted(scans.iteritems()):
        score = SCORE.NULL
        verdict = details['scan_result_i']
        if verdict in severity:
            virus_name = details['threat_found']
            if virus_name:
                score = severity[verdict]
        if score:
            virus_name = virus_name.replace("a variant of ", "")
            engine = self.engine_map[self._format_engine_name(engine_name)]
            res.append_tag(
                VirusHitTag(virus_name, context="scanner:%s" % engine_name))
            av_hits.add_section(
                AvHitSection(engine_name, virus_name, engine, score))
            found_hit = True
    if found_hit:
        res.add_result(av_hits)
    return res
def macro_section_builder(self, vba_code):
    """Build the ResultSection describing one extracted VBA macro.

    Hashes the raw macro, attempts deobfuscation, dumps the (possibly
    truncated) code, scores its strings via macro_scorer(), and flags
    macros that look packed/obfuscated.
    """
    vba_code_sha256 = hashlib.sha256(vba_code).hexdigest()
    macro_section = ResultSection(SCORE.NULL, "OleVBA : Macro detected")
    macro_section.add_line("Macro SHA256 : %s" % vba_code_sha256)
    #macro_section.add_line("Resubmitted macro as: macro_%s.vba" % vba_code_sha256[:7])
    macro_section.add_tag(TAG_TYPE.OLE_MACRO_SHA256,
                          vba_code_sha256,
                          weight=TAG_WEIGHT.LOW,
                          usage=TAG_USAGE.CORRELATION)
    dump_title = "Macro contents dump"
    analyzed_code = self.deobfuscator(vba_code)
    req_deob = False
    # If deobfuscation changed anything, note it in the dump title and tag it later.
    if analyzed_code != vba_code:
        req_deob = True
        dump_title += " [deobfuscated]"
    # Truncate very large macros so the dump stays displayable.
    if len(analyzed_code) > self.MAX_STRINGDUMP_CHARS:
        dump_title += " - Displaying only the first %s characters." % self.MAX_STRINGDUMP_CHARS
        dump_subsection = ResultSection(SCORE.NULL, dump_title,
                                        body_format=TEXT_FORMAT.MEMORY_DUMP)
        dump_subsection.add_line(analyzed_code[0:self.MAX_STRINGDUMP_CHARS])
    else:
        dump_subsection = ResultSection(SCORE.NULL, dump_title,
                                        body_format=TEXT_FORMAT.MEMORY_DUMP)
        dump_subsection.add_line(analyzed_code)
    if req_deob:
        dump_subsection.add_tag(TAG_TYPE.TECHNIQUE_OBFUSCATION,
                                "VBA Macro String Functions",
                                weight=TAG_WEIGHT.LOW,
                                usage=TAG_USAGE.IDENTIFICATION)
    # Only attach the dump when the scorer found something interesting.
    score_subsection = self.macro_scorer(analyzed_code)
    if score_subsection:
        macro_section.add_section(score_subsection)
        macro_section.add_section(dump_subsection)
    # Flag macros
    if self.flag_macro(analyzed_code):
        macro_section.add_section(ResultSection(SCORE.HIGH, "Macro may be packed or obfuscated."))
    return macro_section
def parse_api(data):
    """Convert a Beaver-style API response dict into an AL Result.

    Builds sections for file info, sandbox call-outs (capped at 10
    displayed), the spam feed, and AV detections.

    Fixes vs. previous revision:
    - 'SSDeep Hash2' previously re-read ``ssdeep_hash1`` (copy-paste bug);
    - the "And N more..." overflow line hard-coded 10 instead of
      ``max_callouts``;
    - ``email_sample`` fields are read with ``.get`` so the ``{}``
      fallback from ``spamcount.get("email_sample", {})`` no longer
      guarantees a KeyError.
    """
    result = Result()

    # Info block
    hash_info = data.get('hash_info')
    if not hash_info:
        return result
    r_info = ResultSection(title_text='File Info')
    r_info.score = SCORE.NULL
    # received_date is a YYYYMMDD string; reformat as YYYY-MM-DD.
    r_info.add_line('Received Data: %s-%s-%s' % (data['received_date'][:4],
                                                 data['received_date'][4:6],
                                                 data['received_date'][6:]))
    r_info.add_line('Size: %s' % hash_info.get('filesize', ""))
    r_info.add_line('MD5: %s' % hash_info.get('md5', ""))
    r_info.add_line('SHA1: %s' % hash_info.get('sha1', ""))
    r_info.add_line('SHA256: %s' % hash_info.get('sha256', ""))
    r_info.add_line('SSDeep Blocksize: %s' % hash_info.get('ssdeep_blocksize', ""))
    r_info.add_line('SSDeep Hash1: %s' % hash_info.get('ssdeep_hash1', ""))
    r_info.add_line('SSDeep Hash2: %s' % hash_info.get('ssdeep_hash2', ""))
    result.add_result(r_info)

    # Sandbox call-outs: display at most max_callouts, tag every displayed one.
    callouts = data.get('callouts', [])
    if len(callouts) > 0:
        max_callouts = 10
        r_callouts = ResultSection(title_text='Sandbox Call-Outs')
        r_callouts.score = SCORE.VHIGH
        analyser = ''
        r_call_sub_section = None
        reported_count = 0
        for callout in callouts:
            reported_count += 1
            if reported_count <= max_callouts:
                # Group call-outs under one subsection per analysing IP.
                if analyser != callout['ip']:
                    title = '%s (Analysed on %s)' % (callout['ip'], callout['addedDate'])
                    r_call_sub_section = ResultSection(title_text=title, parent=r_callouts)
                    analyser = callout['ip']
                channel = callout['channel']
                if channel is not None:
                    channel = "(%s)" % channel.split('~~')[0]
                else:
                    channel = ""
                r_call_sub_section.add_line("{0:s}:{1:d}{2:s}".format(
                    callout['callout'], callout['port'], channel))
                # Tag as IP when it parses as a dotted quad, else as a domain.
                try:
                    p1, p2, p3, p4 = callout['callout'].split(".")
                    if int(p1) <= 255 and int(p2) <= 255 and int(p3) <= 255 and int(p4) <= 255:
                        result.append_tag(
                            Tag(TAG_TYPE.NET_IP, callout['callout'],
                                TAG_WEIGHT.MED, context=Context.BEACONS))
                except ValueError:
                    result.append_tag(
                        Tag(TAG_TYPE.NET_DOMAIN_NAME, callout['callout'],
                            TAG_WEIGHT.MED, context=Context.BEACONS))
                if callout['port'] != 0:
                    result.append_tag(
                        Tag(TAG_TYPE.NET_PORT, str(callout['port']),
                            TAG_WEIGHT.MED, context=Context.BEACONS))
        if len(callouts) > max_callouts:
            r_callouts.add_line("And %s more..." % str(len(callouts) - max_callouts))
        result.add_result(r_callouts)

    # SPAM feed
    spamcount = data.get('spamCount', {})
    if spamcount:
        r_spam = ResultSection(title_text='SPAM feed')
        r_spam.score = SCORE.VHIGH
        r_spam.add_line('Found %d related spam emails' % spamcount['count'])
        email_sample = spamcount.get("email_sample", {})
        r_spam.add_line('\tFirst Seen: %s' % email_sample.get('firstSeen', ''))
        r_spam.add_line('\tLast Seen: %s' % email_sample.get('lastSeen', ''))
        r_sub_section = ResultSection(title_text='Attachments', parent=r_spam)
        if email_sample.get('filename'):
            r_sub_section.add_line('%s - md5: %s' % (email_sample['filename'],
                                                     email_sample.get('filenameMD5', '')))
        if email_sample.get('attachment'):
            r_sub_section.add_line('%s - md5: %s' % (email_sample['attachment'],
                                                     email_sample.get('attachmentMD5', '')))
        result.add_result(r_spam)

    # AV detections
    av_results = data.get('av_results', [])
    if len(av_results) > 0:
        r_av_sec = ResultSection(title_text='Anti-Virus Detections')
        r_av_sec.add_line('Found %d AV hit(s).' % len(av_results))
        for av_result in av_results:
            r_av_sec.add_section(
                AvHitSection(av_result['scannerID'], av_result['name'], SCORE.SURE))
            result.append_tag(
                VirusHitTag(av_result['name'],
                            context="scanner:%s" % av_result['scannerID']))
        result.add_result(r_av_sec)

    return result
def execute(self, request):
    """Demonstration service entry point.

    Shows every major ResultSection flavour (plain text, colormap
    graph, URL, memory dump), tagging, file resubmission and
    supplementary-file attachment, then stores the Result on the
    request.
    """
    # Create a result object where all the sections will be stored
    result = Result()

    # ==================================================================
    # Default Section:
    #     Default section basically just dumps the text to the screen...
    #       All sections scores will be SUMed in the service result
    #       The Result classification will be the highest classification found in the sections
    default_section = ResultSection(SCORE.LOW,
                                    'Example of a default section',
                                    Classification.RESTRICTED)
    default_section.add_line("You can add line by line!")
    default_section.add_lines(["Or", "Multiple lines", "Inside a list!"])

    # ==================================================================
    # Color map Section:
    #     Creates a color map bar using a minimum and maximum domain
    cmap_min = 0
    cmap_max = 20
    color_map_data = {
        'type': 'colormap',
        'data': {
            'domain': [cmap_min, cmap_max],
            # 50 random values inside the domain; purely illustrative.
            'values': [random.random() * cmap_max for _ in xrange(50)]
        }
    }
    section_color_map = ResultSection(SCORE.NULL,
                                      "Example of colormap result section",
                                      self.SERVICE_CLASSIFICATION,
                                      body_format=TEXT_FORMAT.GRAPH_DATA,
                                      body=json.dumps(color_map_data))

    # ==================================================================
    # URL section:
    #     Generate a list of clickable urls using a json encoded format
    url_section = ResultSection(SCORE.NULL,
                                'Example of a simple url section',
                                self.SERVICE_CLASSIFICATION,
                                body_format=TEXT_FORMAT.URL,
                                body=json.dumps({
                                    "name": "Google",
                                    "url": "https://www.google.com/"
                                }))

    # You can add tags to any section although those tag will be brought up to the result object
    #     Tags are defined by a type, value and weight (confidence lvl)
    #         you can also add a classification and context if needed
    url_section.add_tag(TAG_TYPE.NET_DOMAIN_NAME, "google.com",
                        TAG_WEIGHT.LOW)
    url_section.add_tag(TAG_TYPE.NET_DOMAIN_NAME,
                        "bob.com",
                        TAG_WEIGHT.LOW,
                        classification=Classification.RESTRICTED)
    url_section.add_tag(TAG_TYPE.NET_DOMAIN_NAME,
                        "baddomain.com",
                        TAG_WEIGHT.LOW,
                        context=Context.BEACONS)

    # You may also want to provide a list of url! Also, No need to provide a name, the url link will be displayed
    urls = [{
        "url": "https://google.com/"
    }, {
        "url": "https://google.ca/"
    }, {
        "url": "https://microsoft.com/"
    }]
    url_section2 = ResultSection(
        SCORE.MED, 'Example of a url section with multiple links',
        self.SERVICE_CLASSIFICATION,
        body_format=TEXT_FORMAT.URL,
        body=json.dumps(urls))
    # Add url_section2 as a subsection of url section
    #     The score of the subsections will automatically be added to the parent section
    url_section.add_section(url_section2)

    # ==================================================================
    # Memory dump section:
    #     Dump whatever string content you have into a <pre/> html tag so you can do your own formatting
    data = hexdump(
        "This is some random text that we will format as an hexdump and you'll see "
        "that the hexdump formatting will be preserved by the memory dump section!"
    )
    memdump_section = ResultSection(SCORE.NULL,
                                    'Example of a memory dump section',
                                    self.SERVICE_CLASSIFICATION,
                                    body_format=TEXT_FORMAT.MEMORY_DUMP,
                                    body=data)

    # ==================================================================
    # Re-Submitting files to the system
    #     Adding extracted files will have them resubmitted to the system for analysis
    if request.srl != '8cf8277a71e85122bf7ea4610c7cfcc0bfb6dee799be50a41b2f4b1321b3317f':
        # This IF just prevents resubmitting the same file in a loop for this exemple...
        temp_path = tempfile.mktemp(dir=self.working_directory)
        with open(temp_path, "w") as myfile:
            myfile.write(data)
        request.add_extracted(temp_path,
                              "Extracted by some random magic!",
                              display_name="file.txt")

    # ==================================================================
    # Supplementary files
    #     Adding supplementary files will save them on the datastore for future
    #      reference but wont reprocess those files.
    temp_path = tempfile.mktemp(dir=self.working_directory)
    with open(temp_path, "w") as myfile:
        myfile.write(json.dumps(urls))
    request.add_supplementary(temp_path,
                              "These are urls as a JSON",
                              display_name="urls.json")

    # ==================================================================
    # Wrap-up:
    #     Add all sections to the Result object
    result.add_section(default_section)
    result.add_section(section_color_map)
    result.add_section(url_section)
    result.add_section(memdump_section)
    request.result = result
def macro_scorer(self, text):
    """Score a macro's strings with olevba's VBA_Scanner.

    Returns a ResultSection summarising autoexec strings, suspicious
    strings/functions and potential IOCs, or None when nothing
    interesting (or an error) is found. As a side effect, the first
    macro with a network indicator adds a 500-point section to
    self.ole_result (guarded by self.scored_macro_uri).
    """
    self.log.debug("Macro scorer running")
    score_section = None
    try:
        vba_scanner = VBA_Scanner(text)
        vba_scanner.scan(include_decoded_strings=True)
        # Augment olevba's built-in keyword list with our own patterns.
        for string in self.ADDITIONAL_SUSPICIOUS_KEYWORDS:
            if re.search(string, text, re.IGNORECASE):
                # play nice with detect_suspicious from olevba.py
                vba_scanner.suspicious_keywords.append((string, 'May download files from the Internet'))
        stringcount = len(vba_scanner.autoexec_keywords) + len(vba_scanner.suspicious_keywords) + \
            len(vba_scanner.iocs)
        if stringcount > 0:
            score_section = ResultSection(SCORE.NULL, "Interesting macro strings found")
            # Score scales with hit count but is capped at MAX_STRING_SCORE.
            if len(vba_scanner.autoexec_keywords) > 0:
                subsection = ResultSection(min(self.MAX_STRING_SCORE,
                                               SCORE.LOW * len(vba_scanner.autoexec_keywords)),
                                           "Autoexecution strings")
                for keyword, description in vba_scanner.autoexec_keywords:
                    subsection.add_line(keyword)
                    subsection.add_tag(TAG_TYPE.OLE_MACRO_SUSPICIOUS_STRINGS,
                                       keyword, TAG_WEIGHT.HIGH,
                                       usage=TAG_USAGE.IDENTIFICATION)
                score_section.add_section(subsection)
            if len(vba_scanner.suspicious_keywords) > 0:
                subsection = ResultSection(min(self.MAX_STRING_SCORE,
                                               SCORE.MED * len(vba_scanner.suspicious_keywords)),
                                           "Suspicious strings or functions")
                for keyword, description in vba_scanner.suspicious_keywords:
                    subsection.add_line(keyword)
                    subsection.add_tag(TAG_TYPE.OLE_MACRO_SUSPICIOUS_STRINGS,
                                       keyword, TAG_WEIGHT.HIGH,
                                       usage=TAG_USAGE.IDENTIFICATION)
                score_section.add_section(subsection)
            if len(vba_scanner.iocs) > 0:
                subsection = ResultSection(min(500, SCORE.MED * len(vba_scanner.iocs)),
                                           "Potential host or network IOCs")
                scored_macro_uri = False
                for keyword, description in vba_scanner.iocs:
                    # olevba seems to have swapped the keyword for description during iocs extraction
                    # this holds true until at least version 0.27
                    subsection.add_line("{}: {}".format(keyword, description))
                    desc_ip = self.ip_re.match(description)
                    if self.parse_uri(description) is True:
                        scored_macro_uri = True
                    elif desc_ip:
                        ip_str = desc_ip.group(1)
                        # Only tag routable IPs; reserved ranges are noise.
                        if not is_ip_reserved(ip_str):
                            scored_macro_uri = True
                            subsection.add_tag(TAG_TYPE.NET_IP,
                                               ip_str,
                                               TAG_WEIGHT.HIGH,
                                               usage=TAG_USAGE.CORRELATION)
                score_section.add_section(subsection)
                # Add the top-level network-indicator section only once per file.
                if scored_macro_uri and self.scored_macro_uri is False:
                    self.scored_macro_uri = True
                    scored_uri_section = ResultSection(score=500,
                                                       title_text="Found network indicator(s) within macros")
                    self.ole_result.add_section(scored_uri_section)
    except Exception as e:
        # Best-effort scoring: a scanner failure must not kill the service.
        self.log.debug("OleVBA VBA_Scanner constructor failed: {}".format(str(e)))
    return score_section