Example 1
    def execute(self, request):
        path = request.download()
        with open(path, 'rb') as fin:
            (entropy, part_entropies) = calculate_partition_entropy(fin)

        entropy_graph_data = {
            'type': 'colormap',
            'data': {
                'domain': [0, 8],
                'values': part_entropies
            }
        }
        section = ResultSection(SCORE.NULL,
                                'Entropy.\tEntire File: {}'.format(
                                    round(entropy, 3)),
                                self.SERVICE_CLASSIFICATION,
                                body_format=TEXT_FORMAT.GRAPH_DATA,
                                body=json.dumps(entropy_graph_data))
        result = Result()
        result.add_section(section)
        request.result = result
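
The calculate_partition_entropy helper above is imported by the service rather than defined in this example. As a rough, hypothetical sketch of what it could compute (the 50-slice partitioning and the exact return shape are assumptions): Shannon entropy in bits per byte, 0 through 8, for the whole stream and for equal-sized slices:

    import math

    def calculate_partition_entropy(fin, num_partitions=50):
        # Sketch: Shannon entropy (bits/byte) of the full stream and of
        # num_partitions equal slices, matching the (entropy, part_entropies)
        # pair consumed above. The slice count is an assumption.
        data = fin.read()

        def shannon(buf):
            if not buf:
                return 0.0
            counts = [0] * 256
            for byte in bytearray(buf):
                counts[byte] += 1
            total = float(len(buf))
            return -sum((c / total) * math.log(c / total, 2)
                        for c in counts if c)

        size = max(1, len(data) // num_partitions)
        parts = [shannon(data[i:i + size]) for i in range(0, len(data), size)]
        return shannon(data), parts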
Example 2
    def execute(self, request):
        request.result = Result()
        uresult = self._unpack(request, ['upx'])
        if uresult.ok and uresult.localpath:
            request.add_extracted(uresult.localpath,
                                  'Unpacked from %s' % request.srl,
                                  display_name=uresult.displayname)
            request.result.add_section(
                ResultSection(
                    SCORE.NULL, "%s successfully unpacked!" %
                    (os.path.basename(uresult.displayname)),
                    self.SERVICE_CLASSIFICATION))
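
The _unpack helper called above is not shown in this example. A minimal sketch of what such a helper might look like, assuming a upx -d command-line decompressor and a hypothetical UnpackResult container:

    import os
    import subprocess
    from collections import namedtuple

    # Hypothetical container mirroring the attributes used above.
    UnpackResult = namedtuple('UnpackResult', ['ok', 'localpath', 'displayname'])

    def _unpack(self, request, packers):
        path = request.download()
        if 'upx' in packers:
            out_path = os.path.join(self.working_directory,
                                    os.path.basename(path) + '.unpacked')
            # upx -d decompresses; -o writes to a separate output file
            if subprocess.call(['upx', '-d', '-o', out_path, path]) == 0 \
                    and os.path.exists(out_path):
                return UnpackResult(True, out_path, os.path.basename(out_path))
        return UnpackResult(False, None, None)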
Example 3
    def icap_to_alresult(self, icap_result):
        x_response_info = None
        x_virus_id = None
        result_lines = icap_result.strip().splitlines()
        if not len(result_lines) > 3:
            raise Exception('Invalid result from Kaspersky ICAP server: %s' % str(icap_result))

        xri_key = 'X-Response-Info:'
        xvirus_key = 'X-Virus-ID:'
        for line in result_lines:
            if line.startswith(xri_key):
                x_response_info = line[len(xri_key):].strip()
            elif line.startswith(xvirus_key):
                x_virus_id = line[len(xvirus_key):].strip()

        result = Result()
        # Virus hits should have XRI of 'blocked' and XVIRUS containing the virus information.
        # Virus misses should have XRI of 'passed' and no XVIRUS section
        if x_virus_id:
            if not x_response_info == 'blocked':
                self.log.warn('found virus id but response was: %s', str(x_response_info))
            virus_name = x_virus_id.replace('INFECTED ', '')
            result.add_section(VirusHitSection(virus_name, SCORE.SURE))
            result.append_tag(VirusHitTag(virus_name))
            
        return result
Example 4
    def icap_to_alresult(self, icap_result):
        infection_type = ''
        infection_name = ''
        result_lines = icap_result.strip().splitlines()
        if not len(result_lines) > 3:
            raise Exception('Invalid result from FSecure ICAP server: %s' % str(icap_result))

        x_scan_result = 'X-FSecure-Scan-Result:'
        x_infection_name = 'X-FSecure-Infection-Name:'
        istag = 'ISTag:'

        for line in result_lines:
            if line.startswith(x_scan_result):
                infection_type = line[len(x_scan_result):].strip()
            elif line.startswith(x_infection_name):
                infection_name = line[len(x_infection_name):].strip().strip('"')
            elif line.startswith(istag):
                version_info = line[len(istag):].strip()
                self._set_av_ver(version_info)

        result = Result()
        if infection_name:
            result.add_section(VirusHitSection(infection_name, SCORE.SURE, detection_type=infection_type))
            result.append_tag(VirusHitTag(infection_name))
            
        return result
Example 5
    def execute(self, request):
        result = Result()

        try:
            res = self.connection.query(request.sha256)
        except CFMDDatasource.DatabaseException:
            raise RecoverableError("Query failed")
        if res:
            res_sec = ResultSection(
                title_text="This file was found in the %s. It is not malware."
                % CFMDDatasource.Name,
                score=SCORE['NOT'])

            for item in res:
                res_sec.add_line("%s (%s bytes)" %
                                 (item['filename'], item['size']))
                res_sec.add_line(" MD5: %s" % item['md5'])
                res_sec.add_line(" SHA1: %s" % item['sha1'])
                res_sec.add_line(" SHA256: %s" % item['sha256'])
                res_sec.add_line("")

            result.add_section(res_sec)

        request.result = result
Example 6
    def execute(self, request):
        file_path = request.download()
        filename = os.path.basename(file_path)
        bndb = os.path.join(self.working_directory, "%s.bndb" % filename)
        disas = os.path.join(self.working_directory, filename)

        self.clean_structures()

        if request.tag.startswith("executable/windows/"):
            self.bv = binaryninja.BinaryViewType['PE'].open(file_path)
        else:
            return

        if self.bv is None:
            return

        result = Result()
        self.bv.update_analysis_and_wait()
        # Preparation
        self.linear_sweep()
        self.preprocess()
        self.symbol_usage()
        self.process_target_functions()
        # Check Signatures
        for sig in self.sigs:
            results = {}
            self.check_api_sig(sig, results)
            if len(results) > 0:
                for res in results:
                    rn = "%s - %s" % (results[res].name.split("-A")[0],
                                      sig['name'])
                    section = ResultSection(sig['score'], rn)
                    if res in self.processed:
                        fn = "%s_%s" % (disas, rn.replace(" ", "_"))
                        with open(fn, "wb") as fp:
                            fp.write("\n".join("%s" % l
                                               for l in self.processed[res]))
                            request.add_supplementary(
                                fn, "Linear Disassembly of Matched Function",
                                rn + ".disas")
                    results[res].name = rn
                    result.add_section(section)
        # Finalize Results and Store BNDB
        self.bv.create_database(bndb)
        request.add_supplementary(bndb, "Binary Ninja DB", filename + ".bndb")
        section = ResultSection(self.apiscore, "Target Symbols X-refs")
        for sym in sorted(self.used_syms.items(),
                          key=lambda x: x[1],
                          reverse=True):
            section.add_line("%d\t%s" % (sym[1], sym[0]))
        result.add_section(section)
        request.result = result

        self.clean_structures()
Example 7
    def execute_batch(self, request_batch):
        # BitDefender scans a folder at a time. Download all inputs to a folder
        # and scan it.
        batch_folder = request_batch.download()

        # Seed every request with a default (recoverable) error and an empty
        # result; entries found in the AV output below overwrite these defaults.
        for request in request_batch.requests:
            request.successful = True
            request.result = Result()
            request.error_is_recoverable = True
            request.error_text = 'Did not find an entry for this file in the AV output'

        scanner = BitDefenderScanner(self.working_directory, self.exe_path)

        try:
            scan_results = scanner.scan_folder(batch_folder)

            for original_path, av_result in scan_results.results.iteritems():
                request = request_batch.find_by_local_path(original_path)
                if not request:
                    self.log.error(
                        "Could not find task associated with path: %s",
                        original_path)
                    continue

                result = Result()
                for embedded_file, (is_virus, infection_type, infection_name,
                                    _) in av_result.iteritems():
                    if not is_virus:
                        continue

                    score = SCORE.HIGH
                    if infection_type == 'infected':
                        score = SCORE.SURE

                    result.append_tag(VirusHitTag(infection_name))
                    result.add_section(
                        VirusHitSection(infection_name, score, embedded_file,
                                        infection_type))

                    # TODO(CVE / Exploit tag extraction)

                request.result = result
                request.successful = True
                request.task.report_service_context(self._av_info)
        except RecoverableError, rec_err:
            for request in request_batch.requests:
                request.successful = False
                request.error_text = rec_err.message
Example 8
    def execute(self, request):
        request.result = Result()
        local_filename = request.download()
        with open(local_filename) as f:
            file_content = f.read()
        request.set_service_context(self._av_info)
        max_retry = 2
        done = False
        retry = 0

        while not done:
            # If this is a retry, sleep for a second
            if retry:
                # Sleep between 1 and 3 seconds times the number of retry
                time.sleep(retry * random.randrange(100, 300, 1) / float(100))

            output = self.icap.scan_data(file_content)

            ret = self.parse_results(output, request.result, local_filename)
            if ret in [201, 204]:
                done = True
            elif ret == 500:
                # Symantec often 500's on truncated zips and other formats. It tries to decompress/parse
                # them and can't proceed.
                request.result.add_section(
                    ResultSection(SCORE.NULL,
                                  'Symantec could not scan this file.'))
                done = True
            elif ret == 551:
                if retry == max_retry:
                    raise Exception("[FAILED %s times] Resources unavailable" %
                                    max_retry)
                else:
                    self.log.info("Resource unavailable... retrying")
                    retry += 1
            elif ret == 558:
                raise Exception(
                    "Could not scan file, Symantec license is expired!")
            elif ret == 100:
                raise Exception("Could not find response from icap service, "
                                "response header %s" %
                                output.partition("\r")[0])
            else:
                raise Exception("Unknown return code from symantec: %s" % ret)
        return
Example 9
    def execute_batch(self, request_batch):
        self.log.info('Execute batch of size %d', len(request_batch.requests))

        request_batch.download()
        paths_to_scan = []
        for request in request_batch.requests:
            if request.successful and request.local_path:
                paths_to_scan.append(request.local_path)

        # Seed every request with a default result; entries found in the AV
        # output below overwrite these defaults.
        for request in request_batch.requests:
            request.successful = True
            request.error_is_recoverable = True
            request.result = Result()
            # request.error_text = 'Did not find an entry for this file in the AV output'

        scanner = McAfeeScanner(self.exe_path, self.dat_directory,
                                self.working_directory)  # pylint: disable=E0602
        scan_results = scanner.scan_files(paths_to_scan)
        if not scan_results:
            return

        for original_path, av_result in scan_results.results.iteritems():
            request = request_batch.find_by_local_path(original_path)
            if not request:
                self.log.error(
                    'Could not find request associated with path %s',
                    original_path)
                continue

            request.task.report_service_context(self._av_info)

            result = Result()
            for embedded_file, (is_virus, detection_type, virus_name,
                                _reserved) in av_result.iteritems():
                if not is_virus:
                    continue
                result.append_tag(VirusHitTag(virus_name))
                result.add_section(
                    VirusHitSection(virus_name, SCORE.SURE, embedded_file,
                                    detection_type))
            request.result = result
            request.successful = True

        request_batch.delete_downloaded()
Example 10
    def execute(self, request):
        request.result = Result()
        temp_filename = request.download()

        # Filter out large documents
        if os.path.getsize(temp_filename) > self.max_pdf_size:
            request.result.add_section(ResultSection(SCORE['NULL'], "PDF Analysis of the file was skipped because the "
                                                                    "file is too big (limit is %i MB)." % (
                                                                    self.max_pdf_size / 1000 / 1000)))
            return

        filename = os.path.basename(temp_filename)
        # noinspection PyUnusedLocal
        file_content = ''
        with open(temp_filename, 'r') as f:
            file_content = f.read()

        if '<xdp:xdp' in file_content:
            self.find_xdp_embedded(filename, file_content, request)

        self.peepdf_analysis(temp_filename, file_content, request)
Example 11
    def execute(self, request):
        local = request.download()

        al_result = Result()

        command = self.construct_command(request)

        request.task.set_milestone("started", True)

        extract_section = ResultSection(SCORE.NULL, 'Extracted and Carved Files')

        for module in binwalk.scan(local, **command):
            section = ResultSection(SCORE.NULL, module.name, body_format=TEXT_FORMAT.MEMORY_DUMP)
            for result in module.results:
                section.add_line("0x%.8X : %s" % (result.offset, result.description))

                output = module.extractor.output.get(result.file.path)
                if output is not None:

                    if result.offset in output.carved:
                        carved_path = output.carved[result.offset]
                        extract_section.add_line("Carved data from offset 0x%X to %s" % (result.offset, carved_path))
                        file_name = carved_path.split("/")[-1]
                        request.add_extracted(carved_path, 'Carved File', file_name)

                    if result.offset in output.extracted and \
                            len(output.extracted[result.offset].files) > 0:

                        extracted = output.extracted[result.offset]
                        path = extracted.files[0]
                        extract = extracted.command

                        extract_section.add_line("Extracted %d files from offset 0x%X to '%s' using '%s'" % (
                            len(extracted.files),
                            result.offset,
                            path,
                            extract))

                        if os.path.isdir(path):
                            zip_file = zipfile.ZipFile("%s.zip" % path.split("/")[-1], 'w', zipfile.ZIP_DEFLATED)
                            self.zip_dir(path, zip_file)
                            zip_file.close()
                            request.add_supplementary(zip_file.filename, extract, zip_file.filename.split("/")[-1])
                        else:
                            request.add_extracted(path, extract, path.split("/")[-1])

            al_result.add_section(section)

        request.task.set_milestone("finished", True)
        al_result.add_section(extract_section)
        request.result = al_result
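
The zip_dir helper used above is not part of this example. A minimal sketch of what it presumably does, walking the extraction directory and writing each file into the open ZipFile under a relative archive name:

    import os

    def zip_dir(self, path, zip_file):
        # Walk the extraction directory and add every file to the archive,
        # keyed by its path relative to the directory being zipped.
        for root, _dirs, files in os.walk(path):
            for name in files:
                full_path = os.path.join(root, name)
                zip_file.write(full_path, os.path.relpath(full_path, path))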
Example 12
    def execute(self, request):
        request.result = Result()
        request.set_service_context(self._av_info)
        filename = request.download()

        # Generate the temporary resulting filename which AVG is going to dump the results in
        out_file = os.path.join(self.working_directory, "scanning_results.txt")

        cmd = [
            self.avg_path, "-H", "-p", "-o", "-w", "-b", "-j", "-a",
            "--report=%s" % out_file, filename
        ]
        devnull = open('/dev/null', 'wb')
        proc = subprocess.Popen(cmd,
                                stdout=devnull,
                                stderr=devnull,
                                cwd=os.path.dirname(self.avg_path))
        proc.wait()

        try:
            # AVG does not support unicode file names, so any results it returns for these files will be filtered out
            out_file_handle = codecs.open(out_file,
                                          mode='rb',
                                          encoding="utf-8",
                                          errors="replace")
            output = out_file_handle.read()
            out_file_handle.close()

            # 2- Parse the output and fill in the result objects
            self.parse_results_seq(output, request.result,
                                   len(self.working_directory))

        except Exception, scan_exception:
            self.log.error("AVG scanning was not completed: %s" %
                           str(scan_exception))
            raise
Example 13
    def parse_results(self, response):
        res = Result()
        response = response.get('scan_results', response)
        virus_name = ""

        if response is not None and response.get('progress_percentage') == 100:
            hit = False
            av_hits = ResultSection(title_text='Anti-Virus Detections')

            scans = response.get('scan_details', response)
            for majorkey, subdict in sorted(scans.iteritems()):
                score = SCORE.NULL
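                # Assumption: a scan_result_i of 1 is reported by the engine
                # as "infected" and 2 as "suspicious", hence SURE vs VHIGH.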
                if subdict['scan_result_i'] == 1:
                    virus_name = subdict['threat_found']
                    if virus_name:
                        score = SCORE.SURE
                elif subdict['scan_result_i'] == 2:
                    virus_name = subdict['threat_found']
                    if virus_name:
                        score = SCORE.VHIGH

                if score:
                    virus_name = virus_name.replace("a variant of ", "")
                    engine = self.engine_map[self._format_engine_name(
                        majorkey)]
                    res.append_tag(
                        VirusHitTag(virus_name,
                                    context="scanner:%s" % majorkey))
                    av_hits.add_section(
                        AvHitSection(majorkey, virus_name, engine, score))
                    hit = True

            if hit:
                res.add_result(av_hits)

        return res
Example 14
    def execute(self, request):
        file_path = request.download()
        result = Result()

        # restart Suricata if we need to
        self.start_suricata_if_necessary()

        # Update our rules if they're stale,
        self.reload_rules_if_necessary()

        # Strip frame headers from the PCAP, since Suricata sometimes has trouble parsing strange PCAPs
        stripped_filepath = self.strip_frame_headers(file_path)

        # Pass the pcap file to Suricata via the socket
        ret = self.suricata_sc.send_command(
            "pcap-file", {
                "filename": stripped_filepath,
                "output-dir": self.working_directory
            })

        if not ret or ret["return"] != "OK":
            self.log.exception("Failed to submit PCAP for processing: %s" %
                               (ret["message"] if ret else "no response"))

        # Wait for the socket to finish processing our PCAP
        while True:
            time.sleep(1)
            ret = self.suricata_sc.send_command("pcap-current")

            if ret and ret["message"] == "None":
                break

        alerts = {}
        signatures = {}
        domains = []
        ips = []
        urls = []

        # Parse the json results of the service
        for line in open(os.path.join(self.working_directory, 'eve.json')):
            record = json.loads(line)

            timestamp = dateparser.parse(record['timestamp']).isoformat(' ')
            src_ip = record['src_ip']
            src_port = record['src_port']
            dest_ip = record['dest_ip']
            dest_port = record['dest_port']

            if src_ip not in ips:
                ips.append(src_ip)
            if dest_ip not in ips:
                ips.append(dest_ip)

            if record['event_type'] == 'http':
                if 'hostname' not in record['http'] or 'url' not in record[
                        'http']:
                    continue

                domain = record['http']['hostname']
                if domain not in domains and domain not in ips:
                    domains.append(domain)
                url = "http://" + domain + record['http']['url']
                if url not in urls:
                    urls.append(url)

            if record['event_type'] == 'dns':
                if 'rrname' not in record['dns']:
                    continue
                domain = record['dns']['rrname']
                if domain not in domains and domain not in ips:
                    domains.append(domain)

            if record['event_type'] == 'alert':
                if 'signature_id' not in record[
                        'alert'] or 'signature' not in record['alert']:
                    continue
                signature_id = record['alert']['signature_id']
                signature = record['alert']['signature']

                if signature_id not in alerts:
                    alerts[signature_id] = []
                if signature_id not in signatures:
                    signatures[signature_id] = signature

                alerts[signature_id].append(
                    "%s %s:%s -> %s:%s" %
                    (timestamp, src_ip, src_port, dest_ip, dest_port))

        # Create the result sections if there are any hits
        if len(alerts) > 0:
            for signature_id, signature in signatures.iteritems():
                score = SCORE.NULL
                tag_weight = TAG_WEIGHT.NULL

                if any(x in signature
                       for x in self.cfg.get("SURE_SCORE").split()):
                    score = SCORE.SURE
                    tag_weight = TAG_WEIGHT.SURE

                if any(x in signature
                       for x in self.cfg.get("VHIGH_SCORE").split()):
                    score = SCORE.VHIGH
                    tag_weight = TAG_WEIGHT.VHIGH

                section = ResultSection(score,
                                        '%s: %s' % (signature_id, signature))
                for flow in alerts[signature_id][:10]:
                    section.add_line(flow)
                if len(alerts[signature_id]) > 10:
                    section.add_line('And %s more flows' %
                                     (len(alerts[signature_id]) - 10))
                result.add_section(section)

                # Add a tag for the signature id and the message
                result.add_tag(TAG_TYPE.SURICATA_SIGNATURE_ID,
                               str(signature_id),
                               tag_weight,
                               usage=TAG_USAGE.IDENTIFICATION)
                result.add_tag(TAG_TYPE.SURICATA_SIGNATURE_MESSAGE,
                               signature,
                               tag_weight,
                               usage=TAG_USAGE.IDENTIFICATION)

            # Add tags for the domains, urls, and IPs we've discovered
            for domain in domains:
                result.add_tag(TAG_TYPE.NET_DOMAIN_NAME,
                               domain,
                               TAG_WEIGHT.VHIGH,
                               usage=TAG_USAGE.CORRELATION)
            for url in urls:
                result.add_tag(TAG_TYPE.NET_FULL_URI,
                               url,
                               TAG_WEIGHT.VHIGH,
                               usage=TAG_USAGE.CORRELATION)
            for ip in ips:
                result.add_tag(TAG_TYPE.NET_IP,
                               ip,
                               TAG_WEIGHT.VHIGH,
                               usage=TAG_USAGE.CORRELATION)

            # Add the original Suricata output as a supplementary file in the result
            request.add_supplementary(
                os.path.join(self.working_directory, 'eve.json'), 'json',
                'SuricataEventLog.json')

        # Add the stats.log to the result, which can be used to determine service success
        if os.path.exists(os.path.join(self.working_directory, 'stats.log')):
            request.add_supplementary(
                os.path.join(self.working_directory, 'stats.log'), 'log',
                'stats.log')

        request.result = result
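
The parsing loop above reads eve.json one JSON object per line. For reference, an illustrative alert record (values are made up; the field names follow the EVE fields consumed above):

    sample_record = {
        "timestamp": "2017-01-01T12:00:00.000000+0000",
        "event_type": "alert",
        "src_ip": "10.0.0.5", "src_port": 49152,
        "dest_ip": "192.0.2.10", "dest_port": 80,
        "alert": {"signature_id": 1000001,
                  "signature": "EXAMPLE Suspicious outbound HTTP"}
    }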
Example 15
    def _extract_result_from_matches(self, matches):
        result = Result(default_usage=TAG_USAGE.CORRELATION)
        for match in matches:
            self._add_resultinfo_for_match(result, match)
        return result
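
Neither _add_resultinfo_for_match nor the shape of matches is shown here. A hypothetical sketch, assuming YARA-style match objects with rule and meta attributes and the same ResultSection/SCORE API used throughout these examples:

    def _add_resultinfo_for_match(self, result, match):
        # Score the hit from an assumed 'malicious' meta flag and record the
        # rule name plus its metadata in a section.
        score = SCORE.HIGH if match.meta.get('malicious') else SCORE.LOW
        section = ResultSection(score, "Rule hit: %s" % match.rule)
        for key, value in sorted(match.meta.items()):
            section.add_line("%s: %s" % (key, value))
        result.add_section(section)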
Example 16
    def execute(self, request):
        if request.task.depth > 3:
            self.log.debug(
                "Cuckoo is exiting because it currently does not execute on great-great-grandchildren."
            )
            request.set_save_result(False)
            return
        self.session = requests.Session()
        self.task = request.task
        request.result = Result()
        self.file_res = request.result
        file_content = request.get()
        self.cuckoo_task = None
        self.al_report = None
        self.file_name = os.path.basename(request.path)

        full_memdump = False
        pull_memdump = False

        # Check the file extension
        original_ext = self.file_name.rsplit('.', 1)
        tag_extension = tag_to_extension.get(self.task.tag)

        # NOTE: Cuckoo still tries to identify files itself, so we only force the extension/package if the user
        # specifies one. However, we go through the trouble of renaming the file because the only way to have
        # certain modules run is to use the appropriate suffix (.jar, .vbs, etc.)

        # Check for a valid tag
        if tag_extension is not None and 'unknown' not in self.task.tag:
            file_ext = tag_extension
        # Check if the file was submitted with an extension
        elif len(original_ext) == 2:
            submitted_ext = original_ext[1]
            if submitted_ext not in SUPPORTED_EXTENSIONS:
                # This is the case where the submitted file was NOT identified, and the provided extension
                # isn't in the list of extensions that we explicitly support.
                self.log.debug(
                    "Cuckoo is exiting because it doesn't support the provided file type."
                )
                request.set_save_result(False)
                return
            else:
                # This is a usable extension. It might not run (if the submitter has lied to us).
                file_ext = '.' + submitted_ext
        else:
            # This is unknown without an extension that we accept/recognize.. no scan!
            self.log.debug(
                "Cuckoo is exiting because the file type could not be identified. %s %s"
                % (tag_extension, self.task.tag))
            return

        # Rename based on the found extension.
        if file_ext and self.task.sha256:
            self.file_name = self.task.sha256 + file_ext

        # Parse user-specified options
        kwargs = dict()
        task_options = []

        analysis_timeout = request.get_param('analysis_timeout')

        generate_report = request.get_param('generate_report')
        if generate_report is True:
            self.log.debug("Setting generate_report flag.")

        dump_processes = request.get_param('dump_processes')
        if dump_processes is True:
            self.log.debug("Setting procmemdump flag in task options")
            task_options.append('procmemdump=yes')

        dll_function = request.get_param('dll_function')
        if dll_function:
            task_options.append('function={}'.format(dll_function))

        arguments = request.get_param('arguments')
        if arguments:
            task_options.append('arguments={}'.format(arguments))

        # Parse extra options (these aren't user selectable because they are dangerous/slow)
        if request.get_param('pull_memory') and request.task.depth == 0:
            pull_memdump = True

        if request.get_param('dump_memory') and request.task.depth == 0:
            # Full system dump and volatility scan
            full_memdump = True
            kwargs['memory'] = True

        if request.get_param('no_monitor'):
            task_options.append("free=yes")

        routing = request.get_param('routing')
        if routing is None:
            routing = self.enabled_routes[0]

        select_machine = self.find_machine(self.task.tag, routing)

        if select_machine is None:
            # No matching VM and no default
            self.log.debug(
                "No Cuckoo vm matches tag %s and no machine is tagged as default."
                % self.task.tag)
            request.set_save_result(False)
            return

        kwargs['timeout'] = analysis_timeout
        kwargs['options'] = ','.join(task_options)
        if select_machine:
            kwargs['machine'] = select_machine

        self.cuckoo_task = CuckooTask(self.file_name, **kwargs)

        if self.restart_interval <= 0 or not self.is_cuckoo_ready():
            cuckoo_up = self.trigger_cuckoo_reset()
            if not cuckoo_up:
                self.session.close()
                raise RecoverableError(
                    "While restarting Cuckoo, Cuckoo never came back up.")
        else:
            self.restart_interval -= 1

        try:
            self.cuckoo_submit(file_content)
            if self.cuckoo_task.report:
                try:
                    machine_name = None
                    report_info = self.cuckoo_task.report.get('info', {})
                    machine = report_info.get('machine', {})

                    if isinstance(machine, dict):
                        machine_name = machine.get('name')

                    if machine_name is None:
                        self.log.debug(
                            'Unable to retrieve machine name from result.')
                        guest_ip = ""
                    else:
                        guest_ip = self.report_machine_info(machine_name)
                    self.log.debug(
                        "Generating AL Result from Cuckoo results..")
                    success = generate_al_result(self.cuckoo_task.report,
                                                 self.file_res, file_ext,
                                                 guest_ip,
                                                 self.SERVICE_CLASSIFICATION)
                    if success is False:
                        err_str = self.get_errors()
                        if "Machinery error: Unable to restore snapshot" in err_str:
                            raise RecoverableError(
                                "Cuckoo is restarting container: %s" % err_str)

                        raise CuckooProcessingException(
                            "Cuckoo was unable to process this file. %s" %
                            err_str)
                except RecoverableError:
                    self.trigger_cuckoo_reset(5)
                    raise
                except Exception as e:
                    # This is non-recoverable unless we were stopped during processing
                    self.trigger_cuckoo_reset(1)
                    if self.should_run:
                        self.log.exception("Error generating AL report: ")
                        raise CuckooProcessingException(
                            "Unable to generate cuckoo al report for task %s: %s"
                            % (safe_str(self.cuckoo_task.id), safe_str(e)))

                if self.check_stop():
                    raise RecoverableError(
                        "Cuckoo stopped during result processing..")

                if generate_report is True:
                    self.log.debug("Generating cuckoo report tar.gz.")

                    # Submit cuckoo analysis report archive as a supplementary file
                    tar_report = self.cuckoo_query_report(self.cuckoo_task.id,
                                                          fmt='all',
                                                          params={'tar': 'gz'})
                    if tar_report is not None:
                        tar_report_path = os.path.join(self.working_directory,
                                                       "cuckoo_report.tar.gz")
                        try:
                            report_file = open(tar_report_path, 'wb')
                            report_file.write(tar_report)
                            report_file.close()
                            self.task.add_supplementary(
                                tar_report_path,
                                "Cuckoo Sandbox analysis report archive (tar.gz)"
                            )
                        except:
                            self.log.exception(
                                "Unable to add tar of complete report for task %s"
                                % self.cuckoo_task.id)

                self.log.debug("Checking for dropped files and pcap.")
                # Submit dropped files and pcap if available:
                self.check_dropped(request, self.cuckoo_task.id)
                self.check_pcap(self.cuckoo_task.id)

                # Check process memory dumps
                if dump_processes is True:
                    self.download_memdump('procmemdump')

                # We only retrieve full memory dumps for top-level files, and only if it was specified in
                # extra options.
                if full_memdump and pull_memdump:
                    self.download_memdump('fullmemdump')
            else:
                # We didn't get a report back.. cuckoo has failed us
                if self.should_run:
                    self.trigger_cuckoo_reset(5)
                    self.log.info("Raising recoverable error for running job.")
                    raise RecoverableError(
                        "Unable to retrieve cuckoo report. The following errors were detected: %s"
                        % safe_str(self.cuckoo_task.errors))

        except Exception as e:
            # Delete the task now..
            self.log.info('General exception caught during processing: %s' % e)
            if self.cuckoo_task and self.cuckoo_task.id is not None:
                self.cuckoo_delete_task(self.cuckoo_task.id)
            self.session.close()

            # Send the exception off to ServiceBase
            raise

        # Delete and exit
        if self.cuckoo_task and self.cuckoo_task.id is not None:
            self.cuckoo_delete_task(self.cuckoo_task.id)

        self.session.close()
Example 17
        # pprint.pprint(file_res)


if __name__ == "__main__":

    import sys

    from signify import signed_pe
    import signify

    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s %(name)s %(levelname)s %(message)s')

    file_io = BytesIO(open(sys.argv[1], "rb").read())
    res = Result()
    PEFile.get_signify(file_io, res)
    # try:
    #     s_data = signed_pe.SignedPEFile(file_io)
    # except Exception as e:
    #     print "Exception parsing data"
    #     traceback.print_exc()
    #     msg = e.message
    #
    # try:
    #     s_data.verify()
    # except Exception as e:
    #     traceback.print_exc()
    #     print "====="
    #     print e.message
    #
Example 18
    def parse_api(data):
        result = Result()

        # Info block
        hash_info = data.get('hash_info')
        if not hash_info:
            return result
        r_info = ResultSection(title_text='File Info')
        r_info.score = SCORE.NULL
        r_info.add_line('Received Date: %s-%s-%s' %
                        (data['received_date'][:4], data['received_date'][4:6],
                         data['received_date'][6:]))
        r_info.add_line('Size: %s' % hash_info.get('filesize', ""))
        r_info.add_line('MD5: %s' % hash_info.get('md5', ""))
        r_info.add_line('SHA1: %s' % hash_info.get('sha1', ""))
        r_info.add_line('SHA256: %s' % hash_info.get('sha256', ""))
        r_info.add_line('SSDeep Blocksize: %s' %
                        hash_info.get('ssdeep_blocksize', ""))
        r_info.add_line('SSDeep Hash1: %s' % hash_info.get('ssdeep_hash1', ""))
        r_info.add_line('SSDeep Hash2: %s' % hash_info.get('ssdeep_hash2', ""))
        result.add_result(r_info)

        callouts = data.get('callouts', [])
        if len(callouts) > 0:
            max_callouts = 10
            r_callouts = ResultSection(title_text='Sandbox Call-Outs')
            r_callouts.score = SCORE.VHIGH
            analyser = ''
            r_call_sub_section = None

            reported_count = 0
            for callout in callouts:
                reported_count += 1
                if reported_count <= max_callouts:
                    if analyser != callout['ip']:
                        title = '%s (Analysed on %s)' % (callout['ip'],
                                                         callout['addedDate'])
                        r_call_sub_section = ResultSection(title_text=title,
                                                           parent=r_callouts)
                        analyser = callout['ip']

                    channel = callout['channel']
                    if channel is not None:
                        channel = "(%s)" % channel.split('~~')[0]
                    else:
                        channel = ""

                    r_call_sub_section.add_line("{0:s}:{1:d}{2:s}".format(
                        callout['callout'], callout['port'], channel))

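                # If the callout splits into four integer octets it is tagged
                # as an IP; a ValueError below means it is a domain name.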
                try:
                    p1, p2, p3, p4 = callout['callout'].split(".")
                    if int(p1) <= 255 and int(p2) <= 255 and int(
                            p3) <= 255 and int(p4) <= 255:
                        result.append_tag(
                            Tag(TAG_TYPE.NET_IP,
                                callout['callout'],
                                TAG_WEIGHT.MED,
                                context=Context.BEACONS))
                except ValueError:
                    result.append_tag(
                        Tag(TAG_TYPE.NET_DOMAIN_NAME,
                            callout['callout'],
                            TAG_WEIGHT.MED,
                            context=Context.BEACONS))

                if callout['port'] != 0:
                    result.append_tag(
                        Tag(TAG_TYPE.NET_PORT,
                            str(callout['port']),
                            TAG_WEIGHT.MED,
                            context=Context.BEACONS))

            if len(callouts) > max_callouts:
                r_callouts.add_line("And %s more..." % str(len(callouts) - max_callouts))
            result.add_result(r_callouts)

        spamcount = data.get('spamCount', {})
        if spamcount:
            r_spam = ResultSection(title_text='SPAM feed')
            r_spam.score = SCORE.VHIGH
            r_spam.add_line('Found %d related spam emails' %
                            spamcount['count'])
            email_sample = spamcount.get("email_sample", {})
            r_spam.add_line('\tFirst Seen: %s' % email_sample['firstSeen'])
            r_spam.add_line('\tLast Seen: %s' % email_sample['lastSeen'])
            r_sub_section = ResultSection(title_text='Attachments',
                                          parent=r_spam)
            if email_sample['filename']:
                r_sub_section.add_line(
                    '%s - md5: %s' %
                    (email_sample['filename'], email_sample['filenameMD5']))
            if email_sample['attachment']:
                r_sub_section.add_line('%s - md5: %s' %
                                       (email_sample['attachment'],
                                        email_sample['attachmentMD5']))
            result.add_result(r_spam)

        av_results = data.get('av_results', [])
        if len(av_results) > 0:
            r_av_sec = ResultSection(title_text='Anti-Virus Detections')
            r_av_sec.add_line('Found %d AV hit(s).' % len(av_results))
            for av_result in av_results:
                r_av_sec.add_section(
                    AvHitSection(av_result['scannerID'], av_result['name'],
                                 SCORE.SURE))
                result.append_tag(
                    VirusHitTag(av_result['name'],
                                context="scanner:%s" % av_result['scannerID']))
            result.add_result(r_av_sec)

        return result
Example 19
    def execute(self, request):
        self.request = request
        request.result = Result()
        self.result = self.request.result
        file_path = self.request.download()
        fh = open(file_path, 'rb')
        try:
            self.swf = SWF(fh)
            if self.swf is None:
                raise ValueError("SWF parser returned no object")
        except:
            self.log.exception("Unable to parse srl %s:" % self.request.srl)
            fh.close()
            raise
        self.tag_summary = defaultdict(list)
        self.symbols = {}
        self.binary_data = {}
        self.exported_assets = []
        self.big_buffers = set()
        self.has_product_info = False
        self.anti_decompilation = False
        self.recent_compile = False
        self.disasm_path = None

        header_subsection = ResultSection(score=0, title_text="SWF Header")
        header_subsection.add_line("Version: %d" % self.swf.header.version)
        header_subsection.add_line("FileLength: %d" %
                                   self.swf.header.file_length)
        header_subsection.add_line("FrameSize: %s" %
                                   str(self.swf.header.frame_size))
        header_subsection.add_line("FrameRate: %d" %
                                   self.swf.header.frame_rate)
        header_subsection.add_line("FrameCount: %d" %
                                   self.swf.header.frame_count)
        self.result.add_section(header_subsection)

        # Parse Tags
        tag_types = []
        for tag in self.swf.tags:
            self.tag_analyzers.get(SWF_TAGS.get(tag.type), self._dummy)(tag)
            tag_types.append(str(tag.type))
        tag_list = ','.join(tag_types)
        tags_ssdeep = ssdeep.hash(tag_list)
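        # An ssdeep digest has the form "blocksize:hash1:hash2"; tag the two
        # chunk hashes separately so either half can be matched on its own.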
        _, hash_one, hash_two = tags_ssdeep.split(':')
        self.result.add_tag(tag_type=TAG_TYPE.SWF_TAGS_SSDEEP,
                            value=hash_one,
                            weight=TAG_WEIGHT.NULL)
        self.result.add_tag(tag_type=TAG_TYPE.SWF_TAGS_SSDEEP,
                            value=hash_two,
                            weight=TAG_WEIGHT.NULL)
        # Script Overview
        if len(self.symbols.keys()) > 0:
            root_symbol = 'unspecified'
            if 0 in self.symbols:
                root_symbol = self.symbols[0]
                self.symbols.pop(0)
            symbol_subsection = ResultSection(score=SCORE.NULL,
                                              title_text="Symbol Summary")
            symbol_subsection.add_line('Main Timeline: %s' % root_symbol)
            if len(self.symbols.keys()) > 0:
                symbol_subsection.add_line('Other Symbols:')
                for tag_id, name in self.symbols.iteritems():
                    symbol_subsection.add_line('\tTagId: %s\tName: %s' %
                                               (tag_id, name))
            self.result.add_section(symbol_subsection)

        if len(self.binary_data.keys()) > 0:
            self.result.report_heuristic(Swiffer.AL_Swiffer_003)
            binary_subsection = ResultSection(
                score=SCORE.NULL, title_text="Attached Binary Data")
            for tag_id, tag_data in self.binary_data.iteritems():
                tag_name = self.symbols.get(tag_id, 'unspecified')
                binary_subsection.add_line('\tTagId: %s\tName: %s\tSize: %d' %
                                           (tag_id, tag_name, len(tag_data)))
                try:
                    binary_filename = hashlib.sha256(
                        tag_data).hexdigest() + '.attached_binary'
                    binary_path = os.path.join(self.working_directory,
                                               binary_filename)
                    with open(binary_path, 'wb') as fh:
                        fh.write(tag_data)
                    self.request.add_extracted(
                        binary_path, "SWF Embedded Binary Data %d" % tag_id,
                        tag_name)
                except:
                    self.log.exception(
                        "Error submitting embedded binary data for swf:")

            self.result.add_section(binary_subsection)

        tags_subsection = ResultSection(score=SCORE.INFO,
                                        title_text="Tags of Interest")
        for tag in sorted(self.tag_summary.keys()):
            tags_subsection.add_line(tag)
            summaries = self.tag_summary[tag]
            for summary in summaries:
                summary_line = '\t' + '\t'.join(summary)
                tags_subsection.add_line(summary_line)
            tags_subsection.add_line('')
        if len(tags_subsection.body) > 0:
            self.result.add_section(tags_subsection)

        if len(self.big_buffers) > 0:
            self.result.report_heuristic(Swiffer.AL_Swiffer_001)
            bbs = ResultSection(score=SCORE.HIGH,
                                title_text="Large String Buffers")
            for buf in self.big_buffers:
                bbs.add_line("Found a %d byte string." % len(buf))
                buf_filename = ""
                try:
                    buf_filename = hashlib.sha256(
                        buf).hexdigest() + '.stringbuf'
                    buf_path = os.path.join(self.working_directory,
                                            buf_filename)
                    with open(buf_path, 'w') as fh:
                        fh.write(buf)
                    self.request.add_extracted(buf_path,
                                               "AVM2 Large String Buffer.")
                except:
                    self.log.exception(
                        "Error submitting AVM2 String Buffer %s" %
                        buf_filename)
            self.result.add_section(bbs)

        if not self.has_product_info:
            self.log.debug("Missing product info.")
            no_info = ResultSection(score=SCORE.INFO,
                                    title_text="Missing Product Information")
            no_info.add_line(
                "This SWF doesn't specify information about the product that created it."
            )
            self.result.add_section(no_info)

        if self.anti_decompilation:
            self.result.report_heuristic(Swiffer.AL_Swiffer_004)
            self.log.debug("Anti-disassembly techniques may be present.")
            no_dis = ResultSection(score=SCORE.LOW,
                                   title_text="Incomplete Disassembly")
            no_dis.add_line(
                "This SWF may contain intentional corruption or obfuscation to prevent disassembly."
            )

            self.result.add_section(no_dis)

        if self.recent_compile:
            recent_compile = ResultSection(score=SCORE.LOW,
                                           title_text="Recent Compilation")
            recent_compile.add_line(
                "This SWF was compiled within the last 24 hours.")
            self.result.add_section(recent_compile)
            self.result.report_heuristic(Swiffer.AL_Swiffer_002)

        fh.close()
Example 20
    def execute(self, request):
        # Create a result object where all the sections will be stored
        result = Result()

        # ==================================================================
        # Default Section:
        #     Default section basically just dumps the text to the screen...
        #       All section scores will be summed in the service result
        #       The Result classification will be the highest classification found in the sections
        default_section = ResultSection(SCORE.LOW,
                                        'Example of a default section',
                                        Classification.RESTRICTED)
        default_section.add_line("You can add line by line!")
        default_section.add_lines(["Or", "Multiple lines", "Inside a list!"])

        # ==================================================================
        # Color map Section:
        #     Creates a color map bar using a minimum and maximum domain
        cmap_min = 0
        cmap_max = 20
        color_map_data = {
            'type': 'colormap',
            'data': {
                'domain': [cmap_min, cmap_max],
                'values': [random.random() * cmap_max for _ in xrange(50)]
            }
        }
        section_color_map = ResultSection(SCORE.NULL,
                                          "Example of colormap result section",
                                          self.SERVICE_CLASSIFICATION,
                                          body_format=TEXT_FORMAT.GRAPH_DATA,
                                          body=json.dumps(color_map_data))

        # ==================================================================
        # URL section:
        #     Generate a list of clickable urls using a json encoded format
        url_section = ResultSection(SCORE.NULL,
                                    'Example of a simple url section',
                                    self.SERVICE_CLASSIFICATION,
                                    body_format=TEXT_FORMAT.URL,
                                    body=json.dumps({
                                        "name":
                                        "Google",
                                        "url":
                                        "https://www.google.com/"
                                    }))

        # You can add tags to any section, although those tags will be propagated up to the result object
        #     Tags are defined by a type, value and weight (confidence level)
        #         you can also add a classification and context if needed
        url_section.add_tag(TAG_TYPE.NET_DOMAIN_NAME, "google.com",
                            TAG_WEIGHT.LOW)
        url_section.add_tag(TAG_TYPE.NET_DOMAIN_NAME,
                            "bob.com",
                            TAG_WEIGHT.LOW,
                            classification=Classification.RESTRICTED)
        url_section.add_tag(TAG_TYPE.NET_DOMAIN_NAME,
                            "baddomain.com",
                            TAG_WEIGHT.LOW,
                            context=Context.BEACONS)

        # You may also want to provide a list of urls! No need to provide a name; the url link will be displayed
        urls = [{
            "url": "https://google.com/"
        }, {
            "url": "https://google.ca/"
        }, {
            "url": "https://microsoft.com/"
        }]
        url_section2 = ResultSection(
            SCORE.MED,
            'Example of a url section with multiple links',
            self.SERVICE_CLASSIFICATION,
            body_format=TEXT_FORMAT.URL,
            body=json.dumps(urls))
        # Add url_section2 as a subsection of url section
        #     The score of the subsections will automatically be added to the parent section
        url_section.add_section(url_section2)

        # ==================================================================
        # Memory dump section:
        #     Dump whatever string content you have into a <pre/> html tag so you can do your own formatting
        data = hexdump(
            "This is some random text that we will format as an hexdump and you'll see "
            "that the hexdump formatting will be preserved by the memory dump section!"
        )
        memdump_section = ResultSection(SCORE.NULL,
                                        'Example of a memory dump section',
                                        self.SERVICE_CLASSIFICATION,
                                        body_format=TEXT_FORMAT.MEMORY_DUMP,
                                        body=data)

        # ==================================================================
        # Re-Submitting files to the system
        #     Adding extracted files will have them resubmitted to the system for analysis
        if request.srl != '8cf8277a71e85122bf7ea4610c7cfcc0bfb6dee799be50a41b2f4b1321b3317f':
            # This IF just prevents resubmitting the same file in a loop for this example...
            temp_path = tempfile.mktemp(dir=self.working_directory)
            with open(temp_path, "w") as myfile:
                myfile.write(data)
            request.add_extracted(temp_path,
                                  "Extracted by some random magic!",
                                  display_name="file.txt")

        # ==================================================================
        # Supplementary files
        #     Adding supplementary files will save them on the datastore for future
        #      reference but won't reprocess those files.
        temp_path = tempfile.mktemp(dir=self.working_directory)
        with open(temp_path, "w") as myfile:
            myfile.write(json.dumps(urls))
        request.add_supplementary(temp_path,
                                  "These are urls as a JSON",
                                  display_name="urls.json")

        # ==================================================================
        # Wrap-up:
        #     Add all sections to the Result object
        result.add_section(default_section)
        result.add_section(section_color_map)
        result.add_section(url_section)
        result.add_section(memdump_section)
        request.result = result
Example 21
    def populate_result(self, current_lines, filename, request):
        result = Result()

        should_filter_out = False
        dump_sign_tool_output = False
        skip_detailed_output = False

        if len(current_lines) <= 1:
            return result

        status_line = current_lines[1]
        if status_line == "\tVerified:\tUnsigned":
            return result

        elif status_line.find("\tVerified:") != 0 or                   \
                status_line == "\tVerified:\tUntrusted Root" or           \
                status_line == "\tVerified:\tUntrusted Authority" or      \
                status_line == "\tVerified:\tUntrusted Certificate" or    \
                status_line == "\tVerified:\tMalformed" or                \
                status_line == "\tVerified:\tInvalid Chain":
            # This file has a signature but is not verified.
            result_section = ResultSection(
                score=SCORE.HIGH,
                title_text=("This file has an invalid/untrusted signature. "
                            "The file might have been modified or the "
                            "signature is just a fake one."))
            dump_sign_tool_output = True
            result.report_heuristic(SigCheck.AL_SigCheck_001)

        elif status_line == "\tVerified:\tExpired":
            # This file has a signature but is not verified.
            result_section = ResultSection(
                score=SCORE.LOW,
                title_text="This file has an expired signature.")
            dump_sign_tool_output = True
            result.report_heuristic(SigCheck.AL_SigCheck_002)

        elif status_line == "\tVerified:\tSigned":
            is_authorised_signers = False
            # Build the list of signers
            signers = []
            signers_tag_found = False
            i = 0
            while i < len(current_lines):
                if signers_tag_found:
                    if current_lines[i][0:2] == '\t\t':
                        # Skip the first two tabs.
                        signers.append(current_lines[i][2:])
                    else:
                        break
                elif current_lines[i].find("\tSigners:") == 0:
                    signers_tag_found = True
                i += 1

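            # trusted_name_list is assumed to hold complete signer chains
            # (lists of names); a match requires the exact chain, in order.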
            for trusted_name_item in self.trusted_name_list:
                if trusted_name_item == signers:
                    is_authorised_signers = True
                    break

            if is_authorised_signers:
                result_section = ResultSection(
                    score=SCORE.NOT,
                    title_text="This file is signed with trusted signers")
                result.report_heuristic(SigCheck.AL_SigCheck_003)
                should_filter_out = True

            else:
                result_section = ResultSection(
                    score=SCORE.INFO,
                    title_text="Signed with signers we don't "
                               "automatically filter out")
                result.report_heuristic(SigCheck.AL_SigCheck_004)

        else:
            self.log.error(
                "The sigcheck output:\n%s\ncontained unexpected results %s" %
                ("\n".join(current_lines), status_line))
            result_section = ResultSection(
                score=SCORE.MED,
                title_text="Unexpected result from sigcheck ... to investigate."
            )
            result.report_heuristic(SigCheck.AL_SigCheck_005)

        if should_filter_out and not request.ignore_filtering:
            request.drop()

        if skip_detailed_output:
            result.add_section(result_section)
            return result

        # Expand our result with the sigcheck output.
        self._add_sigcheck_output(current_lines, result_section)

        # Optionally expand our result with the signtool output.
        if dump_sign_tool_output:
            self._add_signtool_output(filename, result_section)

        result.add_section(result_section)
        return result
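For context, here is a minimal sketch of how per-file sigcheck output might be collected and handed to populate_result. The sigcheck command line and the output splitting are assumptions for illustration, not part of the original service:

    def run_sigcheck(self, filename, request):
        # Hypothetical driver: the flags and output layout are assumptions.
        import subprocess
        proc = subprocess.Popen(['sigcheck', '-a', '-i', filename],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        stdout, _ = proc.communicate()
        current_lines = stdout.splitlines()
        request.result = self.populate_result(current_lines, filename, request)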
Example n. 22
    def execute(self, request):
        """
        Main Module.
        """
        result = Result()
        request.result = result

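        # Only scan small files: code-tagged files under 50000 bytes, or
        # untyped ("unknown") files under 5000 bytes.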
        if (request.task.size or 0) < 50000 and (
                request.tag.startswith('code') or
                (request.tag == "unknown" and (request.task.size or 0) < 5000)):
            patterns = PatternMatch()

            alfile = request.download()
            with open(alfile, "rb") as f:
                raw = f.read()

            # Get all IOCs that hit in the original file (to filter later;
            # the FrankenStrings service should catch them anyway)
            pat_values = patterns.ioc_match(raw,
                                            bogon_ip=True,
                                            just_network=False)
            before = []
            for k, val in pat_values.iteritems():
                if val == "":
                    asc_asc = unicodedata.normalize('NFKC', val).encode(
                        'ascii', 'ignore')
                    before.append(asc_asc)
                else:
                    for v in val:
                        before.append(v)

            # --- Stage 1 ----------------------------------------------------------------------------------------------
            # Get script(s) that we want
            code_extracts = [('^unknown$', self.convert_wide_unicode),
                             ('.*html.*', self.extract_htmlscript)]

            extracted_parts = None
            for tu in code_extracts:
                if re.match(re.compile(tu[0]), request.tag):
                    extracted_parts = tu[1](raw)
                    break
            if extracted_parts:
                parsed = list(extracted_parts)
            else:
                parsed = [raw]

            # --- Stage 2 ----------------------------------------------------------------------------------------------
            # Hack time!
            for script in parsed:
                extract_file = False
                layer = script
                layers_list = []

                if request.deep_scan:
                    self.max_attempts = 50

                techniques = [
                    ('VBE Decode', self.vbe_decode, True),
                    ('MSWord macro vars', self.mswordmacro_vars, False),
                    ('Powershell vars', self.powershell_vars, False),
                    ('Concat strings', self.concat_strings, False),
                    ('String replace', self.string_replace, False),
                    ('Powershell carets', self.powershell_carets, False),
                    ('Array of strings', self.array_of_strings, False),
                    ('Fake array vars', self.vars_of_fake_arrays, False),
                    ('Simple XOR function', self.simple_xor_function, False),
                    ('Charcode', self.charcode, False),
                    ('Charcode hex', self.charcode_hex, False),
                    ('B64 Decode', self.b64decode_str, False)
                ]
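                # Each entry is (name, function, extract_on_hit). A technique
                # takes the current layer and returns (final, res): res is the
                # transformed layer, or None when nothing matched; final=True
                # stops the retry loop after a successful pass, and
                # extract_on_hit marks the deobfuscated file for extraction.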

                done = False
                idx = 0
                while not done:
                    if idx > self.max_attempts:
                        break
                    done = True
                    for name, technique, extract in techniques:
                        final, res = technique(layer)
                        if res:
                            layers_list.append((name, res))
                            if extract:
                                extract_file = True
                            # Looks like it worked, restart with new layer
                            layer = res
                            done = final
                            if done:
                                break
                    idx += 1

                if len(layers_list) > 0:
                    final_score = len(layers_list) * 10
                    clean = self.clean_up_final_layer(layers_list[-1][1])
                    if clean != raw:
                        pat_values = patterns.ioc_match(clean,
                                                        bogon_ip=True,
                                                        just_network=False)
                        after = []
                        for k, val in pat_values.iteritems():
                            if val == "":
                                asc_asc = unicodedata.normalize(
                                    'NFKC', val).encode('ascii', 'ignore')
                                after.append(asc_asc)
                            else:
                                for v in val:
                                    after.append(v)
                        diff_tags = list(
                            set(before).symmetric_difference(set(after)))
                        # Additional checks to decide whether the file should
                        # be extracted; the score threshold is arbitrary.
                        if (len(clean) > 1000 and final_score > 500) or (
                                len(before) < len(after)):
                            extract_file = True
                        res = ResultSection(
                            SCORE.NULL,
                            "CrowBar detected possible obfuscated script:")
                        mres = ResultSection(
                            SCORE.NULL,
                            "The following CrowBar modules made deobfuscation attempts:",
                            parent=res)
                        mres.score = final_score
                        lcount = Counter([x[0] for x in layers_list])
                        for l, c in lcount.iteritems():
                            mres.add_line("{0}, {1} time(s).".format(l, c))
                        if extract_file:
                            self.submit_extracted(clean, res, request)
                        # Display final layer
                        lres = ResultSection(
                            SCORE.NULL,
                            "Final layer:",
                            body_format=TEXT_FORMAT.MEMORY_DUMP,
                            parent=res)
                        if extract_file:
                            lres.add_line("First 500 bytes of file:")
                            lres.add_line(clean[:500])
                        else:
                            lres.add_line("First 5000 bytes of file:")
                            lres.add_line(clean[:5000])
                        # Look for all IOCs in final layer
                        if len(pat_values) > 0 and len(diff_tags) > 0:
                            for ty, val in pat_values.iteritems():
                                if val == "":
                                    asc_asc = unicodedata.normalize(
                                        'NFKC', val).encode('ascii', 'ignore')
                                    if asc_asc in diff_tags:
                                        res.add_tag(TAG_TYPE[ty], asc_asc,
                                                    TAG_WEIGHT.LOW)
                                else:
                                    for v in val:
                                        if v in diff_tags:
                                            res.add_tag(
                                                TAG_TYPE[ty], v,
                                                TAG_WEIGHT.LOW)
                        result.add_section(res)
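To make the technique contract concrete, here is a minimal sketch of one module written against it. This illustrates the (final, res) convention only; it is not the service's actual powershell_carets implementation:

    def powershell_carets(self, text):
        # Strip caret escapes used to obfuscate commands,
        # e.g. 'p^owersh^ell' -> 'powershell'.
        res = text.replace('^', '')
        if res == text:
            return False, None   # nothing matched: no new layer produced
        return False, res        # new layer; final=False keeps iterating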
Example n. 23
    def execute(self, request):
        request.result = Result()
        local_path = request.download()
        self.run_tosl(local_path, request)
Example n. 24
    def parse_direct_db(self, response):
        result = Result()

        res = self.lookup_source(response)
        if res:
            # Display source frequency if found
            result.add_section(res)

            res = self.lookup_upatre_downloader(response)
            if res:
                # Display Upatre data
                result.add_section(res)

            res, tags = self.lookup_callouts(response)
            if res:
                # Display Call-Outs
                result.add_section(res)

                # Add domain, ip and port tags
                for tag in tags:
                    result.append_tag(tag)

            res = self.lookup_spam_feed(response)
            if res:
                # Display info from SPAM feed
                result.add_section(res)

            res, tags = self.lookup_av_hits(response)
            if res:
                # Display Anti-virus result
                result.add_section(res)

                # Add Virus Tags
                for tag in tags:
                    result.append_tag(tag)

        return result
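The lookup helpers above return either a single ResultSection or a (section, tags) pair. A minimal sketch of the assumed shape of one such helper follows; the 'av_hits' response field is illustrative, not a documented key:

    def lookup_av_hits(self, response):
        # Hypothetical helper: returns (ResultSection or None, tags to append).
        hits = response.get('av_hits', [])  # field name is an assumption
        if not hits:
            return None, []
        section = ResultSection(SCORE.SURE, 'Anti-Virus hits')
        tags = []
        for virus_name in hits:
            section.add_line(virus_name)
            tags.append(VirusHitTag(virus_name))
        return section, tags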