Example #1
0
            # Auto generate signatures...
            self.auto_generate_tags(self.file_res)

            # Here is general PE info
            self.get_pe_info(G_LCID)

            file_io = BytesIO(file_content)

            extracted_data = None
            try:
                extracted_data = PEFile.get_signify(file_io, self.file_res, self.log)
            except Exception as e:
                res = ResultSection(SCORE.NULL, "Error trying to check for PE signatures")
                res.add_line("Traceback:")
                res.add_lines(traceback.format_exc().splitlines())

                self.file_res.add_section(res)

    @staticmethod
    def get_signify(file_handle, res, log = None):

        if log == None:
            log = logging.getLogger("get_signify")
        else:
            log = log.getChild("get_signify")

        # first, let's try parsing the file
        try:
            s_data = signed_pe.SignedPEFile(file_handle)
        except Exception as e:
Example #2
0
    def get_signify(file_handle, res, log=None):
        """Check a PE file for an Authenticode signature using signify.

        Verifies the signature, adds result sections describing the
        outcome, reports heuristics for copied/self-signed signatures and
        tags certificate details (serial, issuer, subject, validity) on
        the supplied result object.

        :param file_handle: file-like object containing the PE data
        :param res: result object receiving sections, tags and heuristics
        :param log: optional parent logger; a "get_signify" child is used
        """
        if log is None:
            log = logging.getLogger("get_signify")
        else:
            log = log.getChild("get_signify")

        # first, let's try parsing the file
        try:
            s_data = signed_pe.SignedPEFile(file_handle)
        except Exception:
            log.error("Error parsing. May not be a valid PE? Traceback: %s" % traceback.format_exc())
            # Without a parsed file there is nothing to verify; returning
            # here avoids an unbound s_data (NameError) below.
            return

        # Now try checking for verification
        try:
            s_data.verify()

            # signature is verified
            res.add_section(ResultSection(SCORE.OK, "This file is signed"))
            res.report_heuristic(PEFile.AL_PEFile_002)

        except signify.exceptions.SignedPEParseError as e:
            if e.message == "The PE file does not contain a certificate table.":
                res.add_section(ResultSection(SCORE.NULL, "No file signature data found"))
            else:
                res.add_section(ResultSection(SCORE.NULL, "Unknown exception. Traceback: %s" % traceback.format_exc()))

        except signify.exceptions.AuthenticodeVerificationError as e:
            if e.message == "The expected hash does not match the digest in SpcInfo":
                # This sig has been copied from another program
                res.add_section(ResultSection(SCORE.HIGH, "The signature does not match the program data"))
                res.report_heuristic(PEFile.AL_PEFile_001)
            else:
                res.add_section(ResultSection(SCORE.NULL, "Unknown authenticode exception. Traceback: %s" % traceback.format_exc()))

        except signify.exceptions.VerificationError as e:
            if e.message.startswith("Chain verification from"):
                # probably self signed
                res.add_section(ResultSection(SCORE.MED, "File is self-signed"))
                res.report_heuristic(PEFile.AL_PEFile_003)
            else:
                res.add_section(
                    ResultSection(SCORE.NULL, "Unknown exception. Traceback: %s" % traceback.format_exc()))

        # Now try to get certificate and signature data
        sig_datas = []
        try:
            sig_datas.extend(list(s_data.signed_datas))
        except Exception:
            # Best-effort: a file with unreadable signature data is simply
            # reported without certificate details.
            pass

        if sig_datas:
            # Now extract certificate data from the sig
            for s in sig_datas:
                # Extract signer info. This is probably the most useful?
                res.add_tag(TAG_TYPE.CERT_SERIAL_NO, str(s.signer_info.serial_number))
                res.add_tag(TAG_TYPE.CERT_ISSUER, s.signer_info.issuer_dn)

                # Get cert used for signing, then add valid from/to info
                for cert in [x for x in s.certificates if x.serial_number == s.signer_info.serial_number]:
                    res.add_tag(TAG_TYPE.CERT_SUBJECT, cert.subject_dn)
                    res.add_tag(TAG_TYPE.CERT_VALID_FROM, cert.valid_from.isoformat())
                    res.add_tag(TAG_TYPE.CERT_VALID_TO, cert.valid_to.isoformat())

                # Dump every certificate in the chain as a plain-text section.
                for cert in s.certificates:
                    cert_res = ResultSection(SCORE.NULL, "Certificate Information")
                    # probably not worth doing tags for all this info?
                    cert_res.add_lines(["CERT_VERSION: %d" % cert.version,
                                        "CERT_SERIAL_NO: %d" % cert.serial_number,
                                        "CERT_ISSUER: %s" % cert.issuer_dn,
                                        "CERT_SUBJECT: %s" % cert.subject_dn,
                                        "CERT_VALID_FROM: %s" % cert.valid_from.isoformat(),
                                        "CERT_VALID_TO: %s" % cert.valid_to.isoformat()])
                    res.add_section(cert_res)
Example #3
0
    def get_signify(file_handle, res, log=None):
        """Inspect a PE file's Authenticode signature with the signify library.

        Adds result sections describing the verification outcome, reports
        heuristics for copied or self-signed signatures, and tags
        certificate details (serial, issuer, subject, validity window) on
        *res*.

        :param file_handle: file-like object containing the PE data
        :param res: result object receiving sections, tags and heuristics
        :param log: optional parent logger; a "get_signify" child is used
        """
        if log == None:
            log = logging.getLogger("get_signify")
        else:
            log = log.getChild("get_signify")

        # first, let's try parsing the file
        try:
            s_data = signed_pe.SignedPEFile(file_handle)
        except Exception as e:
            # NOTE(review): if parsing fails, s_data is never bound and the
            # s_data.verify() call below raises NameError — confirm intended.
            log.error("Error parsing. May not be a valid PE? Traceback: %s" %
                      traceback.format_exc())

        # Now try checking for verification
        try:
            s_data.verify()

            # signature is verified
            res.add_section(ResultSection(SCORE.OK, "This file is signed"))
            res.report_heuristic(PEFile.AL_PEFile_002)

        # NOTE(review): e.message is Python 2 only, and matching exact
        # exception text is brittle across signify versions — verify.
        except signify.exceptions.SignedPEParseError as e:
            if e.message == "The PE file does not contain a certificate table.":
                res.add_section(
                    ResultSection(SCORE.NULL, "No file signature data found"))

            else:
                res.add_section(
                    ResultSection(
                        SCORE.NULL, "Unknown exception. Traceback: %s" %
                        traceback.format_exc()))

        except signify.exceptions.AuthenticodeVerificationError as e:
            if e.message == "The expected hash does not match the digest in SpcInfo":
                # This sig has been copied from another program
                res.add_section(
                    ResultSection(
                        SCORE.HIGH,
                        "The signature does not match the program data"))
                res.report_heuristic(PEFile.AL_PEFile_001)
            else:
                res.add_section(
                    ResultSection(
                        SCORE.NULL,
                        "Unknown authenticode exception. Traceback: %s" %
                        traceback.format_exc()))

        except signify.exceptions.VerificationError as e:
            if e.message.startswith("Chain verification from"):
                # probably self signed
                res.add_section(ResultSection(SCORE.MED,
                                              "File is self-signed"))
                res.report_heuristic(PEFile.AL_PEFile_003)
            else:
                res.add_section(
                    ResultSection(
                        SCORE.NULL, "Unknown exception. Traceback: %s" %
                        traceback.format_exc()))

        # Now try to get certificate and signature data
        sig_datas = []
        try:
            sig_datas.extend([x for x in s_data.signed_datas])
        except:
            # Best-effort: files with unreadable signature data fall
            # through with no certificate details reported.
            pass

        if len(sig_datas) > 0:
            # Now extract certificate data from the sig
            for s in sig_datas:
                # Extract signer info. This is probably the most useful?
                res.add_tag(TAG_TYPE.CERT_SERIAL_NO,
                            str(s.signer_info.serial_number))
                res.add_tag(TAG_TYPE.CERT_ISSUER, s.signer_info.issuer_dn)

                # Get cert used for signing, then add valid from/to info
                for cert in [
                        x for x in s.certificates
                        if x.serial_number == s.signer_info.serial_number
                ]:
                    res.add_tag(TAG_TYPE.CERT_SUBJECT, cert.subject_dn)
                    res.add_tag(TAG_TYPE.CERT_VALID_FROM,
                                cert.valid_from.isoformat())
                    res.add_tag(TAG_TYPE.CERT_VALID_TO,
                                cert.valid_to.isoformat())

                # Dump every certificate in the chain as a plain-text section.
                for cert in s.certificates:
                    cert_res = ResultSection(SCORE.NULL,
                                             "Certificate Information")
                    # x509 CERTIFICATES
                    # ('CERT_VERSION', 230),
                    # ('CERT_SERIAL_NO', 231),
                    # ('CERT_SIGNATURE_ALGO', 232),
                    # ('CERT_ISSUER', 233),
                    # ('CERT_VALID_FROM', 234),
                    # ('CERT_VALID_TO', 235),
                    # ('CERT_SUBJECT', 236),
                    # ('CERT_KEY_USAGE', 237),
                    # ('CERT_EXTENDED_KEY_USAGE', 238),
                    # ('CERT_SUBJECT_ALT_NAME', 239),
                    # ('CERT_THUMBPRINT', 240),

                    # probably not worth doing tags for all this info?
                    cert_res.add_lines([
                        "CERT_VERSION: %d" % cert.version,
                        "CERT_SERIAL_NO: %d" % cert.serial_number,
                        "CERT_ISSUER: %s" % cert.issuer_dn,
                        "CERT_SUBJECT: %s" % cert.subject_dn,
                        "CERT_VALID_FROM: %s" % cert.valid_from.isoformat(),
                        "CERT_VALID_TO: %s" % cert.valid_to.isoformat()
                    ])
                    # cert_res.add_tag(TAG_TYPE.CERT_VERSION, str(cert.version))
                    # cert_res.add_tag(TAG_TYPE.CERT_SERIAL_NO, str(cert.serial_number))
                    # cert_res.add_tag(TAG_TYPE.CERT_ISSUER, cert.issuer_dn)
                    # cert_res.add_tag(TAG_TYPE.CERT_VALID_FROM, cert.valid_from.isoformat())
                    # cert_res.add_tag(TAG_TYPE.CERT_VALID_TO, cert.valid_to.isoformat())
                    # cert_res.add_tag(TAG_TYPE.CERT_SUBJECT, cert.subject_dn)

                    res.add_section(cert_res)
Example #4
0
            self.auto_generate_tags(self.file_res)

            # Here is general PE info
            self.get_pe_info(G_LCID)

            file_io = BytesIO(file_content)

            extracted_data = None
            try:
                extracted_data = PEFile.get_signify(file_io, self.file_res,
                                                    self.log)
            except Exception as e:
                res = ResultSection(SCORE.NULL,
                                    "Error trying to check for PE signatures")
                res.add_line("Traceback:")
                res.add_lines(traceback.format_exc().splitlines())

                self.file_res.add_section(res)

    @staticmethod
    def get_signify(file_handle, res, log=None):

        if log == None:
            log = logging.getLogger("get_signify")
        else:
            log = log.getChild("get_signify")

        # first, let's try parsing the file
        try:
            s_data = signed_pe.SignedPEFile(file_handle)
        except Exception as e:
    def execute(self, request):
        """Showcase the different result-building features of a service.

        Builds several kinds of ResultSection (plain text, colormap graph,
        URL lists, memory dump), tags a few domains, re-submits an
        extracted file, attaches a supplementary file, and finally stores
        everything on ``request.result``.

        :param request: the service request being processed
        """
        # Create a result object where all the sections will be stored
        result = Result()

        # ==================================================================
        # Default Section:
        #     Default section basically just dumps the text to the screen...
        #       All sections scores will be SUMed in the service result
        #       The Result classification will be the highest classification found in the sections
        default_section = ResultSection(SCORE.LOW,
                                        'Example of a default section',
                                        Classification.RESTRICTED)
        default_section.add_line("You can add line by line!")
        default_section.add_lines(["Or", "Multiple lines", "Inside a list!"])

        # ==================================================================
        # Color map Section:
        #     Creates a color map bar using a minimum and maximum domain
        cmap_min = 0
        cmap_max = 20
        color_map_data = {
            'type': 'colormap',
            'data': {
                'domain': [cmap_min, cmap_max],
                'values': [random.random() * cmap_max for _ in xrange(50)]
            }
        }
        section_color_map = ResultSection(SCORE.NULL,
                                          "Example of colormap result section",
                                          self.SERVICE_CLASSIFICATION,
                                          body_format=TEXT_FORMAT.GRAPH_DATA,
                                          body=json.dumps(color_map_data))

        # ==================================================================
        # URL section:
        #     Generate a list of clickable urls using a json encoded format
        url_section = ResultSection(SCORE.NULL,
                                    'Example of a simple url section',
                                    self.SERVICE_CLASSIFICATION,
                                    body_format=TEXT_FORMAT.URL,
                                    body=json.dumps({
                                        "name": "Google",
                                        "url": "https://www.google.com/"
                                    }))

        # You can add tags to any section although those tag will be brought up to the result object
        #     Tags are defined by a type, value and weight (confidence lvl)
        #         you can also add a classification and context if needed
        url_section.add_tag(TAG_TYPE.NET_DOMAIN_NAME, "google.com",
                            TAG_WEIGHT.LOW)
        url_section.add_tag(TAG_TYPE.NET_DOMAIN_NAME,
                            "bob.com",
                            TAG_WEIGHT.LOW,
                            classification=Classification.RESTRICTED)
        url_section.add_tag(TAG_TYPE.NET_DOMAIN_NAME,
                            "baddomain.com",
                            TAG_WEIGHT.LOW,
                            context=Context.BEACONS)

        # You may also want to provide a list of url! Also, No need to provide a name, the url link will be displayed
        urls = [{"url": "https://google.com/"},
                {"url": "https://google.ca/"},
                {"url": "https://microsoft.com/"}]
        url_section2 = ResultSection(
            SCORE.MED,
            'Example of a url section with multiple links',
            self.SERVICE_CLASSIFICATION,
            body_format=TEXT_FORMAT.URL,
            body=json.dumps(urls))
        # Add url_section2 as a subsection of url section
        #     The score of the subsections will automatically be added to the parent section
        url_section.add_section(url_section2)

        # ==================================================================
        # Memory dump section:
        #     Dump whatever string content you have into a <pre/> html tag so you can do your own formatting
        data = hexdump(
            "This is some random text that we will format as an hexdump and you'll see "
            "that the hexdump formatting will be preserved by the memory dump section!"
        )
        memdump_section = ResultSection(SCORE.NULL,
                                        'Example of a memory dump section',
                                        self.SERVICE_CLASSIFICATION,
                                        body_format=TEXT_FORMAT.MEMORY_DUMP,
                                        body=data)

        # ==================================================================
        # Re-Submitting files to the system
        #     Adding extracted files will have them resubmitted to the system for analysis
        if request.srl != '8cf8277a71e85122bf7ea4610c7cfcc0bfb6dee799be50a41b2f4b1321b3317f':
            # This IF just prevents resubmitting the same file in a loop for this example...
            # NamedTemporaryFile instead of tempfile.mktemp(): mktemp only
            # returns a name (open to a create race); this creates the file
            # atomically and keeps it on disk (delete=False) for pickup.
            with tempfile.NamedTemporaryFile(mode="w", dir=self.working_directory,
                                             delete=False) as myfile:
                temp_path = myfile.name
                myfile.write(data)
            request.add_extracted(temp_path,
                                  "Extracted by some random magic!",
                                  display_name="file.txt")

        # ==================================================================
        # Supplementary files
        #     Adding supplementary files will save them on the datastore for future
        #      reference but wont reprocess those files.
        with tempfile.NamedTemporaryFile(mode="w", dir=self.working_directory,
                                         delete=False) as myfile:
            temp_path = myfile.name
            myfile.write(json.dumps(urls))
        request.add_supplementary(temp_path,
                                  "These are urls as a JSON",
                                  display_name="urls.json")

        # ==================================================================
        # Wrap-up:
        #     Add all sections to the Result object
        result.add_section(default_section)
        result.add_section(section_color_map)
        result.add_section(url_section)
        result.add_section(memdump_section)
        request.result = result
Example #6
0
def process_network(network, al_result, guest_ip, classification):
    """Summarize sandbox network activity into tags and a result section.

    Parses per-protocol dumps out of *network*, drops guest-local and
    whitelisted traffic, tags contacted IPs, domains and URIs on
    *al_result*, and attaches a fixed-width "Network Activity" section
    when anything remains. Scores 100 per reported host, plus 100 when
    any domain flows exist, capped at 500 in the domain branch.

    :param network: dict of protocol records (hosts/udp/tcp/dns/http/...)
        — presumably Cuckoo-style sandbox output; confirm against caller
    :param al_result: result object receiving tags and the section
    :param guest_ip: sandbox guest IP, excluded from reporting
    :param classification: classification applied to the section and tags
    """
    global country_code_map
    # Lazily build the shared IP -> country-code map on first call.
    if not country_code_map:
        country_code_map = forge.get_country_code_map()

    log.debug("Processing network results.")
    result_map = {}

    network_res = ResultSection(title_text="Network Activity",
                                classification=classification,
                                body_format=TEXT_FORMAT.MEMORY_DUMP)
    network_score = 0

    # IP activity
    hosts = network.get("hosts", [])
    if len(hosts) > 0 and isinstance(hosts[0], dict):
        hosts = [host['ip'] for host in network.get("hosts", [])]

    udp = parse_protocol_data(network.get("udp", []), group_fields=['dport'])
    tcp = parse_protocol_data(network.get("tcp", []), group_fields=['dport'])
    smtp = parse_protocol_data(network.get("smtp", []), group_fields=['raw'])
    dns = parse_protocol_data(network.get("dns", []), group_by='request', group_fields=['type'])
    icmp = parse_protocol_data(network.get("icmp", []), group_fields=['type'])

    # Domain activity
    domains = parse_protocol_data(network.get("domains", []), group_by='domain')

    # Merge the extended (_ex) captures into the plain http/https records.
    http = parse_protocol_data(network.get("http", []), group_by='host',
                               group_fields=['port', 'uri', 'method'])
    http_ex = parse_protocol_data(network.get("http_ex", []), group_by='host',
                                  group_fields=['dport', 'uri', 'method'])
    _add_ex_data(http, http_ex, 'http', 80)

    https = parse_protocol_data(network.get("https", []), group_by='host',
                                group_fields=['port', 'uri', 'method'])
    https_ex = parse_protocol_data(network.get("https_ex", []), group_by='host',
                                   group_fields=['dport', 'uri', 'method'])
    _add_ex_data(https, https_ex, 'https', 443)

    # Miscellaneous activity
    # irc = network.get("irc")

    # Add missing ip hosts
    for proto in [udp, tcp, http, https, icmp, smtp]:
        for hst in proto.keys():
            if hst not in hosts and re.match(r"^[0-9.]+$", hst):
                hosts.append(hst)

    # network['hosts'] has all unique non-local network ips.
    for host in hosts:
        if host == guest_ip or wlist_check_ip(host):
            continue
        add_host_flows(host, 'udp', udp.get(host), result_map)
        add_host_flows(host, 'tcp', tcp.get(host), result_map)
        add_host_flows(host, 'smtp', smtp.get(host), result_map)
        add_host_flows(host, 'icmp', icmp.get(host), result_map)
        add_host_flows(host, 'http', http.get(host), result_map)
        add_host_flows(host, 'https', https.get(host), result_map)

    if hosts != [] and 'host_flows' not in result_map:
        # This only occurs if for some reason we don't parse corresponding flows out from the
        # network dump. So we'll just manually add the IPs so they're at least being reported.
        result_map['host_flows'] = {}
        for host in hosts:
            if host == guest_ip or wlist_check_ip(host):
                continue
            result_map['host_flows'][host] = []

    for domain in domains:
        if wlist_check_domain(domain):
            continue
        add_domain_flows(domain, 'dns', dns.get(domain), result_map)
        add_domain_flows(domain, 'http', http.get(domain), result_map)
        add_domain_flows(domain, 'https', https.get(domain), result_map)

    if 'host_flows' in result_map:
        # hosts_res = ResultSection(title_text='IP Flows',classification=classification)
        # host_flows is a map of host:protocol entries
        # protocol is a map of protocol_name:flows
        # flows is a set of unique flows by the groupings above
        host_lines = []
        for host in sorted(result_map['host_flows']):
            network_score += 100
            protocols = result_map['host_flows'].get(host, [])
            # NOTE(review): raises KeyError when host is absent from
            # country_code_map — confirm the map covers every IP.
            host_cc = country_code_map[host] or '??'
            host_cc = '('+host_cc+')'
            al_result.add_tag(tag_type=TAG_TYPE.NET_IP, value=host,
                              weight=TAG_WEIGHT.VHIGH, classification=classification,
                              usage="CORRELATION", context=Context.CONNECTS_TO)
            for protocol in sorted(protocols):
                flows = protocols[protocol]
                if 'http' in protocol:
                    for flow in flows:
                        uri = flow.get('uri', None)
                        if uri:
                            al_result.add_tag(tag_type=TAG_TYPE.NET_FULL_URI, value=uri,
                                              weight=TAG_WEIGHT.VHIGH, classification=classification,
                                              usage="CORRELATION", context=Context.CONNECTS_TO)
                flow_lines = dict_list_to_fixedwidth_str_list(flows)
                for line in flow_lines:
                    proto_line = "{0:<8}{1:<19}{2:<8}{3}".format(protocol, host, host_cc, line)
                    host_lines.append(proto_line)

        network_res.add_lines(host_lines)

    if 'domain_flows' in result_map:
        # domains_res = ResultSection(title_text='Domain Flows',classification=classification)
        # host_flows is a map of host:protocol entries
        # protocol is a map of protocol_name:flows
        # flows is a set of unique flows by the groupings above

        # Formatting..
        max_domain_len = 0
        for domain in result_map['domain_flows']:
            max_domain_len = max(max_domain_len, len(domain)+4)
        proto_fmt = "{0:<8}{1:<"+str(max_domain_len)+"}{2}"
        domain_lines = []
        network_score += 100
        for domain in sorted(result_map['domain_flows']):
            protocols = result_map['domain_flows'][domain]
            al_result.add_tag(tag_type=TAG_TYPE.NET_DOMAIN_NAME, value=domain,
                              weight=TAG_WEIGHT.VHIGH, classification=classification, context=Context.CONNECTS_TO)
            for protocol in sorted(protocols):
                flows = protocols[protocol]
                if 'http' in protocol:
                    for flow in flows:
                        uri = flow.get('uri', None)
                        if uri:
                            al_result.add_tag(tag_type=TAG_TYPE.NET_FULL_URI, value=uri,
                                              weight=TAG_WEIGHT.VHIGH, classification=classification,
                                              usage="CORRELATION", context=Context.CONNECTS_TO)
                flow_lines = dict_list_to_fixedwidth_str_list(flows)
                for line in flow_lines:
                    proto_line = proto_fmt.format(protocol, domain, line)
                    domain_lines.append(proto_line)
#                 domain_res.add_lines(protocol_lines)
#             domains_res.add_section(domain_res)
        network_res.add_lines(domain_lines)
        network_score = min(500, network_score)

    # Only attach the section when some activity was actually rendered.
    if len(network_res.body) > 0:
        network_res.score = network_score
        al_result.add_section(network_res)
    log.debug("Network processing complete.")
Example #7
0
    def check_xml_strings(self, path):
        xml_target_res = ResultSection(score=SCORE.NULL, title_text="Attached External Template Targets in XML")
        xml_ioc_res = ResultSection(score=SCORE.NULL, title_text="IOCs in XML:")
        xml_b64_res = ResultSection(score=SCORE.NULL, title_text="Base64 in XML:")
        try:
            template_re = re.compile(r'/attachedTemplate"\s+[Tt]arget="((?!file)[^"]+)"\s+[Tt]argetMode="External"')
            uris = []
            zip_uris = []
            b64results = {}
            b64_extracted = set()
            if zipfile.is_zipfile(path):
                try:
                    patterns = PatternMatch()
                except:
                    patterns = None
                z = zipfile.ZipFile(path)
                for f in z.namelist():
                    data = z.open(f).read()
                    if len(data) > 500000:
                        data = data[:500000]
                        xml_ioc_res.report_heuristics(Oletools.AL_Oletools_003)
                        xml_ioc_res.score = min(xml_ioc_res.score, 1)
                    zip_uris.extend(template_re.findall(data))
                    # Use FrankenStrings modules to find other strings of interest
                    # Plain IOCs
                    if patterns:
                        pat_strs = ["http://purl.org", "schemas.microsoft.com", "schemas.openxmlformats.org",
                                    "www.w3.org"]
                        pat_ends = ["themeManager.xml", "MSO.DLL", "stdole2.tlb", "vbaProject.bin", "VBE6.DLL", "VBE7.DLL"]
                        pat_whitelist = ['Management', 'Manager', "microsoft.com"]

                        st_value = patterns.ioc_match(data, bogon_ip=True)
                        if len(st_value) > 0:
                            for ty, val in st_value.iteritems():
                                if val == "":
                                    asc_asc = unicodedata.normalize('NFKC', val).encode('ascii', 'ignore')
                                    if any(x in asc_asc for x in pat_strs) \
                                            or asc_asc.endswith(tuple(pat_ends)) \
                                            or asc_asc in pat_whitelist:
                                        continue
                                    else:
                                        xml_ioc_res.score += 1
                                        xml_ioc_res.add_line("Found %s string: %s in file %s}"
                                                             % (TAG_TYPE[ty].replace("_", " "), asc_asc, f))
                                        xml_ioc_res.add_tag(TAG_TYPE[ty], asc_asc, TAG_WEIGHT.LOW)
                                else:
                                    ulis = list(set(val))
                                    for v in ulis:
                                        if any(x in v for x in pat_strs) \
                                                or v.endswith(tuple(pat_ends)) \
                                                or v in pat_whitelist:
                                            continue
                                        else:
                                            xml_ioc_res.score += 1
                                            xml_ioc_res.add_line("Found %s string: %s in file %s"
                                                                 % (TAG_TYPE[ty].replace("_", " "), v, f))
                                            xml_ioc_res.add_tag(TAG_TYPE[ty], v, TAG_WEIGHT.LOW)

                    # Base64
                    b64_matches = set()
                    for b64_tuple in re.findall('(([\x20]{0,2}[A-Za-z0-9+/]{3,}={0,2}[\r]?[\n]?){6,})',
                                                data):
                        b64 = b64_tuple[0].replace('\n', '').replace('\r', '').replace(' ', '')
                        uniq_char = ''.join(set(b64))
                        if len(uniq_char) > 6:
                            if len(b64) >= 16 and len(b64) % 4 == 0:
                                b64_matches.add(b64)
                        """
                        Using some selected code from 'base64dump.py' by Didier Stevens@https://DidierStevens.com
                        """
                        for b64_string in b64_matches:
                            # Decode one candidate base64 string.  Any failure in
                            # decoding, file-type sniffing, or IOC tagging is
                            # swallowed by the bare `except` closing this block.
                            try:
                                b64_extract = False
                                base64data = binascii.a2b_base64(b64_string)
                                sha256hash = hashlib.sha256(base64data).hexdigest()
                                # Skip payloads already extracted for an earlier string.
                                if sha256hash in b64_extracted:
                                    continue
                                # Search for embedded files of interest
                                # (size window: more than 500 bytes, under 8 MB).
                                if 500 < len(base64data) < 8000000:
                                    m = magic.Magic(mime=True)
                                    ftype = m.from_buffer(base64data)
                                    if 'octet-stream' not in ftype:
                                        for ft in self.filetypes:
                                            if ft in ftype:
                                                b64_file_path = os.path.join(self.working_directory,
                                                                             "{}_b64_decoded"
                                                                             .format(sha256hash[0:10]))
                                                # NOTE(review): the path is registered before the file
                                                # is written below — presumably add_extracted() only
                                                # queues the path for later pickup; confirm against
                                                # the service framework.
                                                self.request.add_extracted(b64_file_path,
                                                                           "Extracted b64 file during "
                                                                           "OLETools analysis.")
                                                with open(b64_file_path, 'wb') as b64_file:
                                                    b64_file.write(base64data)
                                                    self.log.debug("Submitted dropped file for analysis: {}"
                                                                   .format(b64_file_path))

                                                # Record: [encoded length, sample, display text,
                                                # raw data ("" for file hits), source file name].
                                                b64results[sha256hash] = [len(b64_string), b64_string[0:50],
                                                                          "[Possible base64 file contents in {}. "
                                                                          "See extracted files.]" .format(f), "", ""]

                                                b64_extract = True
                                                b64_extracted.add(sha256hash)
                                                break
                                # Not extracted as a file: for non-trivial payloads, try
                                # to render the decoded bytes as text and scan for IOCs.
                                if not b64_extract and len(base64data) > 30:
                                    # Python 2 str iteration: keep ASCII-only payloads.
                                    if all(ord(c) < 128 for c in base64data):
                                        # May raise UnicodeDecodeError for non-UTF-16 data;
                                        # only the bare except below catches that.
                                        check_utf16 = base64data.decode('utf-16').encode('ascii', 'ignore')
                                        if check_utf16 != "":
                                            asc_b64 = check_utf16
                                        else:
                                            asc_b64 = self.ascii_dump(base64data)
                                        # If data has less then 7 uniq chars then ignore
                                        uniq_char = ''.join(set(asc_b64))
                                        if len(uniq_char) > 6:
                                            if patterns:
                                                st_value = patterns.ioc_match(asc_b64, bogon_ip=True)
                                                if len(st_value) > 0:
                                                    for ty, val in st_value.iteritems():
                                                        # NOTE(review): this branch normalizes `val`
                                                        # only when it is the empty string — the
                                                        # condition looks inverted (the same pattern
                                                        # repeats further down); verify the intended
                                                        # test and the type `ioc_match` returns.
                                                        if val == "":
                                                            asc_asc = unicodedata.normalize('NFKC', val)\
                                                                .encode('ascii', 'ignore')
                                                            xml_ioc_res.add_tag(TAG_TYPE[ty], asc_asc, TAG_WEIGHT.LOW)
                                                        else:
                                                            ulis = list(set(val))
                                                            for v in ulis:
                                                                xml_ioc_res.add_tag(TAG_TYPE[ty], v, TAG_WEIGHT.LOW)
                                            b64results[sha256hash] = [len(b64_string), b64_string[0:50], asc_b64,
                                                                          base64data, "{}" .format(f)]
                            # NOTE(review): bare except silently drops all errors for this
                            # candidate string, including real bugs — consider narrowing.
                            except:
                                pass

                # Report each unique decoded-base64 finding as its own sub-section.
                b64index = 0
                for b64k, b64l in b64results.iteritems():
                    # Flat score as soon as at least one result exists.
                    xml_b64_res.score = 100
                    b64index += 1
                    sub_b64_res = (ResultSection(SCORE.NULL, title_text="Result {0} in file {1}"
                                                 .format(b64index, f), parent=xml_b64_res))
                    sub_b64_res.add_line('BASE64 TEXT SIZE: {}'.format(b64l[0]))
                    sub_b64_res.add_line('BASE64 SAMPLE TEXT: {}[........]'.format(b64l[1]))
                    sub_b64_res.add_line('DECODED SHA256: {}'.format(b64k))
                    subb_b64_res = (ResultSection(SCORE.NULL, title_text="DECODED ASCII DUMP:",
                                                  body_format=TEXT_FORMAT.MEMORY_DUMP,
                                                  parent=sub_b64_res))
                    subb_b64_res.add_line('{}'.format(b64l[2]))
                    # b64l[3] holds raw decoded data only for text (non-file) hits.
                    if b64l[3] != "":
                        if patterns:
                            st_value = patterns.ioc_match(b64l[3], bogon_ip=True)
                            if len(st_value) > 0:
                                xml_b64_res.score += 1
                                for ty, val in st_value.iteritems():
                                    # NOTE(review): same suspicious empty-string condition
                                    # as in the decode loop above — verify.
                                    if val == "":
                                        asc_asc = unicodedata.normalize('NFKC', val).encode\
                                            ('ascii', 'ignore')
                                        xml_b64_res.add_tag(TAG_TYPE[ty], asc_asc, TAG_WEIGHT.LOW)
                                    else:
                                        ulis = list(set(val))
                                        for v in ulis:
                                            xml_b64_res.add_tag(TAG_TYPE[ty], v, TAG_WEIGHT.LOW)
                z.close()
                # Keep only URIs that parse_uri accepts as indicators of interest.
                for uri in zip_uris:
                    if self.parse_uri(uri):
                        uris.append(uri)

                uris = list(set(uris))
                # If there are domains or IPs, report them
                if uris:
                    xml_target_res.score = 500
                    xml_target_res.add_lines(uris)
                    xml_target_res.report_heuristics(Oletools.AL_Oletools_001)

        # Best-effort analysis: any failure is only logged at debug level.
        except Exception as e:
            self.log.debug("Failed to analyze XML: {}".format(e))

        # Attach only the result sections that actually scored.
        if xml_target_res.score > 0:
            self.ole_result.add_section(xml_target_res)
        if xml_ioc_res.score > 0:
            self.ole_result.add_section(xml_ioc_res)
        if xml_b64_res.score > 0:
            self.ole_result.add_section(xml_b64_res)