Exemple #1
0
def test_rfc822_headers(config, from_header, to_header):
    """Check that a rendered Message carries the expected From/To/Subject headers."""
    rendered = Message(**config).as_string()
    headers = HeaderParser().parsestr(rendered)

    for name, expected in (('from', from_header), ('to', to_header)):
        assert name in headers
        assert headers.get(name) == expected
    assert 'subject' in headers
Exemple #2
0
def list_pypi_addons():
    """
    List add-ons available on pypi.

    Returns a list of ``Installable`` records combining:
    - releases found via the PyPI XML-RPC ``search`` call,
    - ``OFFICIAL_ADDONS`` looked up explicitly when the search missed them,
    - locally installed distributions whose metadata contains
      ``ADDON_KEYWORD``.
    """
    from ..config import ADDON_PYPI_SEARCH_SPEC
    import xmlrpc.client
    # Use the canonical HTTPS endpoint; the old plain-HTTP
    # pypi.python.org host is defunct.  NOTE(review): PyPI has since
    # disabled the XML-RPC ``search`` method entirely -- confirm against
    # the Warehouse API docs before relying on this.
    pypi = xmlrpc.client.ServerProxy("https://pypi.org/pypi")
    addons = pypi.search(ADDON_PYPI_SEARCH_SPEC)

    # Make sure every official add-on appears even when the keyword
    # search did not return it.
    for addon in OFFICIAL_ADDONS:
        if not any(a for a in addons if a['name'] == addon):
            versions = pypi.package_releases(addon)
            if versions:
                addons.append({"name": addon, "version": max(versions)})

    # Batch the per-release queries into a single round trip.
    multicall = xmlrpc.client.MultiCall(pypi)
    for addon in addons:
        name, version = addon["name"], addon["version"]
        multicall.release_data(name, version)
        multicall.release_urls(name, version)

    # Calls were interleaved data/urls, so de-interleave the results.
    results = list(multicall())
    release_data = results[::2]
    release_urls = results[1::2]
    packages = []

    for release, urls in zip(release_data, release_urls):
        if release and urls:
            # ignore releases without actual source/wheel/egg files,
            # or with empty metadata (deleted from PyPi?).
            urls = [ReleaseUrl(url["filename"], url["url"],
                               url["size"], url["python_version"],
                               url["packagetype"])
                    for url in urls]
            packages.append(
                Installable(release["name"], release["version"],
                            release["summary"], release["description"],
                            release["package_url"],
                            urls)
            )

    # Also add installed packages that have the correct keyword but
    # perhaps aren't featured on PyPI
    for dist in pkg_resources.working_set:
        info = HeaderParser().parsestr(
            '\n'.join(dist.get_metadata_lines(dist.PKG_INFO)))
        if ADDON_KEYWORD in info.get('Keywords', ''):
            packages.append(
                Installable(dist.project_name, dist.version,
                            info.get('Summary', ''), info.get('Description', ''),
                            '', [])
            )

    return packages
Exemple #3
0
def list_pypi_addons():
    """
    List add-ons available on pypi.

    Returns a list of Installable records combining PyPI XML-RPC search
    results, explicitly queried OFFICIAL_ADDONS, and locally installed
    distributions whose metadata carries ADDON_KEYWORD.
    """
    from ..config import ADDON_PYPI_SEARCH_SPEC
    import xmlrpc.client
    # NOTE(review): plain-HTTP endpoint and the XML-RPC ``search`` method
    # are long deprecated on PyPI -- confirm this still works.
    pypi = xmlrpc.client.ServerProxy("http://pypi.python.org/pypi")
    addons = pypi.search(ADDON_PYPI_SEARCH_SPEC)

    # Ensure every official add-on is listed even when the keyword search
    # did not return it.
    for addon in OFFICIAL_ADDONS:
        if not any(a for a in addons if a['name'] == addon):
            versions = pypi.package_releases(addon)
            if versions:
                addons.append({"name": addon, "version": max(versions)})

    # Batch per-release queries (data + urls per add-on) into one call.
    multicall = xmlrpc.client.MultiCall(pypi)
    for addon in addons:
        name, version = addon["name"], addon["version"]
        multicall.release_data(name, version)
        multicall.release_urls(name, version)

    # Calls were interleaved data/urls; de-interleave the result list.
    results = list(multicall())
    release_data = results[::2]
    release_urls = results[1::2]
    packages = []

    for release, urls in zip(release_data, release_urls):
        if release and urls:
            # ignore releases without actual source/wheel/egg files,
            # or with empty metadata (deleted from PyPi?).
            urls = [
                ReleaseUrl(url["filename"], url["url"], url["size"],
                           url["python_version"], url["packagetype"])
                for url in urls
            ]
            packages.append(
                Installable(release["name"], release["version"],
                            release["summary"], release["description"],
                            release["package_url"], urls))

    # Also add installed packages that have the correct keyword but
    # perhaps aren't featured on PyPI
    for dist in pkg_resources.working_set:
        info = HeaderParser().parsestr('\n'.join(
            dist.get_metadata_lines(dist.PKG_INFO)))
        if ADDON_KEYWORD in info.get('Keywords', ''):
            packages.append(
                Installable(dist.project_name, dist.version,
                            info.get('Summary', ''),
                            info.get('Description', ''), '', []))

    return packages
Exemple #4
0
 def __emailRawSmtp(self, data):
     """Relay a raw SMTP message: parse its headers for the envelope
     sender and all To/Cc/Bcc recipients, then hand the UTF-8 encoded
     payload to __sendSmtp."""
     parsed = HeaderParser().parsestr(data["rawSmtp"])
     sender = parsed.get('from')
     recipients = []
     for field in ('to', 'cc', 'bcc'):
         # get_all returns None when the header is absent.
         recipients.extend(parsed.get_all(field) or [])
     payload = data["rawSmtp"].encode("utf8")
     return self.__sendSmtp(payload, sender, recipients)
 def getEmail(self, uid):
     """Fetch one message by IMAP UID and return it as an Email object.

     Selects the current mailbox, resolves the UID to a message sequence
     number, then fetches the full body and the bare headers.
     """
     code, cnt = self.M.select()
     typ, a = self.M.search(None, '(UID ' + str(uid) + ')')
     # assumes the UID search yields exactly one sequence number -- TODO confirm
     i = int(a[0])
     body_str = self.M.fetch(i, 'RFC822')[1][0][1]
     header_str = self.M.fetch(i, 'RFC822.HEADER')[1][0][1]
     # (Removed dead code: the headers were parsed into an unused
     # 'subject_str' local that was never read.)
     m_mail = Email()
     m_mail.header = header_str
     m_mail.uid = uid
     m_mail.body = body_str
     return m_mail
 def getEmail(self, uid):
     """Fetch one message by IMAP UID and wrap it in an Email object."""
     code, cnt = self.M.select()
     typ, a = self.M.search(None, '(UID ' + str(uid) + ')')
     # assumes the UID search yields a single sequence number -- TODO confirm
     i = int(a[0])
     body_str = self.M.fetch(i, 'RFC822')[1][0][1]
     header_str = self.M.fetch(i, 'RFC822.HEADER')[1][0][1]
     header = HeaderParser().parsestr(header_str, True)
     # NOTE(review): subject_str is computed but never used (dead code).
     subject_str = header.get('SUBJECT')
     m_mail = Email()
     m_mail.header = header_str
     m_mail.uid = uid
     m_mail.body = body_str
     return m_mail
    def listMails(self):
        """Return an Email stub (uid + raw header) for every message in the
        currently selected mailbox."""
        mails = []
        code, cnt = self.M.select()
        for i in range(1, int(cnt[0]) + 1):
            # re.findall already returns strings: index the result list,
            # then convert.  (The original subscripted the ``map`` object
            # directly, which raises TypeError on Python 3.)
            uid = int(re.findall(r'\d+ \(UID (\d+)\)',
                                 self.M.fetch(i, '(UID)')[1][0])[0])

            header_str = self.M.fetch(i, 'RFC822.HEADER')[1][0][1]
            # (Removed dead code: headers were parsed into an unused
            # 'subject' local.)
            m_mail = Email()
            m_mail.header = header_str
            m_mail.uid = uid
            mails.append(m_mail)
        return mails
Exemple #8
0
 def __init__(self, server, uid, folder, header):
     """Cache message identity and parse the raw header text into
     commonly used fields (date, subject, from/to/cc/bcc).

     server/uid/folder identify the message on the mail server;
     header is the raw RFC 822 header text.
     """
     self.server = server
     self.uid = uid
     self.folder = folder
     self.header = header
     # Parse header
     valueByKey = HeaderParser().parsestr(header)
     # Join every address in the given header field into one string,
     # decoding the display names.
     def getWhom(field):
         return ', '.join(formataddr((self._decode(x), self._decode(y))) for x, y in getaddresses(valueByKey.get_all(field, [])))
     # Extract fields
     self.date = valueByKey.get('date')
     timePack = parsedate_tz(self.date)
     if not timePack:
         self.whenUTC = None
         self.whenLocal = None
     else:
         # When no timezone is present (last tuple slot is None), treat
         # the timestamp as UTC via timegm; otherwise honour the offset.
         timeStamp = timegm(timePack) if timePack[-1] is None else mktime_tz(timePack)
         self.whenUTC = datetime.datetime.utcfromtimestamp(timeStamp)
         self.whenLocal = datetime.datetime.fromtimestamp(timeStamp)
     self.subject = self._decode(valueByKey.get('subject', ''))
     self.fromWhom = getWhom('from')
     self.toWhom = getWhom('to')
     self.ccWhom = getWhom('cc')
     self.bccWhom = getWhom('bcc')
    def listMails(self):
        """Return an Email stub (uid + raw header) for each message in the
        currently selected mailbox."""
        mails = []
        code, cnt = self.M.select()
        for i in range(1, int(cnt[0]) + 1):
            # re.findall returns a list of strings: take the first match,
            # then convert to int.  (The original subscripted the ``map``
            # object, which raises TypeError on Python 3.)
            uid = int(
                re.findall(r'\d+ \(UID (\d+)\)',
                           self.M.fetch(i, '(UID)')[1][0])[0])

            header_str = self.M.fetch(i, 'RFC822.HEADER')[1][0][1]
            # (Removed dead code: headers were parsed into an unused
            # 'subject' local.)
            m_mail = Email()
            m_mail.header = header_str
            m_mail.uid = uid
            mails.append(m_mail)
        return mails
    def _rfc822_string_to_dict(
        cls, rfc822_string: str
    ) -> Dict[str, Union[List[str], str]]:
        """Convert metadata-version 2.1 RFC 822 text to a dictionary.

        Follows PEP 566's JSON-compatible metadata transformation
        (https://www.python.org/dev/peps/pep-0566/#json-compatible-metadata):
        multiple-use fields become lists, comma-separated fields marked
        "treat as multi" are split and stripped, Description is cleaned
        with inspect.cleandoc, and a message body (if any) overrides the
        Description header.  The result is canonicalized before return.
        """
        parsed = HeaderParser().parsestr(rfc822_string)
        fields = VERSIONED_METADATA_FIELDS[parsed.get("Metadata-Version")]
        metadata: Dict[str, Union[List[str], str]] = {}

        for key, value in parsed.items():
            if key in fields["MULTI"]:
                # Multiple-use header: accumulate every occurrence.
                metadata.setdefault(key, []).append(value)
            elif key in fields["TREAT_AS_MULTI"]:
                # Single header holding a comma-separated list.
                metadata[key] = [piece.strip() for piece in value.split(",")]
            elif key == "Description":
                metadata[key] = inspect.cleandoc(value)
            else:
                metadata[key] = value

        # A message body, when present, wins over the Description header.
        body = parsed.get_payload()
        if body:
            if "Description" in metadata:
                print("Both Description and payload given - ignoring Description")
            metadata["Description"] = body

        return _canonicalize(metadata)
Exemple #11
0
 def getDate(self):
     """Return the message's Date header formatted as DD.MM.YYYY
     (interpreted in local time; the header's zone offset is ignored
     by parsedate)."""
     headers = HeaderParser().parsestr(self.header, True)
     parsed = parsedate(headers.get('DATE'))
     local = time.localtime(time.mktime(parsed))
     return time.strftime('%d.%m.%Y', local)
Exemple #12
0
 def getSubject(self):
     """Return the raw SUBJECT header value of this message."""
     headers = HeaderParser().parsestr(self.header, True)
     return headers.get('SUBJECT')
    def post(self, request, *args, **kwargs):
        """Analyse submitted e-mail headers and render the report page.

        Parses the ``headers`` POST field, buckets headers into
        major / X- / security / other groups, walks the Received chain to
        estimate per-hop delays, and renders a pygal horizontal-bar chart
        of the delays into the template context.
        """
        mail_data = request.POST['headers'].strip()
        r = {}
        parsed_headers = HeaderParser().parsestr(mail_data)
        parsed_headers_dict = {}
        major_header_names = {
            'From', 'Message-Id', 'Date', 'Subject', 'To', 'Cc'
        }
        major_headers = {}
        x_headers = {}
        security_header_names = {
            'Received-SPF', 'Authentication-Results', 'DKIM-Signature',
            'ARC-Authentication-Results'
        }
        security_headers = {}
        other_headers = {}
        # Bucket each header; also expose every header with '-' replaced
        # by '_' so templates can reach them as attributes.
        for header in parsed_headers:
            parsed_headers_dict[header.replace(
                '-', '_')] = parsed_headers.get(header)
            if header in major_header_names:
                major_headers[header] = parsed_headers.get(header)
            elif header.startswith('X-'):
                x_headers[header] = parsed_headers.get(header)
            elif header in security_header_names:
                security_headers[header] = parsed_headers.get(header)
            else:
                other_headers[header] = parsed_headers.get(header)

        graph = []
        received = parsed_headers.get_all('Received')
        if received:
            # Only hops that name a sending or receiving host are useful.
            received = [i for i in received if ('from' in i or 'by' in i)]
        else:
            # Parser found no Received headers; scrape them from raw text.
            received = re.findall('Received:\s*(.*?)\n\S+:\s+', mail_data,
                                  re.X | re.DOTALL | re.I)
        # Hops are numbered newest-first; c counts down as rows are added.
        c = len(received)
        for i in range(len(received)):
            # The timestamp conventionally follows ';'; otherwise use the
            # hop's last physical line.
            if ';' in received[i]:
                line = received[i].split(';')
            else:
                line = received[i].split('\r\n')
            line = list(map(str.strip, line))
            line = [x.replace('\r\n', ' ') for x in line]
            try:
                if ';' in received[i + 1]:
                    next_line = received[i + 1].split(';')
                else:
                    next_line = received[i + 1].split('\r\n')
                next_line = list(map(str.strip, next_line))
                next_line = [x.replace('\r\n', '') for x in next_line]
            except IndexError:
                # Last entry in the chain: no later hop to compare with.
                next_line = None

            org_time = dateParser(line[-1])
            if not next_line:
                next_time = org_time
            else:
                next_time = dateParser(next_line[-1])

            # Extract (from-host, by-host, with/via-protocol) groups.
            if line[0].startswith('from'):
                data = re.findall(
                    """
                    from\s+
                    (.*?)\s+
                    by(.*?)
                    (?:
                        (?:with|via)
                        (.*?)
                        (?:\sid\s|$)
                        |\sid\s|$
                    )""", line[0], re.DOTALL | re.X)
            else:
                data = re.findall(
                    """
                    ()by
                    (.*?)
                    (?:
                        (?:with|via)
                        (.*?)
                        (?:\sid\s|$)
                        |\sid\s
                    )""", line[0], re.DOTALL | re.X)

            # NOTE(review): timedelta.seconds is always >= 0, so this
            # guard never fires; negative deltas wrap around instead --
            # confirm whether total_seconds() was intended.
            delay = (org_time - next_time).seconds
            if delay < 0:
                delay = 0

            try:
                ftime = org_time.utctimetuple()
                ftime = time.strftime('%m/%d/%Y %I:%M:%S %p', ftime)
                r[c] = {
                    'Timestmp':
                    org_time,
                    'Time':
                    ftime,
                    'Delay':
                    delay,
                    'Direction': [
                        x.replace('\n', ' ')
                        for x in list(map(str.strip, data[0]))
                    ]
                }
                c -= 1
            except IndexError:
                # Hop didn't match the from/by pattern; skip it.
                pass

        # Build chart rows: label by sender when known, else by receiver.
        for i in list(r.values()):
            if i['Direction'][0]:
                graph.append(["From: %s" % i['Direction'][0], i['Delay']])
            else:
                graph.append(["By: %s" % i['Direction'][1], i['Delay']])

        totalDelay = sum([x['Delay'] for x in list(r.values())])
        fTotalDelay = mha_tags.duration(totalDelay)
        delayed = True if totalDelay else False

        custom_style = Style(
            background='transparent',
            plot_background='transparent',
            font_family='googlefont:Open Sans',
            # title_font_size=12,
        )
        line_chart = pygal.HorizontalBar(style=custom_style,
                                         height=250,
                                         legend_at_bottom=True,
                                         tooltip_border_radius=10)
        line_chart.tooltip_fancy_mode = False
        line_chart.title = 'Total Delay is: %s' % fTotalDelay
        line_chart.x_title = 'Delay in seconds.'
        for i in graph:
            line_chart.add(i[0], i[1])
        chart = line_chart.render(is_unicode=True)

        context = {
            'hops': sorted(list(r.items()), key=lambda x: x[0]),
            'delayed': delayed,
            'n': parsed_headers,
            'chart': chart,
            'headers': parsed_headers_dict,
            'major_headers': major_headers,
            'x_headers': x_headers,
            'security_headers': security_headers,
            'other_headers': other_headers
        }
        return render(request, self.template_name, context=context)
Exemple #14
0
def index():
    """Flask view: on POST, analyse the pasted e-mail headers and render
    a per-hop delay report with a pygal chart; on GET, show the form."""
    if request.method == 'POST':
        # Drop any non-ASCII bytes up front so downstream parsing is safe.
        mail_data = request.form['headers'].strip().encode('ascii',
                                                           'ignore').decode()
        r = {}
        n = HeaderParser().parsestr(mail_data)
        graph = []
        received = n.get_all('Received')
        if received:
            # Only hops that name a sending or receiving host are useful.
            received = [i for i in received if ('from' in i or 'by' in i)]
        else:
            # Parser found no Received headers; scrape them from raw text.
            received = re.findall('Received:\s*(.*?)\n\S+:\s+', mail_data,
                                  re.X | re.DOTALL | re.I)
        # Hops are numbered newest-first; c counts down as rows are added.
        c = len(received)
        for i in range(len(received)):
            # The timestamp conventionally follows ';'; otherwise use the
            # hop's last physical line.
            if ';' in received[i]:
                line = received[i].split(';')
            else:
                line = received[i].split('\r\n')
            line = list(map(str.strip, line))
            line = list(map(lambda x: x.replace('\r\n', ' '), line))
            try:
                if ';' in received[i + 1]:
                    next_line = received[i + 1].split(';')
                else:
                    next_line = received[i + 1].split('\r\n')
                next_line = list(map(str.strip, next_line))
                next_line = list(
                    map(lambda x: x.replace('\r\n', ''), next_line))
            except IndexError:
                # Last entry in the chain: no later hop to compare with.
                next_line = None

            org_time = dateParser(line[-1])
            if not next_line:
                next_time = org_time
            else:
                next_time = dateParser(next_line[-1])

            # Extract (from-host, by-host, with/via-protocol) groups.
            if line[0].startswith('from'):
                data = re.findall(
                    """
                    from\s+
                    (.*?)\s+
                    by(.*?)
                    (?:
                        (?:with|via)
                        (.*?)
                        (?:\sid\s|$)
                        |\sid\s|$
                    )""", line[0], re.DOTALL | re.X)
            else:
                data = re.findall(
                    """
                    ()by
                    (.*?)
                    (?:
                        (?:with|via)
                        (.*?)
                        (?:\sid\s|$)
                        |\sid\s
                    )""", line[0], re.DOTALL | re.X)

            # import ipdb; ipdb.set_trace()
            # NOTE(review): timedelta.seconds is always >= 0, so this
            # guard never fires; negative deltas wrap around -- confirm
            # whether total_seconds() was intended.
            delay = (org_time - next_time).seconds
            if delay < 0:
                delay = 0

            try:
                ftime = org_time.utctimetuple()
                ftime = time.strftime('%m/%d/%Y %I:%M:%S %p', ftime)
                r[c] = {
                    'Timestmp':
                    org_time,
                    'Time':
                    ftime,
                    'Delay':
                    delay,
                    'Direction':
                    list(
                        map(lambda x: x.replace('\n', ' '),
                            list(map(str.strip, data[0]))))
                }
                c -= 1
            except IndexError:
                # Hop didn't match the from/by pattern; skip it.
                pass

        # Build chart rows: label by sender when known, else by receiver.
        for i in r.values():
            if i['Direction'][0]:
                graph.append(["From: %s" % i['Direction'][0], i['Delay']])
            else:
                graph.append(["By: %s" % i['Direction'][1], i['Delay']])

        totalDelay = sum(map(lambda x: x['Delay'], r.values()))
        fTotalDelay = utility_processor()['duration'](totalDelay)
        delayed = True if totalDelay else False

        custom_style = Style(
            background='transparent',
            plot_background='transparent',
            font_family='googlefont:Open Sans',
            # title_font_size=12,
        )
        line_chart = pygal.HorizontalBar(style=custom_style,
                                         height=250,
                                         legend_at_bottom=True,
                                         tooltip_border_radius=10)
        line_chart.tooltip_fancy_mode = False
        line_chart.title = 'Total Delay is: %s' % fTotalDelay
        line_chart.x_title = 'Delay in seconds.'
        for i in graph:
            line_chart.add(i[0], i[1])
        chart = line_chart.render(is_unicode=True)

        # Fall back to a raw-text scan for any header the parser missed.
        summary = {
            'From':
            n.get('From') or getHeaderVal('from', mail_data),
            'To':
            n.get('to') or getHeaderVal('to', mail_data),
            'Cc':
            n.get('cc') or getHeaderVal('cc', mail_data),
            'Subject':
            n.get('Subject') or getHeaderVal('Subject', mail_data),
            'MessageID':
            n.get('Message-ID') or getHeaderVal('Message-ID', mail_data),
            'Date':
            n.get('Date') or getHeaderVal('Date', mail_data),
        }

        security_headers = [
            'Received-SPF', 'Authentication-Results', 'DKIM-Signature',
            'ARC-Authentication-Results'
        ]
        return render_template('index.html',
                               data=r,
                               delayed=delayed,
                               summary=summary,
                               n=n,
                               chart=chart,
                               security_headers=security_headers)
    else:
        return render_template('index.html')
Exemple #15
0
    def __init__(self, filename: Path):
        """Parse and validate the RFC 822 metadata headers of a PEP file.

        filename is the path of the PEP source file; its full text is
        read here, required headers are checked, and the PEP number,
        type, status, authors and other fields are extracted.
        """
        self.filename: Path = filename

        # Parse the headers.
        pep_text = filename.read_text(encoding="utf-8")
        metadata = HeaderParser().parsestr(pep_text)
        required_header_misses = PEP.required_headers - set(metadata.keys())
        if required_header_misses:
            _raise_pep_error(
                self,
                f"PEP is missing required headers {required_header_misses}")

        try:
            self.number = int(metadata["PEP"])
        except ValueError:
            _raise_pep_error(self, "PEP number isn't an integer")

        # Check PEP number matches filename
        # NOTE(review): this f-string contains no placeholder -- the file
        # name was presumably meant to be interpolated here; confirm.
        if self.number != int(filename.stem[4:]):
            _raise_pep_error(
                self,
                f"PEP number does not match file name ((unknown))",
                pep_num=True)

        # Title
        self.title: str = metadata["Title"]

        # Type
        self.pep_type: str = metadata["Type"]
        if self.pep_type not in TYPE_VALUES:
            _raise_pep_error(self,
                             f"{self.pep_type} is not a valid Type value",
                             pep_num=True)

        # Status
        status = metadata["Status"]
        if status in SPECIAL_STATUSES:
            # Map legacy/alias statuses onto their canonical names.
            status = SPECIAL_STATUSES[status]
        if status not in STATUS_VALUES:
            _raise_pep_error(self,
                             f"{status} is not a valid Status value",
                             pep_num=True)

        # Special case for Active PEPs.
        if status == STATUS_ACTIVE and self.pep_type not in ACTIVE_ALLOWED:
            msg = "Only Process and Informational PEPs may have an Active status"
            _raise_pep_error(self, msg, pep_num=True)

        # Special case for Provisional PEPs.
        if status == STATUS_PROVISIONAL and self.pep_type != TYPE_STANDARDS:
            msg = "Only Standards Track PEPs may have a Provisional status"
            _raise_pep_error(self, msg, pep_num=True)
        self.status: str = status

        # Parse PEP authors
        self.authors: list[Author] = _parse_authors(self, metadata["Author"],
                                                    AUTHOR_OVERRIDES)

        # Topic (for sub-indices)
        _topic = metadata.get("Topic", "").lower().split(",")
        self.topic: set[str] = {
            topic
            for topic_raw in _topic if (topic := topic_raw.strip())
        }

        # Other headers
        self.created = metadata["Created"]
        self.discussions_to = metadata["Discussions-To"]
        self.python_version = metadata["Python-Version"]
        self.replaces = metadata["Replaces"]
        self.requires = metadata["Requires"]
        self.resolution = metadata["Resolution"]
        self.superseded_by = metadata["Superseded-By"]
        if metadata["Post-History"]:
            # Squash duplicate whitespace
            self.post_history = " ".join(metadata["Post-History"].split())
        else:
            self.post_history = None
def generate_report(header):
    """Build a mail-header analysis report from a raw RFC 822 header string.

    Parses the Received chain to estimate per-hop delays, renders a pygal
    horizontal-bar chart of the delays, and returns a context dict with
    the hop data, summary headers and chart markup.

    Fixes applied: the function previously referenced an undefined
    ``mail_data`` variable (NameError), contained leftover
    ``pprint``/``exit(1)`` debug code that aborted every call, and built
    ``context`` without returning it.
    """
    r = {}
    n = HeaderParser().parsestr(header.strip())
    graph = []
    received = n.get_all('Received')
    if received:
        # Only hops that name a sending or receiving host are useful.
        received = [i for i in received if ('from' in i or 'by' in i)]
    else:
        # Parser found no Received headers; scrape them from raw text.
        received = re.findall('Received:\s*(.*?)\n\S+:\s+', header,
                              re.X | re.DOTALL | re.I)
    # Hops are numbered newest-first; c counts down as rows are added.
    c = len(received)
    for i in range(len(received)):
        # The timestamp conventionally follows ';'; otherwise use the
        # hop's last physical line.
        if ';' in received[i]:
            line = received[i].split(';')
        else:
            line = received[i].split('\r\n')
        line = list(map(str.strip, line))
        line = [x.replace('\r\n', ' ') for x in line]
        try:
            if ';' in received[i + 1]:
                next_line = received[i + 1].split(';')
            else:
                next_line = received[i + 1].split('\r\n')
            next_line = list(map(str.strip, next_line))
            next_line = [x.replace('\r\n', '') for x in next_line]
        except IndexError:
            # Last entry in the chain: no later hop to compare with.
            next_line = None

        org_time = dateParser(line[-1])
        if not next_line:
            next_time = org_time
        else:
            next_time = dateParser(next_line[-1])

        # Extract (from-host, by-host, with/via-protocol) groups.
        if line[0].startswith('from'):
            data = re.findall(
                """
                from\s+
                (.*?)\s+
                by(.*?)
                (?:
                    (?:with|via)
                    (.*?)
                    (?:\sid\s|$)
                    |\sid\s|$
                )""", line[0], re.DOTALL | re.X)
        else:
            data = re.findall(
                """
                ()by
                (.*?)
                (?:
                    (?:with|via)
                    (.*?)
                    (?:\sid\s|$)
                    |\sid\s
                )""", line[0], re.DOTALL | re.X)

        delay = (org_time - next_time).seconds
        if delay < 0:
            delay = 0

        try:
            ftime = org_time.utctimetuple()
            ftime = time.strftime('%m/%d/%Y %I:%M:%S %p', ftime)
            r[c] = {
                'Timestmp':
                org_time,
                'Time':
                ftime,
                'Delay':
                delay,
                'Direction':
                [x.replace('\n', ' ') for x in list(map(str.strip, data[0]))]
            }
            c -= 1
        except IndexError:
            # Hop didn't match the from/by pattern; skip it.
            pass

    # Build chart rows: label by sender when known, else by receiver.
    for i in list(r.values()):
        if i['Direction'][0]:
            graph.append(["From: %s" % i['Direction'][0], i['Delay']])
        else:
            graph.append(["By: %s" % i['Direction'][1], i['Delay']])

    totalDelay = sum([x['Delay'] for x in list(r.values())])
    fTotalDelay = mha_tags.duration(totalDelay)
    delayed = True if totalDelay else False

    custom_style = Style(
        background='transparent',
        plot_background='transparent',
        font_family='googlefont:Open Sans',
        # title_font_size=12,
    )
    line_chart = pygal.HorizontalBar(style=custom_style,
                                     height=250,
                                     legend_at_bottom=True,
                                     tooltip_border_radius=10)
    line_chart.tooltip_fancy_mode = False
    line_chart.title = 'Total Delay is: %s' % fTotalDelay
    line_chart.x_title = 'Delay in seconds.'
    for i in graph:
        line_chart.add(i[0], i[1])
    chart = line_chart.render(is_unicode=True)

    # Fall back to a raw-text scan for any header the parser missed.
    summary = {
        'From': n.get('From') or getHeaderVal('from', header),
        'To': n.get('to') or getHeaderVal('to', header),
        'Cc': n.get('cc') or getHeaderVal('cc', header),
        'Subject': n.get('Subject') or getHeaderVal('Subject', header),
        'MessageID': n.get('Message-ID')
        or getHeaderVal('Message-ID', header),
        'Date': n.get('Date') or getHeaderVal('Date', header),
    }

    security_headers = [
        'Received-SPF', 'Authentication-Results', 'DKIM-Signature',
        'ARC-Authentication-Results'
    ]
    context = {
        'data': r,
        'delayed': delayed,
        'summary': summary,
        'n': n,
        'chart': chart,
        'security_headers': security_headers
    }
    return context
Exemple #17
0
def get_http_header(packet_header_chunk, header_name, default_value=""):
    """Extract one HTTP header value from a raw request/response chunk.

    The first line (request/status line) is discarded, the remaining
    lines are parsed as RFC 822 headers, and the named header's value is
    returned -- or *default_value* when the header is absent.
    """
    lines = packet_header_chunk.splitlines()
    parsed = HeaderParser().parsestr("\n".join(lines[1:]))
    return parsed.get(header_name, default_value)
Exemple #18
0
def index():
    """Flask view: analyze pasted e-mail headers and render the results.

    On POST, reads raw headers from the ``headers`` form field, then:
      * parses the ``Received`` chain and computes per-hop delays,
      * renders a pygal horizontal-bar delay chart,
      * builds a From/To/Cc/Subject/Date summary,
      * geolocates every IP found in the chain via ipinfo.io,
      * runs a WHOIS lookup on the sender's domain and flags free-mail
        providers and very young domains,
      * scores SPF/DKIM/DMARC from the Authentication-Results header.
    On GET, renders the empty form.
    """
    if request.method == 'POST':
        # 'freemail' is assumed to be a one-domain-per-line list of free
        # e-mail providers in the working directory — TODO confirm.
        with open('freemail') as f:
            emailFree = [line.rstrip() for line in f]
        lista_IP = []  # IPs harvested from the Received chain
        mail_data = request.form['headers'].strip()
        r = {}  # hop number -> {Timestmp, Time, Delay, Direction}
        n = HeaderParser().parsestr(mail_data)
        graph = []  # [label, delay] pairs for the chart
        iP_Analizado = []  # geolocated Address records, one per IP
        received = n.get_all('Received')
        if received:
            received = [i for i in received if ('from' in i or 'by' in i)]
        else:
            # Fallback: regex-scan the raw text when the parser yields no
            # Received headers (e.g. badly folded input).
            received = re.findall('Received:\s*(.*?)\n\S+:\s+', mail_data,
                                  re.X | re.DOTALL | re.I)
        c = len(received)
        for i in range(len(received)):
            # Each Received header usually ends with "; <date>"; fall back
            # to splitting on line breaks when no ';' is present.
            if ';' in received[i]:
                line = received[i].split(';')
            else:
                line = received[i].split('\r\n')
            line = list(map(str.strip, line))
            line = [x.replace('\r\n', ' ') for x in line]
            try:
                # Peek at the next (earlier) hop to compute the delay.
                if ';' in received[i + 1]:
                    next_line = received[i + 1].split(';')
                else:
                    next_line = received[i + 1].split('\r\n')
                next_line = list(map(str.strip, next_line))
                next_line = [x.replace('\r\n', '') for x in next_line]
            except IndexError:
                next_line = None

            # Last segment is the timestamp portion of the header.
            org_time = dateParser(line[-1])
            if not next_line:
                next_time = org_time
            else:
                next_time = dateParser(next_line[-1])

            if line[0].startswith('from'):
                # Extract (sender-host, receiving-host, protocol).
                data = re.findall(
                    """
                    from\s+
                    (.*?)\s+
                    by(.*?)
                    (?:
                        (?:with|via)
                        (.*?)
                        (?:\sid\s|$)
                        |\sid\s|$
                    )""", line[0], re.DOTALL | re.X)
                # Collect bracketed IPv4 literals for later geolocation.
                tmp_lista_IP = re.findall(
                    r"\[(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\]", line[0], re.X)
                for x in range(len(tmp_lista_IP)):
                    lista_IP.append(tmp_lista_IP[x])
            else:
                # "by"-only hop: keep an empty first capture group so
                # Direction[0]/Direction[1] stay positionally consistent.
                data = re.findall(
                    """
                    ()by
                    (.*?)
                    (?:
                        (?:with|via)
                        (.*?)
                        (?:\sid\s|$)
                        |\sid\s
                    )""", line[0], re.DOTALL | re.X)

            # NOTE(review): timedelta.seconds is always non-negative and
            # ignores the .days component, so negative/clock-skewed hops
            # can yield large values — confirm intended behavior.
            delay = (org_time - next_time).seconds
            if delay <= 0:
                delay = 1

            try:
                ftime = org_time.utctimetuple()
                ftime = time.strftime('%m/%d/%Y %I:%M:%S %p', ftime)
                # Hops are numbered from the count downwards so that the
                # earliest hop gets the highest key.
                r[c] = {
                    'Timestmp':
                    org_time,
                    'Time':
                    ftime,
                    'Delay':
                    delay,
                    'Direction': [
                        x.replace('\n', ' ')
                        for x in list(map(str.strip, data[0]))
                    ]
                }
                c -= 1
            except IndexError:
                # data was empty: the regex did not match this hop.
                pass

        # Build chart rows, preferring the "from" host as the label.
        for i in list(r.values()):
            if i['Direction'][0]:
                graph.append(["From: %s" % i['Direction'][0], i['Delay']])
            else:
                graph.append(["By: %s" % i['Direction'][1], i['Delay']])

        totalDelay = sum([x['Delay'] for x in list(r.values())])
        fTotalDelay = utility_processor()['duration'](totalDelay)
        delayed = True if totalDelay else False

        custom_style = Style(
            background='transparent',
            plot_background='transparent',
            font_family='googlefont:Open Sans',
            # title_font_size=12,
        )
        line_chart = pygal.HorizontalBar(style=custom_style,
                                         height=250,
                                         legend_at_bottom=True,
                                         tooltip_border_radius=10)
        line_chart.tooltip_fancy_mode = False
        line_chart.title = 'Tiempo total: %s' % fTotalDelay
        line_chart.x_title = 'Tiempo en segundos.'
        for i in graph:
            line_chart.add(i[0], i[1])
        chart = line_chart.render(is_unicode=True)

        # Header summary; fall back to a raw-text scan via getHeaderVal
        # whenever the parsed header is missing.
        summary = {
            'From':
            n.get('From') or getHeaderVal('from', mail_data),
            'To':
            n.get('to') or getHeaderVal('to', mail_data),
            'Cc':
            n.get('cc') or getHeaderVal('cc', mail_data),
            'Subject':
            n.get('Subject') or getHeaderVal('Subject', mail_data),
            'MessageID':
            n.get('Message-ID') or getHeaderVal('Message-ID', mail_data),
            'Date':
            n.get('Date') or getHeaderVal('Date', mail_data),
            'Return':
            n.get('Return-Path') or getHeaderVal('Return-Path', mail_data),
        }

        security_headers = [
            'Received-SPF', 'Authentication-Results', 'DKIM-Signature',
            'ARC-Authentication-Results'
        ]

        # Geolocate every harvested IP via ipinfo.io (one HTTP request
        # per IP), filling in placeholder values for missing fields.
        for x in range(len(lista_IP)):
            web = 'http://ipinfo.io/' + lista_IP[x] + '/json'
            with urllib.request.urlopen(web) as url:
                datos_ip = json.loads(url.read().decode())
            if (('hostname' not in datos_ip)):
                datos_ip['hostname'] = 'Desconocido'
            if (('city' not in datos_ip)):
                datos_ip['city'] = 'Desconocida'
            if (('region' not in datos_ip)):
                datos_ip['region'] = 'Desconocida'
            if (('country' not in datos_ip)):
                datos_ip['country'] = 'Desconocido'
            if (('loc' not in datos_ip)):
                datos_ip['loc'] = '0,0'
            if (('org' not in datos_ip)):
                datos_ip['org'] = 'Desconocida'
            if (('postal' not in datos_ip)):
                datos_ip['postal'] = '0'
            iP_Analizado.append(
                Address(lista_IP[x], datos_ip['hostname'], datos_ip['city'],
                        datos_ip['region'], datos_ip['country'],
                        datos_ip['loc'], datos_ip['org'], datos_ip['postal']))

        # WHOIS lookup on the sender's domain; classify free-mail senders
        # and flag recently registered domains.
        try:
            email = n.get('From') or getHeaderVal('from', mail_data)
            d = email.split('@')[1].replace(">", "")
            if d in emailFree:
                summary['tipo'] = ' ( CUIDADO CORREO GRATUITO )'
            else:
                summary['tipo'] = 'Correo electronico normal'
            w = whois.query(d, ignore_returncode=1)
            if w:
                wd = w.__dict__
                for k, v in wd.items():
                    summary[k] = v  # Fill summary with the WHOIS record's __dict__ data
                    if k == 'creation_date':
                        # NOTE(review): divides days by 60 to get "months";
                        # 30 would be the usual approximation — confirm.
                        fecha = datetime.today() - v
                        meses = round(fecha.days / 60)
                        if meses < 12:
                            summary['diff'] = ' ( PELIGRO ' + str(
                                meses) + ' MESES DE VIDA!!!! )'
                        else:
                            # NOTE(review): `any` shadows the builtin here.
                            any = round(meses / 12)
                            summary['diff'] = str(any) + ' Años'
        except Exception as e:
            # Any WHOIS/parse failure degrades to placeholder values.
            print(e)
            summary['name'] = 'ERROR AL BUSCAR'
            summary['creation_date'] = 'ERROR AL BUSCAR'
            summary['last_updated'] = 'ERROR AL BUSCAR'
            summary['expiration_date'] = 'ERROR AL BUSCAR'
            summary['name_servers'] = 'ERROR AL BUSCAR'

        # Score SPF/DKIM/DMARC from Authentication-Results: +1 per pass,
        # -2 per explicit failure, 0 when the mechanism is absent.
        # NOTE(review): if the header is missing entirely, `analiza` may be
        # None and .find() below would raise — confirm getHeaderVal's
        # fallback always returns a string.
        analiza = n.get('Authentication-Results') or getHeaderVal(
            'Authentication-Results', mail_data)
        puntuacion = 0
        if analiza.find('spf=pass') >= 0:
            summary['SPF'] = 'OK.'
            puntuacion += 1
        else:
            if analiza.find('spf=') >= 0:
                summary['SPF'] = 'PELIGRO !!!!!! ( MUCHO CUIDADO )'
                puntuacion -= 2
            else:
                summary[
                    'SPF'] = ' SIN SEGURIDAD ( REVISA QUE EL EL ORIGEN Y A DONDE SE RETORNA EL MAIL )'

        if analiza.find('dkim=pass') >= 0:
            summary['DKIM'] = 'OK.'
            puntuacion += 1
        else:
            if analiza.find('dkim=') >= 0:
                summary['DKIM'] = 'PELIGRO !!!!!! ( MUCHO CUIDADO )'
                puntuacion -= 2
            else:
                summary[
                    'DKIM'] = ' SIN SEGURIDAD ( REVISA QUE EL EL ORIGEN Y A DONDE SE RETORNA EL MAIL )'

        if analiza.find('dmarc=pass') >= 0:
            summary['DMARC'] = 'OK.'
            puntuacion += 1
        else:
            if analiza.find('dmarc=') >= 0:
                summary['DMARC'] = 'PELIGRO !!!!!! ( MUCHO CUIDADO )'
                puntuacion -= 2
            else:
                summary[
                    'DMARC'] = ' SIN SEGURIDAD ( REVISA QUE EL EL ORIGEN Y A DONDE SE RETORNA EL MAIL )'

        # Percentage of the maximum score of 3 (can be negative).
        summary['resultado_seguridad'] = str(round(
            (puntuacion / 3) * 100, 2)) + '%'

        return render_template('index.html',
                               data=r,
                               delayed=delayed,
                               summary=summary,
                               n=n,
                               chart=chart,
                               security_headers=security_headers,
                               iP_Analizado=iP_Analizado)
    else:
        return render_template('index.html')
from email.parser import HeaderParser

# Demo: everything after the first line is parsed as RFC 822 headers.
request_text = """asdfaklsdfasdf
asdasdfas: asdfasef
qwefqwefq: egrwergw"""

# Drop the leading non-header line before handing off to the parser.
_lines = request_text.splitlines()
header_portion = "\n".join(_lines[1:])
headers = HeaderParser().parsestr(header_portion)

# A header that is absent falls back to the supplied default.
print(headers.get("asdfas", "blahblah"))
Exemple #20
0
 def getSender(self):
     """Return the bare e-mail address from this message's From header."""
     # Second argument True = headersonly: stop at the end of the headers.
     parsed_headers = HeaderParser().parsestr(self.header, True)
     # parseaddr yields (realname, address); keep only the address part.
     return parseaddr(parsed_headers.get('FROM'))[1]
Exemple #21
0
class AboutFile(object):
    """
    Represent an ABOUT file and functions to parse and validate a file.
    """
    def __init__(self, location=None):
        # Value of the about_resource field once parsed, else None.
        self.about_resource = None
        # Path to the ABOUT file on disk; parsing starts immediately if set.
        self.location = location

        self.parsed = None
        self.parsed_fields = None
        self.validated_fields = {}

        # map _file fields to a resolved OS file system absolute location
        # this is not used at all for now
        self.file_fields_locations = {}

        self.warnings = []
        self.errors = []

        if self.location:
            self.parse()

    def __repr__(self):
        return repr((self.parsed, self.parsed_fields, self.validated_fields,))

    def parse(self):
        """
        Parse and validate a the file at self.location object in an ABOUT
        structure.
        """
        try:
            with open(self.location, 'rU') as file_in:
                # FIXME: we should open the file only once, it is always small
                # enough to be kept in memory
                no_blank_lines, pre_proc_warnings = self.pre_process(file_in)
                self.warnings.extend(pre_proc_warnings)
                # HeaderParser.parse returns the parsed file as keys and
                # values (allows for multiple keys, and it doesn't validate)
                self.parsed = HeaderParser().parse(no_blank_lines)
        except IOError as e:
            err_msg = 'Cannot read ABOUT file:' + repr(e)
            self.errors.append(Error(FILE, None, self.location, err_msg))
        except Exception as e:
            err_msg = 'Unknown ABOUT processing error:' + repr(e)
            self.errors.append(Error(UNKNOWN, None, self.location, err_msg))

        if self.parsed:
            self.warnings.extend(self.normalize())
            self.validate()

    def pre_process(self, file_in):
        """
        Pre-process an ABOUT file before using the email header parser.
        Return a tuple with a file-like object and a list of warnings.
        In the file-like object we remove:
         - blank/empty lines
         - invalid lines that cannot be parsed
         - spaces around the colon separator
        This also checks for field names with incorrect characters that could
        not be otherwise parsed.
        """
        # TODO: add line endings normalization to LF
        about_string = ''
        warnings = []
        last_line_is_field_or_continuation = False

        for line in file_in.readlines():
            # continuation line
            if line.startswith(' '):
                warn = self.check_line_continuation(
                    line, last_line_is_field_or_continuation)
                if last_line_is_field_or_continuation:
                    about_string += line
                if warn:
                    warnings.append(warn)
                continue

            # empty or blank line
            if not line.rstrip():
                last_line_is_field_or_continuation = False
                continue

            # From here, we should have a field line and consider not a field
            # line if there is no colon
            warn, has_colon = self.check_line_has_colon(line)
            if not has_colon:
                last_line_is_field_or_continuation = False
                warnings.append(warn)
                continue

            # invalid space characters
            splitted = line.split(':', 1)
            field_name = splitted[0].rstrip()
            warn = self.check_invalid_space_characters(field_name, line)
            if warn:
                last_line_is_field_or_continuation = False
                warnings.append(warn)
                continue
            else:
                line = field_name + ':' + splitted[1]

            # invalid field characters
            _invalid_chars, warn = (
                    check_invalid_chars(field_name, line))
            if warn:
                warnings.append(warn)
                last_line_is_field_or_continuation = False
                continue

            # finally add valid field lines
            last_line_is_field_or_continuation = True
            about_string += line

        # TODO: we should either yield and not return a stringIO or return a
        # string
        return StringIO(about_string), warnings

    @staticmethod
    def check_line_continuation(line, continuation):
        # Return a Warn when a continuation line has no preceding field,
        # otherwise the empty string (falsy).
        warnings = ''
        if not continuation:
            msg = 'Line does not contain a field or continuation: ignored.'
            warnings = Warn(IGNORED, None, line, msg)
        return warnings

    @staticmethod
    def check_line_has_colon(line):
        # Return (warning-or-empty, has_colon) for a candidate field line.
        warnings = ''
        has_colon = True
        if ':' not in line:
            msg = 'Line does not contain a field: ignored.'
            warnings = Warn(IGNORED, None, line, msg)
            has_colon = False
        return warnings, has_colon

    @staticmethod
    def check_invalid_space_characters(field_name, line):
        # Return a Warn when the field name contains spaces, else ''.
        warnings = ''
        if ' ' in field_name:
            msg = 'Field name contains spaces: line ignored.'
            warnings = Warn(IGNORED, field_name, line, msg)
        return warnings


    def normalize(self):
        """
        Convert field names to lower case. If a field name occurs multiple
        times, keep only the last occurrence.
        """
        warnings = []
        for field_name, value in self.parsed.items():
            field_name = field_name.lower()
            if field_name in self.validated_fields:
                field_value = self.validated_fields[field_name]
                msg = 'Duplicate field names found: ignored.'
                warnings.append(Warn(IGNORED, field_name, field_value, msg))
            # if this is a multi-line value, we want to strip the first space
            # of the continuation lines
            if '\n' in value:
                value = value.replace('\n ', '\n')
            self.validated_fields[field_name] = value
        return warnings

    def validate(self):
        """
        Validate a parsed about file.
        """
        invalid_name = self.invalid_chars_in_about_file_name(self.location)
        if invalid_name:
            msg = 'The filename contains invalid character.'
            self.errors.append(Error(ASCII, None, invalid_name, msg))
        dup_name = self.duplicate_file_names_when_lowercased(self.location)
        if dup_name:
            msg = 'Duplicated filename in the same directory detected.'
            self.errors.append(Error(FILE, None, dup_name, msg))
        self.validate_field_values_are_not_empty()
        self.validate_about_resource_exist()
        self.validate_mandatory_fields_are_present()

        for field_name, value in self.validated_fields.items():
            self.check_is_ascii(self.validated_fields.get(field_name))
            self.validate_file_field_exists(field_name, value)
            self.validate_url_field(field_name, network_check=False)
            self.validate_spdx_license(field_name, value)
            self.check_date_format(field_name)

    def validate_field_values_are_not_empty(self):
        # Empty mandatory fields are errors; empty optional/custom fields
        # are only warnings.
        for field_name, value in self.validated_fields.items():
            if value.strip():
                continue

            if field_name in MANDATORY_FIELDS:
                err = Error(VALUE, field_name, None,
                            'This mandatory field has no value.')
                self.errors.append(err)
            elif field_name in OPTIONAL_FIELDS:
                err = Warn(VALUE, field_name, None,
                           'This optional field has no value.')
                self.warnings.append(err)
            else:
                warn = Warn(VALUE, field_name, None,
                            'This field has no value.')
                self.warnings.append(warn)

    def _exists(self, file_path):
        """
        Return True if path exists.
        """
        if file_path:
            return os.path.exists(self._location(file_path))

    def _location(self, file_path):
        """
        Return absolute location for a posix file_path.
        """
        if file_path:
            file_path = os.path.join(os.path.dirname(self.location),
                                     file_path.strip())
            file_path = os.path.abspath(file_path)
        return file_path

    def _save_location(self, field_name, file_path):
        # TODO: we likely should not inject this in the validated fields and
        # maybe use something else for this
        self.file_fields_locations[field_name] = self._location(file_path)

    def validate_about_resource_exist(self):
        """
        Ensure that the resource referenced by the about_resource field
        exists.
        """
        about_resource = 'about_resource'
        # Note: a missing 'about_resource' field error will be caught in
        # validate_mandatory_fields_are_present(self)
        if (about_resource in self.validated_fields
            and self.validated_fields[about_resource]):
            self.about_resource = self.validated_fields[about_resource]

            if not self._exists(self.about_resource):
                self.errors.append(Error(FILE, about_resource,
                                         self.about_resource,
                                         'File does not exist.'))
        self._save_location(about_resource, self.about_resource)

    def validate_file_field_exists(self, field_name, file_path):
        """
        Ensure a _file field in the OPTIONAL_FIELDS points to an existing
        file.
        """
        if not field_name.endswith('_file'):
            return

        if not file_path:
            return

        if field_name not in OPTIONAL_FIELDS:
            return

        if not self._exists(file_path):
            self.warnings.append(Warn(FILE, field_name, file_path,
                                      'File does not exist.'))
            return

        self._save_location(field_name, file_path)

        try:
            with codecs.open(self._location(file_path),
                             'r', 'utf8', errors='replace') as f:
                # attempt to read the file to catch codec errors
                f.readlines()
        except Exception as e:
            self.errors.append(Error(FILE, field_name, file_path,
                                     'Cannot read file: %s' % repr(e)))
            return

    def validate_mandatory_fields_are_present(self):
        """
        Validate that mandatory fields are present.
        """
        for field_name in MANDATORY_FIELDS:
            if field_name not in self.validated_fields:
                self.errors.append(Error(VALUE, field_name, None,
                                         'Mandatory field missing'))

    def validate_known_optional_fields(self, field_name):
        """
        Validate which known optional fields are present.
        """
        if (field_name not in OPTIONAL_FIELDS
                and field_name not in MANDATORY_FIELDS
                and field_name not in FILE_LOCATIONS_FIELDS):
            msg = 'Not a mandatory or optional field'
            self.warnings.append(Warn(IGNORED, field_name,
                                      self.validated_fields[field_name],
                                      msg))

    def validate_spdx_license(self, field_name, field_value):
        if not field_name == 'license_spdx':
            return
        # FIXME: do we support more than one ID?
        # Not support multiple IDs
        spdx_id = field_value
        # valid id, matching the case
        if spdx_id in SPDX_LICENSE_IDS.values():
            return

        spdx_id_lower = spdx_id.lower()

        # conjunctions
        if spdx_id_lower in ['or', 'and']:
            return

        # lowercase check
        try:
            standard_id = SPDX_LICENSE_IDS[spdx_id_lower]
        except KeyError:
            self.errors.append(Error(SPDX, field_name, spdx_id,
                                     'Invalid SPDX license id.'))
        else:
            msg = ('Non standard SPDX license id case. Should be %r.'
                   % (standard_id))
            # Fix: report the offending license id; previously the builtin
            # `id` function was passed here instead of `spdx_id`.
            self.warnings.append(Warn(SPDX, field_name, spdx_id, msg))

    def validate_url_field(self, field_name, network_check=False):
        """
        Ensure that URL field is a valid URL. If network_check is True, do a
        network check to verify if it points to a live URL.
        """
        if (not field_name.endswith('_url')
            or field_name not in OPTIONAL_FIELDS):
            return

        # The "field is empty" warning will be thrown in the
        # "validate_field_values_are_not_empty"
        value = self.validated_fields[field_name]
        if not value:
            return

        try:
            is_url = self.check_url(value, network_check)
            if not is_url:
                msg = ('URL is not in a valid format or is not reachable.')
                self.warnings.append(Warn(URL, field_name, value, msg))
        except KeyError:
            return

    def check_is_ascii(self, s):
        """
        Return True if string is composed only of US-ASCII characters.
        """
        # NOTE(review): str.decode only exists on Python 2 (or on bytes in
        # Python 3) — this module appears to target Python 2 (httplib,
        # urlparse); confirm before porting.
        try:
            s.decode('ascii')
        except (UnicodeEncodeError, UnicodeDecodeError):
            msg = '%s is not valid US-ASCII.' % (s,)
            self.errors.append(Error(ASCII, s, None, msg))
            return False
        return True

    def check_date_format(self, field_name):
        """
        Return True if date_string has a valid date format: YYYY-MM-DD.
        """
        if field_name != 'date':
            return

        date_strings = self.validated_fields[field_name]
        if not date_strings:
            return

        supported_dateformat = '%Y-%m-%d'
        try:
            formatted = datetime.strptime(date_strings, supported_dateformat)
            return formatted
        except ValueError:
            msg = 'Unsupported date format, use YYYY-MM-DD.'
            self.warnings.append(Warn(DATE, field_name, date_strings, msg))
        return False

    def check_url(self, url, network_check=False):
        """
        Return True if a URL is valid. Optionally check that this is a live
        URL (using a HEAD request without downloading the whole file).
        """
        scheme, netloc, path, _p, _q, _frg = urlparse.urlparse(url)

        url_has_valid_format = scheme in ('http', 'https', 'ftp') and netloc
        if not url_has_valid_format:
            return False

        if network_check:
            if has_network_connectivity:
                # FIXME: HEAD request DO NOT WORK for ftp://
                return self.check_url_reachable(netloc, path)
            else:
                print('No network connection detected.')
        return url_has_valid_format

    @staticmethod
    def check_url_reachable(host, path):
        # FIXME: we are only checking netloc and path ... NOT the whole url
        # FXIME: this will not work with FTP
        try:
            conn = httplib.HTTPConnection(host)
            conn.request('HEAD', path)
        except (httplib.HTTPException, socket.error):
            return False
        else:
            # FIXME: we will consider a 404 as a valid status (True value)
            # This is the list of all the HTTP status code
            # http://en.wikipedia.org/wiki/List_of_HTTP_status_codes
            return conn.getresponse().status

    def get_custom_field_keys(self):
        # Return the field names that are neither mandatory nor optional.
        custom_key = []
        for key in self.validated_fields:
            if key not in MANDATORY_FIELDS + OPTIONAL_FIELDS:
                custom_key.append(key)
        return custom_key

    def get_row_data(self, updated_path, custom_keys):
        """
        Create a csv compatible row of data for this object.
        """
        row = [updated_path]
        no_multi_license_fields = ('license_text_file',
                                    'license_spdx',
                                    'dje_license',
                                    'dje_license_name')
        for field in MANDATORY_FIELDS + OPTIONAL_FIELDS:
            if field in self.validated_fields:
                row += [self.validated_fields[field]]
                # The following code is to catch is the input contians any
                # multiple licenses
                if field in no_multi_license_fields:
                    for lic_field in no_multi_license_fields:
                        try:
                            if '\n' in self.validated_fields[lic_field]:
                                self.errors.append(Error(VALUE,
                                                         lic_field,
                                                         self.validated_fields[field],
                                                         "Multiple Licenses are not supported."))
                        except Exception:
                            # lic_field absent (or value not a string): skip.
                            pass
            else:
                row += ['']

        # Add custom field value
        for key in custom_keys:
            try:
                row += [self.validated_fields[key]]
            except Exception:
                row += ['']

        warnings = [repr(w) for w in self.warnings]
        errors = [repr(e) for e in self.errors]
        row += ['\n'.join(warnings), '\n'.join(errors)]
        return row

    @staticmethod
    def invalid_chars_in_about_file_name(file_path):
        """
        Return a sequence of invalid characters found in a file name.
        From spec 0.8.0:
            A file name can contain only these US-ASCII characters:
            <li> digits from 0 to 9 </li>
            <li> uppercase and lowercase letters from A to Z</li>
            <li> the _ underscore, - dash and . period signs. </li>
        """
        supported = string.digits + string.ascii_letters + '_-.+'
        # Using the resource_name(file_path) will yield the following error on
        # windows:
        # Field: None, Value: [':', '\\', '\\', '\\', '\\', '\\', '\\'],
        # Message: The filename contains invalid character.
        # Perhaps it is better to simply use the os.path.basename(file_path)
        # file_name = resource_name(file_path)
        file_name = os.path.basename(file_path)
        return [char for char in file_name if char not in supported]

    @staticmethod
    def duplicate_file_names_when_lowercased(file_location):
        """
        Return a sequence of duplicate file names in the same directory as
        file_location when lower cased.
        From spec 0.8.0:
            The case of a file name is not significant. On case-sensitive file
            systems (such as Linux), a tool must raise an error if two ABOUT
            files stored in the same directory have the same lowercase file
            name.
        """
        # TODO: Add a test, only for a case sensitive FS, such as on Linux
        # Fix: track lowercased names in a separate "seen" set. The previous
        # code appended original-case names and then tested lowercased names
        # against that list, so duplicates were missed unless the directory
        # entries happened to be lowercase already.
        seen = set()
        duplicates = []
        for name in os.listdir(os.path.dirname(file_location)):
            lowered = name.lower()
            if lowered in seen:
                duplicates.append(name)
            seen.add(lowered)
        return duplicates

    def license_text(self):
        """
        Return the license text if the license_text_file field exists and the
        field value (file) exists.
        """
        location = self.file_fields_locations.get('license_text_file',)
        if location and os.path.exists(location):
            try:
                with open(location, 'rU') as f:
                    return f.read()
            except Exception:
                pass
        return ''

    def notice_text(self):
        """
        Return the text in a notice file if the notice_file field exists in a
        .ABOUT file and the file that is in the notice_file field exists
        """
        location = self.file_fields_locations.get('notice_file', '')
        if location:
            try:
                with open(location, 'rU') as f:
                    return f.read()
            except Exception:
                pass
        return ''

    def get_about_name(self):
        """
        Return the about object's name.
        """
        return self.parsed.get('name', '')

    def get_dje_license_name(self):
        """
        Return the about object's dje_license_name.
        """
        return self.parsed.get('dje_license_name', '')
Exemple #22
0
def get_http_header(packet_header_chunk, header_name, default_value = ""):
    """Return the value of *header_name* from a raw HTTP header chunk.

    Skips the first line (the request/status line) and parses the rest as
    RFC 822 headers; *default_value* is returned when the header is absent.
    """
    newline_position = packet_header_chunk.find("\n")
    # Fix: with no newline the chunk is just the request/status line and
    # contains no headers.  Previously find() returned -1 and the slice
    # [-1+1:] kept the whole chunk, so the request line itself could be
    # (mis)parsed as a header.
    if newline_position == -1:
        return default_value
    header_portion = packet_header_chunk[newline_position+1:]
    headers = HeaderParser().parsestr(header_portion)
    return headers.get(header_name, default_value)
Exemple #23
0
    async def pep_command(self, ctx: Context, pep_number: str) -> None:
        """Fetches information about a PEP and sends it to the channel."""
        # Validate the argument: only digits are accepted, otherwise show
        # the command help instead of guessing.
        if pep_number.isdigit():
            pep_number = int(pep_number)
        else:
            await ctx.send_help(ctx.command)
            return

        # Handle PEP 0 directly because it's not in .rst or .txt so it can't be accessed like other PEPs.
        if pep_number == 0:
            return await self.send_pep_zero(ctx)

        # PEP sources live as either pep-NNNN.txt or pep-NNNN.rst; try both.
        possible_extensions = ['.txt', '.rst']
        found_pep = False
        for extension in possible_extensions:
            # Attempt to fetch the PEP
            pep_url = f"{self.base_github_pep_url}{pep_number:04}{extension}"
            log.trace(f"Requesting PEP {pep_number} with {pep_url}")
            response = await self.bot.http_session.get(pep_url)

            if response.status == 200:
                log.trace("PEP found")
                found_pep = True

                pep_content = await response.text()

                # Taken from https://github.com/python/peps/blob/master/pep0/pep.py#L179
                # The PEP preamble is RFC 822 style headers, so the stdlib
                # HeaderParser can read the metadata fields directly.
                pep_header = HeaderParser().parse(StringIO(pep_content))

                # Assemble the embed
                pep_embed = Embed(
                    title=f"**PEP {pep_number} - {pep_header['Title']}**",
                    description=f"[Link]({self.base_pep_url}{pep_number:04})",
                )

                pep_embed.set_thumbnail(url=ICON_URL)

                # Add the interesting information
                fields_to_check = ("Status", "Python-Version", "Created",
                                   "Type")
                for field in fields_to_check:
                    # Check for a PEP metadata field that is present but has an empty value
                    # embed field values can't contain an empty string
                    if pep_header.get(field, ""):
                        pep_embed.add_field(name=field,
                                            value=pep_header[field])
                # NOTE(review): no `break` here, so after a successful .txt
                # fetch the .rst URL is still requested; a non-404 error on
                # that second request would overwrite the embed — confirm
                # whether a `break` was intended.

            elif response.status != 404:
                # Any status other than 200 or 404 is unexpected; report an
                # error embed and stop trying further extensions.
                found_pep = True  # actually not, but it's easier to display this way
                log.trace(
                    f"The user requested PEP {pep_number}, but the response had an unexpected status code: "
                    f"{response.status}.\n{response.text}")

                error_message = "Unexpected HTTP error during PEP search. Please let us know."
                pep_embed = Embed(title="Unexpected error",
                                  description=error_message)
                pep_embed.colour = Colour.red()
                break

        # Neither extension returned 200 and no hard error occurred.
        if not found_pep:
            log.trace("PEP was not found")
            not_found = f"PEP {pep_number} does not exist."
            pep_embed = Embed(title="PEP not found", description=not_found)
            pep_embed.colour = Colour.red()

        await ctx.message.channel.send(embed=pep_embed)
Exemple #24
0
def _split_hop(hop):
    """Split one ``Received`` header into clause/timestamp parts.

    Well-formed headers separate the date with ``';'``; fall back to CRLF
    splitting for malformed ones.  Each part is stripped and has embedded
    CRLFs removed.
    """
    parts = hop.split(';') if ';' in hop else hop.split('\r\n')
    return [p.strip().replace('\r\n', '') for p in parts]


def index():
    """Flask view: analyse pasted email headers.

    GET renders the empty form.  POST parses ``request.form['headers']``,
    walks the ``Received`` chain to compute the per-hop delivery delay,
    renders a pygal horizontal-bar chart of the delays, and passes a
    summary (From/To/Cc/Subject/Message-ID/Date) to ``index.html``.

    Returns:
        The rendered ``index.html`` response.
    """
    if request.method != 'POST':
        return render_template('index.html')

    raw_headers = request.form['headers'].strip()
    r = {}
    n = HeaderParser().parsestr(raw_headers)
    graph = []

    # FIX: get_all() returns None when the header is absent; the original
    # then crashed on len(None).  Normalise to an empty list.
    received = n.get_all('Received') or []
    # Only hops that actually name a sending/receiving host are useful.
    received = [hop for hop in received if ('from' in hop or 'by' in hop)]

    c = len(received)  # hops are most-recent-first; number them descending
    for i, hop in enumerate(received):
        line = _split_hop(hop)
        try:
            next_line = _split_hop(received[i + 1])
        except IndexError:
            next_line = None  # last (origin) hop has no predecessor

        org_time = dateParser(line[1])
        # Origin hop: no earlier timestamp, so its delay is zero.
        next_time = dateParser(next_line[1]) if next_line else org_time

        # Raw strings for the regexes: '\s' in a non-raw literal is a
        # deprecated escape in modern Python.  Patterns are otherwise
        # byte-identical to the original.
        if line[0].startswith('from'):
            direction = re.findall(
                r"""
                from\s+
                (.*?)\s+
                by(.*?)
                (?:
                    (?:with|via)
                    (.*?)
                    (?:id|$)
                    |id|$
                )""", line[0], re.DOTALL | re.X)
        else:
            direction = re.findall(
                r"""
                ()by
                (.*?)
                (?:
                    (?:with|via)
                    (.*?)
                    (?:id|$)
                    |id
                )""", line[0], re.DOTALL | re.X)

        # FIX: the original computed org_time.second - next_time.second,
        # i.e. only the seconds *field* of each timestamp, so any hop
        # crossing a minute boundary produced a bogus delay.  Use the
        # full timestamp difference instead (dateParser is assumed to
        # return datetime objects — confirm against its definition).
        delay = int((org_time - next_time).total_seconds())
        if delay < 0:
            # Clock skew between servers can make the delta negative.
            delay = 0

        try:
            r[c] = {
                'Timestmp': org_time,
                'Time': org_time.isoformat(' '),
                'Delay': delay,
                'Direction': [x.replace('\n', ' ')
                              for x in map(str.strip, direction[0])],
            }
            c -= 1
        except IndexError:
            # Regex matched no from/by structure for this hop; skip it.
            pass

    for info in r.values():
        if info['Direction'][0]:
            graph.append(["From: %s" % info['Direction'][0], info['Delay']])
        else:
            graph.append(["By: %s" % info['Direction'][1], info['Delay']])

    totalDelay = sum(x['Delay'] for x in r.values())
    fTotalDelay = utility_processor()['duration'](totalDelay)
    delayed = bool(totalDelay)

    custom_style = Style(
        background='transparent',
        plot_background='transparent',
        font_family='googlefont:Open Sans',
        title_font_size=12,
    )
    line_chart = pygal.HorizontalBar(
        style=custom_style, height=250, legend_at_bottom=True,
        tooltip_border_radius=10)
    line_chart.tooltip_fancy_mode = False
    line_chart.title = 'Total Delay is: %s' % fTotalDelay
    line_chart.x_title = 'Delay in seconds.'
    for label, value in graph:
        line_chart.add(label, value)
    chart = line_chart.render(is_unicode=True)

    summary = {
        'From': n.get('from'),
        'To': n.get('to'),
        'Cc': n.get('cc'),
        'Subject': n.get('Subject'),
        'MessageID': n.get('Message-ID'),
        'Date': n.get('Date'),
    }
    return render_template(
        'index.html', data=r, delayed=delayed, summary=summary,
        n=n, chart=chart)