    def _really_load(self, f, filename, ignore_discard, ignore_expires):
        now = time.time()

        magic = f.readline()
        if not re.search(self.magic_re, magic):
            f.close()
            raise LoadError(
                "%s does not look like a Netscape format cookies file" %
                filename)

        try:
            while 1:
                line = f.readline()
                if line == "": break

                # last field may be absent, so keep any trailing tab
                if endswith(line, "\n"): line = line[:-1]

                # skip comments and blank lines XXX what is $ for?
                if (startswith(string.strip(line), "#")
                        or startswith(string.strip(line), "$")
                        or string.strip(line) == ""):
                    continue

                domain, domain_specified, path, secure, expires, name, value = \
                        string.split(line, "\t")
                secure = (secure == "TRUE")
                domain_specified = (domain_specified == "TRUE")
                if name == "":
                    name = value
                    value = None

                initial_dot = startswith(domain, ".")
                assert domain_specified == initial_dot

                discard = False
                if expires == "":
                    expires = None
                    discard = True

                # assume path_specified is false
                c = Cookie(0, name, value, None, False, domain,
                           domain_specified, initial_dot, path, False, secure,
                           expires, discard, None, None, {})
                if not ignore_discard and c.discard:
                    continue
                if not ignore_expires and c.is_expired(now):
                    continue
                self.set_cookie(c)

        except:
            reraise_unmasked_exceptions((IOError, ))
            raise LoadError("invalid Netscape format file %s: %s" %
                            (filename, line))
    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
        if filename is None:
            if self.filename is not None: filename = self.filename
            else: raise ValueError(MISSING_FILENAME_TEXT)

        f = open(filename, "w")
        try:
            f.write(self.header)
            now = time.time()
            debug("Saving Netscape cookies.txt file")
            for cookie in self:
                if not ignore_discard and cookie.discard:
                    debug("   Not saving %s: marked for discard" % cookie.name)
                    continue
                if not ignore_expires and cookie.is_expired(now):
                    debug("   Not saving %s: expired" % cookie.name)
                    continue
                if cookie.secure: secure = "TRUE"
                else: secure = "FALSE"
                if startswith(cookie.domain, "."): initial_dot = "TRUE"
                else: initial_dot = "FALSE"
                if cookie.expires is not None:
                    expires = str(cookie.expires)
                else:
                    expires = ""
                if cookie.name is not None:
                    name = cookie.name
                else:
                    name = ""
                f.write(
                    string.join([cookie.domain, initial_dot, cookie.path,
                                 secure, expires, name, cookie.value], "\t")+
                    "\n")
        finally:
            f.close()
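
    # A minimal usage sketch.  The enclosing class is not shown above, so
    # "MozillaCookieJar" and a FileCookieJar-style load() that opens the
    # file and hands it to _really_load() are assumptions here.  Each
    # record in the file is one tab-separated line of seven fields:
    #
    #   domain  domain_specified  path  secure  expires  name  value
    #   .example.com  TRUE  /  FALSE  2147483647  session_id  abc123
    #
    # jar = MozillaCookieJar()
    # jar.load("cookies.txt", ignore_discard=True, ignore_expires=True)
    # jar.save("cookies-resaved.txt")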
Example #3
def parse_ns_headers(ns_headers):
    """Ad-hoc parser for Netscape protocol cookie-attributes.

    The old Netscape cookie format for Set-Cookie can for instance contain
    an unquoted "," in the expires field, so we have to use this ad-hoc
    parser instead of split_header_words.

    XXX This may not make the best possible effort to parse all the crap
    that Netscape Cookie headers contain.  Ronald Tschalar's HTTPClient
    parser is probably better, so we could do worse than to follow it if
    this ever gives any trouble.

    Currently, this is also used for parsing RFC 2109 cookies.

    """
    known_attrs = (
        "expires",
        "domain",
        "path",
        "secure",
        # RFC 2109 attrs (may turn up in Netscape cookies, too)
        "port",
        "max-age")

    result = []
    for ns_header in ns_headers:
        pairs = []
        version_set = False
        params = re.split(r";\s*", ns_header)
        for ii in range(len(params)):
            param = params[ii]
            param = string.rstrip(param)
            if param == "": continue
            if "=" not in param:
                k, v = param, None
            else:
                k, v = re.split(r"\s*=\s*", param, 1)
                k = string.lstrip(k)
            if ii != 0:
                lc = string.lower(k)
                if lc in known_attrs:
                    k = lc
                if k == "version":
                    # This is an RFC 2109 cookie.
                    version_set = True
                if k == "expires":
                    # convert expires date to seconds since epoch
                    if startswith(v, '"'): v = v[1:]
                    if endswith(v, '"'): v = v[:-1]
                    v = http2time(v)  # None if invalid
            pairs.append((k, v))

        if pairs:
            if not version_set:
                pairs.append(("version", "0"))
            result.append(pairs)

    return result
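
# A hedged, doctest-style example (matching the doctest convention used in
# split_header_words below) of what this parser produces; note the implicit
# ("version", "0") pair appended for Netscape cookies:
#
# >>> parse_ns_headers(["foo=bar; path=/; domain=.example.com; secure"])
# [[('foo', 'bar'), ('path', '/'), ('domain', '.example.com'), ('secure', None), ('version', '0')]]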
Example #4
def parse_ns_headers(ns_headers):
    """Ad-hoc parser for Netscape protocol cookie-attributes.

    The old Netscape cookie format for Set-Cookie can for instance contain
    an unquoted "," in the expires field, so we have to use this ad-hoc
    parser instead of split_header_words.

    XXX This may not make the best possible effort to parse all the crap
    that Netscape Cookie headers contain.  Ronald Tschalar's HTTPClient
    parser is probably better, so we could do worse than to follow it if
    this ever gives any trouble.

    Currently, this is also used for parsing RFC 2109 cookies.

    """
    known_attrs = ("expires", "domain", "path", "secure",
                   # RFC 2109 attrs (may turn up in Netscape cookies, too)
                   "port", "max-age")

    result = []
    for ns_header in ns_headers:
        pairs = []
        version_set = False
        for param in re.split(r";\s*", ns_header):
            param = string.rstrip(param)
            if param == "": continue
            if "=" not in param:
                if string.lower(param) in known_attrs:
                    k, v = param, None
                else:
                    # cookie with missing name
                    k, v = None, param
            else:
                k, v = re.split(r"\s*=\s*", param, 1)
                k = string.lstrip(k)
            if k is not None:
                lc = string.lower(k)
                if lc in known_attrs:
                    k = lc
                if k == "version":
                    # This is an RFC 2109 cookie.  Will be treated as RFC 2965
                    # cookie in rest of code.
                    # Probably it should be parsed with split_header_words, but
                    # that's too much hassle.
                    version_set = True
                if k == "expires":
                    # convert expires date to seconds since epoch
                    if startswith(v, '"'): v = v[1:]
                    if endswith(v, '"'): v = v[:-1]
                    v = http2time(v)  # None if invalid
            pairs.append((k, v))

        if pairs:
            if not version_set:
                pairs.append(("version", "0"))
            result.append(pairs)

    return result
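
# A hedged, doctest-style example of the missing-name branch above: a lone
# token that is not a known attribute becomes a (None, value) pair.
#
# >>> parse_ns_headers(["justavalue; path=/"])
# [[(None, 'justavalue'), ('path', '/'), ('version', '0')]]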
    def load_cookie_data(self,
                         filename,
                         ignore_discard=False,
                         ignore_expires=False):
        """Load cookies from file containing actual cookie data.

        Old cookies are kept unless overwritten by newly loaded ones.

        You should not call this method if the delayload attribute is set.

        I think each of these files contains all cookies for one user, domain,
        and path.

        filename: file containing cookies -- usually found in a file like
         C:\WINNT\Profiles\joe\Cookies\joe@blah[1].txt

        """
        now = int(time.time())

        cookie_data = self._load_cookies_from_file(filename)

        for cookie in cookie_data:
            flags = cookie["FLAGS"]
            secure = ((flags & 0x2000) != 0)
            filetime = (cookie["HIXP"] << 32) + cookie["LOXP"]
            expires = epoch_time_offset_from_win32_filetime(filetime)
            if expires < now:
                discard = True
            else:
                discard = False
            domain = cookie["DOMAIN"]
            initial_dot = startswith(domain, ".")
            if initial_dot:
                domain_specified = True
            else:
                # MSIE 5 does not record whether the domain cookie-attribute
                # was specified.
                # Assuming it wasn't is conservative, because with strict
                # domain matching this will match less frequently; with regular
                # Netscape tail-matching, this will match at exactly the same
                # times that domain_specified = True would.  It also means we
                # don't have to prepend a dot to achieve consistency with our
                # own & Mozilla's domain-munging scheme.
                domain_specified = False

            # assume path_specified is false
            # XXX is there other stuff in here? -- eg. comment, commentURL?
            c = Cookie(0, cookie["KEY"], cookie["VALUE"], None, False, domain,
                       domain_specified, initial_dot, cookie["PATH"], False,
                       secure, expires, discard, None, None, {"flags": flags})
            if not ignore_discard and c.discard:
                continue
            if not ignore_expires and c.is_expired(now):
                continue
            self.set_cookie(c)
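
# Hedged sketch of the conversion assumed above:
# epoch_time_offset_from_win32_filetime() is not shown, but a win32
# FILETIME counts 100-nanosecond intervals since 1601-01-01, so a
# minimal version might look like this (name is hypothetical):

WIN32_EPOCH_DIFF = 116444736000000000  # FILETIME value at 1970-01-01T00:00Z

def filetime_to_epoch_sketch(filetime):
    # 10**7 FILETIME ticks per second
    return int((filetime - WIN32_EPOCH_DIFF) / 10000000)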
Example #6
    def add_handler(self, handler):
        added = False
        for meth in methnames(handler):
            i = string.find(meth, "_")
            protocol = meth[:i]
            condition = meth[i + 1:]

            if startswith(condition, "error"):
                j = string.find(meth[i + 1:], "_") + i + 1
                kind = meth[j + 1:]
                try:
                    kind = int(kind)
                except ValueError:
                    pass
                lookup = self.handle_error.get(protocol, {})
                self.handle_error[protocol] = lookup
            elif (condition == "open"
                  and protocol not in ["do", "proxy"]):  # hack -- see below
                kind = protocol
                lookup = self.handle_open
            elif (condition in ["response", "request"]
                  and protocol != "redirect"):  # yucky hack
                # hack above is to fix an HTTPRedirectHandler problem: the
                # test above would otherwise mistake it for a processor
                # because of its redirect_request method :-((
                kind = protocol
                lookup = getattr(self, "process_" + condition)
            else:
                continue

            if lookup.has_key(kind):
                bisect.insort(lookup[kind], handler)
            else:
                lookup[kind] = [handler]
            added = True
            continue

        if added:
            # XXX why does self.handlers need to be sorted?
            bisect.insort(self.handlers, handler)
            handler.add_parent(self)
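
    # A worked sketch of the name-based dispatch above.  The handler class
    # below is an assumption -- only the method names matter to
    # add_handler():
    #
    #   class ExampleHTTPHandler:
    #       def http_open(self, req): ...           # -> handle_open["http"]
    #       def http_error_302(self, req): ...      # -> handle_error["http"][302]
    #       def http_request(self, req): ...        # -> process_request["http"]
    #       def http_response(self, req, resp): ... # -> process_response["http"]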
    def _really_load(self, f, filename, ignore_discard, ignore_expires):
        magic = f.readline()
        if not re.search(self.magic_re, magic):
            msg = "%s does not seem to contain cookies" % filename
            raise LoadError(msg)

        now = time.time()

        header = "Set-Cookie3:"
        boolean_attrs = ("port_spec", "path_spec", "domain_dot",
                         "secure", "discard", "rfc2109")
        value_attrs = ("version",
                       "port", "path", "domain",
                       "expires",
                       "comment", "commenturl")

        try:
            while 1:
                line = f.readline()
                if line == "": break
                if not startswith(line, header):
                    continue
                line = string.strip(line[len(header):])

                for data in split_header_words([line]):
                    name, value = data[0]
                    standard = {}
                    rest = {}
                    for k in boolean_attrs:
                        standard[k] = False
                    for k, v in data[1:]:
                        if k is not None:
                            lc = string.lower(k)
                        else:
                            lc = None
                        # don't lose case distinction for unknown fields
                        if (lc in value_attrs) or (lc in boolean_attrs):
                            k = lc
                        if k in boolean_attrs:
                            if v is None: v = True
                            standard[k] = v
                        elif k in value_attrs:
                            standard[k] = v
                        else:
                            rest[k] = v

                    h = standard.get
                    expires = h("expires")
                    discard = h("discard")
                    if expires is not None:
                        expires = iso2time(expires)
                    if expires is None:
                        discard = True
                    domain = h("domain")
                    domain_specified = startswith(domain, ".")
                    c = Cookie(h("version"), name, value,
                               h("port"), h("port_spec"),
                               domain, domain_specified, h("domain_dot"),
                               h("path"), h("path_spec"),
                               h("secure"),
                               expires,
                               discard,
                               h("comment"),
                               h("commenturl"),
                               rest,
                               h("rfc2109"),
                               )
                    if not ignore_discard and c.discard:
                        continue
                    if not ignore_expires and c.is_expired(now):
                        continue
                    self.set_cookie(c)
        except:
            reraise_unmasked_exceptions((IOError,))
            raise LoadError("invalid Set-Cookie3 format file %s" % filename)
Example #9
def split_header_words(header_values):
    r"""Parse header values into a list of lists containing key,value pairs.

    The function knows how to deal with ",", ";" and "=" as well as quoted
    values after "=".  A list of space-separated tokens is parsed as if the
    tokens were separated by ";".

    If the header_values argument contains multiple values, they are treated
    as if they were a single value separated by comma ",".

    This means that this function is useful for parsing header fields that
    follow this syntax (BNF as from the HTTP/1.1 specification, but we relax
    the requirement for tokens).

      headers           = #header
      header            = (token | parameter) *( [";"] (token | parameter))

      token             = 1*<any CHAR except CTLs or separators>
      separators        = "(" | ")" | "<" | ">" | "@"
                        | "," | ";" | ":" | "\" | <">
                        | "/" | "[" | "]" | "?" | "="
                        | "{" | "}" | SP | HT

      quoted-string     = ( <"> *(qdtext | quoted-pair ) <"> )
      qdtext            = <any TEXT except <">>
      quoted-pair       = "\" CHAR

      parameter         = attribute "=" value
      attribute         = token
      value             = token | quoted-string

    Each header is represented by a list of key/value pairs.  The value for a
    simple token (not part of a parameter) is None.  Syntactically incorrect
    headers will not necessarily be parsed as you would want.

    This is easier to describe with some examples:

    >>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz'])
    [[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]]
    >>> split_header_words(['text/html; charset="iso-8859-1"'])
    [[('text/html', None), ('charset', 'iso-8859-1')]]
    >>> split_header_words([r'Basic realm="\"foo\bar\""'])
    [[('Basic', None), ('realm', '"foobar"')]]

    """
    assert type(header_values) not in STRING_TYPES
    result = []
    for text in header_values:
        orig_text = text
        pairs = []
        while text:
            m = token_re.search(text)
            if m:
                text = unmatched(m)
                name = m.group(1)
                m = quoted_value_re.search(text)
                if m:  # quoted value
                    text = unmatched(m)
                    value = m.group(1)
                    value = escape_re.sub(r"\1", value)
                else:
                    m = value_re.search(text)
                    if m:  # unquoted value
                        text = unmatched(m)
                        value = m.group(1)
                        value = string.rstrip(value)
                    else:
                        # no value, a lone token
                        value = None
                pairs.append((name, value))
            elif startswith(string.lstrip(text), ","):
                # concatenated headers, as per RFC 2616 section 4.2
                text = string.lstrip(text)[1:]
                if pairs: result.append(pairs)
                pairs = []
            else:
                # skip junk
                non_junk, nr_junk_chars = re.subn(r"^[=\s;]*", "", text)
                assert nr_junk_chars > 0, (
                    "split_header_words bug: '%s', '%s', %s" %
                    (orig_text, text, pairs))
                text = non_junk
        if pairs: result.append(pairs)
    return result
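
# A hedged, doctest-style illustration of why parse_ns_headers above cannot
# use this function (assuming token_re and value_re behave like their stdlib
# cookielib counterparts): an unquoted Netscape expires date contains a
# comma, which is treated as a header separator here.
#
# >>> split_header_words(['expires=Wed, 09-Jun-2021 10:18:14 GMT'])
# [[('expires', 'Wed')], [('09-Jun-2021', None), ('10:18:14', None), ('GMT', None)]]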
    def load_cookie_data(self, filename,
                         ignore_discard=False, ignore_expires=False):
        """Load cookies from file containing actual cookie data.

        Old cookies are kept unless overwritten by newly loaded ones.

        You should not call this method if the delayload attribute is set.

        I think each of these files contains all cookies for one user, domain,
        and path.

        filename: file containing cookies -- usually found in a file like
         C:\WINNT\Profiles\joe\Cookies\joe@blah[1].txt

        """
        now = int(time.time())

        cookie_data = self._load_cookies_from_file(filename)

        for cookie in cookie_data:
            flags = cookie["FLAGS"]
            secure = ((flags & 0x2000) != 0)
            filetime = (cookie["HIXP"] << 32) + cookie["LOXP"]
            expires = epoch_time_offset_from_win32_filetime(filetime)
            if expires < now:
                discard = True
            else:
                discard = False
            domain = cookie["DOMAIN"]
            initial_dot = startswith(domain, ".")
            if initial_dot:
                domain_specified = True
            else:
                # MSIE 5 does not record whether the domain cookie-attribute
                # was specified.
                # Assuming it wasn't is conservative, because with strict
                # domain matching this will match less frequently; with regular
                # Netscape tail-matching, this will match at exactly the same
                # times that domain_specified = True would.  It also means we
                # don't have to prepend a dot to achieve consistency with our
                # own & Mozilla's domain-munging scheme.
                domain_specified = False

            # assume path_specified is false
            # XXX is there other stuff in here? -- eg. comment, commentURL?
            c = Cookie(0,
                       cookie["KEY"], cookie["VALUE"],
                       None, False,
                       domain, domain_specified, initial_dot,
                       cookie["PATH"], False,
                       secure,
                       expires,
                       discard,
                       None,
                       None,
                       {"flags": flags})
            if not ignore_discard and c.discard:
                continue
            if not ignore_expires and c.is_expired(now):
                continue
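            # note: this calls the CookieJar base class implementation
            # directly, bypassing any set_cookie() override in this class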
            CookieJar.set_cookie(self, c)