Example #1
 def test_bad_precond(self):
     adict=23
     try:
         assertutil.precondition(isinstance(adict, dict), "adict is required to be a dict.", 23, adict=adict, foo=None)
     except AssertionError as le:
         self.assertTrue(le.args[0] == "precondition: 'adict is required to be a dict.' <type 'str'>, 23 <type 'int'>, foo: None <type 'NoneType'>, 'adict': 23 <type 'int'>")
Example #2
    def search(self, lat, lon, radius=None, query=None, category=None):
        """Search for places near a lat/lon, within a radius (in kilometers)."""
        precondition(is_valid_lat(lat), lat)
        precondition(is_valid_lon(lon), lon)
        precondition(radius is None or is_numeric(radius), radius)
        precondition(query is None or isinstance(query, basestring), query)
        precondition(category is None or isinstance(category, basestring), category)

        if isinstance(query, unicode):
            query = query.encode('utf-8')
        if isinstance(category, unicode):
            category = category.encode('utf-8')

        kwargs = { }
        if radius:
            kwargs['radius'] = radius
        if query:
            kwargs['q'] = query
        if category:
            kwargs['category'] = category

        endpoint = self._endpoint('search', lat=lat, lon=lon)

        result = self._request(endpoint, 'GET', data=kwargs)[1]

        fc = json_decode(result)
        return [Feature.from_dict(f) for f in fc['features']]
Example #3
 def get_context_by_ip(self, ipaddr):
     """ The server uses guesses the latitude and longitude from
     the ipaddr and then does the same thing as get_context(),
     using that guessed latitude and longitude."""
     precondition(is_valid_ip(ipaddr), ipaddr)
     endpoint = self._endpoint('context_by_ip', ip=ipaddr)
     return json_decode(self._request(endpoint, "GET")[1])
Example #4
def a2b(cs):
    """
    @param cs the base-32 encoded data (a string)
    """
    precondition(could_be_base32_encoded(cs), "cs is required to be possibly base32 encoded data.", cs=cs)

    return a2b_l(cs, num_octets_that_encode_to_this_many_quintets(len(cs))*8)
Example #5
    def search(self, lat, lon, radius=None, query=None, category=None):
        """Search for places near a lat/lon, within a radius (in kilometers)."""
        precondition(is_valid_lat(lat), lat)
        precondition(is_valid_lon(lon), lon)
        precondition(radius is None or is_numeric(radius), radius)
        precondition(query is None or isinstance(query, basestring), query)
        precondition(category is None or isinstance(category, basestring),
                     category)

        if isinstance(query, unicode):
            query = query.encode('utf-8')
        if isinstance(category, unicode):
            category = category.encode('utf-8')

        kwargs = {}
        if radius:
            kwargs['radius'] = radius
        if query:
            kwargs['q'] = query
        if category:
            kwargs['category'] = category

        endpoint = self._endpoint('search', lat=lat, lon=lon)

        result = self._request(endpoint, 'GET', data=kwargs)[1]

        fc = json_decode(result)
        return [Feature.from_dict(f) for f in fc['features']]
Example #6
def crypto_dict_to_id(adict):
    """
    @precondition: adict is required to be a dict.: isinstance(adict, dict)
    """
    precondition(isinstance(adict, dict), "adict is required to be a dict.", adict=adict)

    return idlib.make_id(mencode.mencode(adict['pubkey']), 'broker')
Example #7
def skip_if_cannot_represent_argv(u):
    precondition(isinstance(u, unicode))
    try:
        u.encode(get_io_encoding())
    except UnicodeEncodeError:
        raise unittest.SkipTest(
            "A non-ASCII argv could not be encoded on this platform.")
Example #8
def py_xor(str1, str2):
    warnings.warn("deprecated", DeprecationWarning)
    precondition(len(str1) == len(str2),
                 "str1 and str2 are required to be of the same length.",
                 str1=str1,
                 str2=str2)

    if len(str1) % 4 == 0:
        a1 = array.array('i', str1)
        a2 = array.array('i', str2)
        for i in range(len(a1)):
            a2[i] = a2[i] ^ a1[i]
    elif len(str1) % 2 == 0:
        a1 = array.array('h', str1)
        a2 = array.array('h', str2)
        for i in range(len(a1)):
            a2[i] = a2[i] ^ a1[i]
    else:
        a1 = array.array('b', str1)
        a2 = array.array('b', str2)
        for i in range(len(a1)):
            a2[i] = a2[i] ^ a1[i]

    if hasattr(a2, 'tobytes'):  # PY3
        return a2.tobytes()
    else:  # PY2
        return a2.tostring()
Example #9
    def get_feature(self, simplegeohandle):
        """
        Return the GeoJSON representation of a feature.

        Return a deferred which, if the request succeeds, eventually
        fires with the Feature object. If the request fails, the
        deferred instead errbacks with the twisted.web.client.Response
        object.
        """
        precondition(is_simplegeohandle(simplegeohandle), "simplegeohandle is required to match the regex %s" % SIMPLEGEOHANDLE_RSTR, simplegeohandle=simplegeohandle)
        endpoint = self._endpoint('feature', simplegeohandle=simplegeohandle)
        d = self._request(endpoint, 'GET')
        def _handle_resp(resp):
            if (resp.code / 100) not in (2, 3):
                return Failure(resp)

            d2 = get_body(resp)
            def _handle_body(body):
                f = Feature.from_json(body)
                f._http_response = resp
                return f

            d2.addCallback(_handle_body)
            return d2
        d.addCallback(_handle_resp)
        return d
Example #10
 def get_context_by_ip(self, ipaddr):
     """ The server uses guesses the latitude and longitude from
     the ipaddr and then does the same thing as get_context(),
     using that guessed latitude and longitude."""
     precondition(is_valid_ip(ipaddr), ipaddr)
     endpoint = self._endpoint('context_by_ip', ip=ipaddr)
     return json_decode(self._request(endpoint, "GET")[1])
Example #11
 def next(self):
     precondition(self.i <= len(self.c.l), "The iterated ValueOrderedDict doesn't have this many elements.  Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, self.c)
     precondition((self.i == len(self.c.l)) or self.c.d.has_key(self.c.l[self.i][1]), "The iterated ValueOrderedDict doesn't have this key.  Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, (self.i < len(self.c.l)) and self.c.l[self.i], self.c)
     if self.i == len(self.c.l):
         raise StopIteration
     le = self.c.l[self.i]
     self.i += 1
     return le[0]
Example #12
    def send(self, msg, hint=HINT_NO_HINT, fast_fail_handler=None, timeout=None, commstratseqno=None):
        """
        @precondition: self._broker_id must be an id.: idlib.is_id(self._broker_id)
        """
        precondition(idlib.is_id(self._broker_id), "self._broker_id must be an id.", broker_id=self._broker_id)

        # debugprint("%s.send(): self._broker_id: %s\n", args=(self, self._broker_id,))
        self._tcpch.send_msg(self._broker_id, msg=msg, hint=hint, fast_fail_handler=fast_fail_handler)
Example #13
 def __next__(self):
     precondition(self.i <= len(self.c.l), "The iterated ValueOrderedDict doesn't have this many elements.  Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, self.c)
     precondition((self.i == len(self.c.l)) or self.c.l[self.i][1] in self.c.d, "The iterated ValueOrderedDict doesn't have this key.  Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, (self.i < len(self.c.l)) and self.c.l[self.i], self.c)
     if self.i == len(self.c.l):
         raise StopIteration()
     le = self.c.l[self.i]
     self.i += 1
     return le[0]
Example #14
def get_trailing_chars_without_lsbs(N):
    precondition((N >= 0) and (N < 5),
                 "N is required to be > 0 and < len(chars).",
                 N=N)
    if N == 0:
        return chars
    d = {}
    return ''.join(_get_trailing_chars_without_lsbs(N, d=d))
Example #15
 def delete_feature(self, simplegeohandle):
     """Delete a Places feature."""
     precondition(is_simplegeohandle(simplegeohandle),
                  "simplegeohandle is required to match the regex %s" %
                  SIMPLEGEOHANDLE_RSTR,
                  simplegeohandle=simplegeohandle)
     endpoint = self._endpoint('feature', simplegeohandle=simplegeohandle)
     return self._request(endpoint, 'DELETE')[1]
Example #16
 def get_context_by_address(self, address):
     """
     The server figures out the latitude and longitude from the
     street address and then does the same thing as get_context(),
     using that deduced latitude and longitude.
     """
     precondition(isinstance(address, basestring), address)
     endpoint = self._endpoint('context_by_address')
     return json_decode(self._request(endpoint, "GET", data={'address' : address})[1])
Example #17
def a2b(cs):
    """
    @param cs the base-32 encoded data (a string)
    """
    precondition(could_be_base32_encoded(cs),
                 "cs is required to be possibly base32 encoded data.",
                 cs=cs)

    return a2b_l(cs, num_octets_that_encode_to_this_many_quintets(len(cs)) * 8)
Example #18
 def delete_feature(self, simplegeohandle):
     """Delete a Places feature."""
     precondition(
         is_simplegeohandle(simplegeohandle),
         "simplegeohandle is required to match the regex %s" % SIMPLEGEOHANDLE_RSTR,
         simplegeohandle=simplegeohandle,
     )
     endpoint = self._endpoint("feature", simplegeohandle=simplegeohandle)
     return self._request(endpoint, "DELETE")[1]
Example #19
def py_xor_simple(str1, str2):
    """
    Benchmarks show that this is the same speed as py_xor() for small strings
    and much slower for large strings, so don't use it. --Zooko 2002-04-29
    """
    warnings.warn("deprecated", DeprecationWarning)
    precondition(len(str1) == len(str2), "str1 and str2 are required to be of the same length.", str1=str1, str2=str2)

    return ''.join(map(chr, map(operator.__xor__, map(ord, str1), map(ord, str2))))
Example #20
 def get_feature(self, simplegeohandle):
     """Return the GeoJSON representation of a feature."""
     precondition(
         is_simplegeohandle(simplegeohandle),
         "simplegeohandle is required to match the regex %s" % SIMPLEGEOHANDLE_RSTR,
         simplegeohandle=simplegeohandle,
     )
     endpoint = self._endpoint("feature", simplegeohandle=simplegeohandle)
     return Feature.from_json(self._request(endpoint, "GET")[1])
Example #21
def a2b_l(cs, lengthinbits):
    """
    @param lengthinbits the number of bits of data encoded in cs

    a2b_l() will return a result big enough to hold lengthinbits bits.  So for example if cs is
    4 characters long (encoding at least 15 and up to 20 bits) and lengthinbits is 16, then a2b_l()
    will return a string of length 2 (since 2 bytes is sufficient to store 16 bits).  If cs is 4
    characters long and lengthinbits is 20, then a2b_l() will return a string of length 3 (since
    3 bytes is sufficient to store 20 bits).

    Please see the warning in the docstring of b2a_l() regarding the use of b2a() versus b2a_l().

    @return the data encoded in cs
    """
    precondition(could_be_base32_encoded_l(cs, lengthinbits),
                 "cs is required to be possibly base32 encoded data.",
                 cs=cs,
                 lengthinbits=lengthinbits)

    qs = [ord(v) for v in string.translate(cs, c2vtranstable)]

    numoctets = div_ceil(lengthinbits, 8)
    numquintetsofdata = div_ceil(lengthinbits, 5)
    # append zero quintets for padding if needed
    numquintetsneeded = div_ceil(numoctets * 8, 5)
    qs.extend([0] * (numquintetsneeded - len(qs)))

    octets = []
    pos = 2048
    num = qs[0] * pos
    readybits = 5
    i = 1
    while len(octets) < numoctets:
        while pos > 256:
            pos = pos / 32
            num = num + (qs[i] * pos)
            i = i + 1
        octet = num / 256
        octets.append(octet)
        num = num - (octet * 256)
        num = num * 256
        pos = pos * 256
    assert len(octets) == numoctets, "len(octets): %s, numoctets: %s, octets: %s" % (len(octets), numoctets, octets)
    res = ''.join([chr(o) for o in octets])
    precondition(
        b2a_l(res, lengthinbits) == cs,
        "cs is required to be the canonical base-32 encoding of some data.",
        b2a(res),
        res=res,
        cs=cs)
    return res
Example #22
def skip_if_cannot_represent_filename(u):
    precondition(isinstance(u, unicode))

    enc = get_filesystem_encoding()
    if not unicode_platform():
        try:
            u.encode(enc)
        except UnicodeEncodeError:
            raise unittest.SkipTest(
                "A non-ASCII filename could not be encoded on this platform.")
Example #23
 def get_context_by_address(self, address):
     """
     The server figures out the latitude and longitude from the
     street address and then does the same thing as get_context(),
     using that deduced latitude and longitude.
     """
     precondition(isinstance(address, basestring), address)
     endpoint = self._endpoint('context_by_address')
     return json_decode(
         self._request(endpoint, "GET", data={'address': address})[1])
Example #24
def a2b_l(cs, lengthinbits):
    """
    @param lengthinbits the number of bits of data encoded in cs

    a2b_l() will return a result big enough to hold lengthinbits bits.  So for example if cs is
    4 characters long (encoding at least 15 and up to 20 bits) and lengthinbits is 16, then a2b_l()
    will return a string of length 2 (since 2 bytes is sufficient to store 16 bits).  If cs is 4
    characters long and lengthinbits is 20, then a2b_l() will return a string of length 3 (since
    3 bytes is sufficient to store 20 bits).  Note that b2a_l() does not mask off unused least-
    significant bits, so for example if cs is 4 characters long and lengthinbits is 17, then you
    must ensure that all three of the unused least-significant bits of cs are zero bits or you will
    get the wrong result.  This precondition is tested by assertions if assertions are enabled.
    (Generally you just require the encoder to ensure this consistency property between the least
    significant zero bits and value of lengthinbits, and reject strings that have a length-in-bits
    which isn't a multiple of 8 and yet don't have trailing zero bits, as improperly encoded.)

    Please see the warning in the docstring of b2a_l() regarding the use of b2a() versus b2a_l().

    @return the data encoded in cs
    """
    precondition(could_be_base32_encoded_l(cs, lengthinbits), "cs is required to be possibly base32 encoded data.", cs=cs, lengthinbits=lengthinbits)

    qs = map(ord, string.translate(cs, c2vtranstable))

    numoctets = (lengthinbits+7)/8
    numquintetsofdata = (lengthinbits+4)/5
    # strip trailing quintets that won't be used
    del qs[numquintetsofdata:]
    # zero out any unused bits in the final quintet
    if lengthinbits % 5 != 0:
        qs[-1] = qs[-1] >> (5-(lengthinbits % 5))
        qs[-1] = qs[-1] << (5-(lengthinbits % 5))
    # append zero quintets for padding if needed
    numquintetsneeded = (numoctets*8+4)/5
    qs.extend([0]*(numquintetsneeded-len(qs)))

    octets = []
    pos = 2048
    num = qs[0] * pos
    readybits = 5
    i = 1
    while len(octets) < numoctets:
        while pos > 256:
            pos = pos / 32
            num = num + (qs[i] * pos)
            i = i + 1
        octet = num / 256
        octets.append(octet)
        num = num - (octet * 256)
        num = num * 256
        pos = pos * 256
    assert len(octets) == numoctets, "len(octets): %s, numoctets: %s, octets: %s" % (len(octets), numoctets, octets,)
    res = ''.join(map(chr, octets))
    precondition(b2a_l(res, lengthinbits) == cs, "cs is required to be the canonical base-32 encoding of some data.", b2a(res), res=res, cs=cs)
    return res
Example #25
 def get_feature(self, simplegeohandle, zoom=None):
     """Return the GeoJSON representation of a feature. Zoom needs to be
     between 1-20, or None for the full polygon."""
     if not is_simplegeohandle(simplegeohandle):
         raise TypeError("simplegeohandle is required to match the regex %s, but it was %s :: %r" % (SIMPLEGEOHANDLE_RSTR, type(simplegeohandle), simplegeohandle))
     kwargs = {}
     if zoom:
         precondition(zoom >= 1 and zoom <= 20, zoom)
         kwargs['zoom'] = zoom
     endpoint = self._endpoint('feature', simplegeohandle=simplegeohandle)
     return Feature.from_json(self._request(endpoint, 'GET', data=kwargs)[1])
Example #26
    def __init__(self, broker_id=None, commstratseqno=None):
        """
        @precondition: broker_id must be None or an id.: (broker_id is None) or (idlib.is_id(broker_id))
        """
        precondition ((broker_id is None) or (idlib.is_id(broker_id)), "broker_id must be None or an id.", broker_id=broker_id)

        self.hint = HINT_NO_HINT
        self.hintnumexpectedresponses = 0
        self.hintnumexpectedsends = 0
        self._commstratseqno = commstratseqno
        self._broker_id = broker_id 
Example #27
def deep_validate_lat_lon(struc):
    precondition(isinstance(struc, (list, tuple, set)), 'argument must be a sequence (of sequences of...) numbers')
    if is_numeric(struc[0]):
        assert len(struc) == 2
        assert is_numeric(struc[1])
        assert is_valid_lat(struc[0])
        assert is_valid_lon(struc[1])
    else:
        for sub in struc:
            deep_validate_lat_lon(sub)
    return True
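
Two illustrative calls for the recursive validator above (a sketch, assuming the helpers it relies on, is_numeric(), is_valid_lat() and is_valid_lon(), are in scope from the same module): a bare (lat, lon) pair is checked directly, while nested sequences are walked element by element.

deep_validate_lat_lon((37.7749, -122.4194))                      # a single lat/lon pair
deep_validate_lat_lon([[37.7749, -122.4194], [40.71, -74.00]])   # a sequence of pairs
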
Example #28
def py_xor_simple(str1, str2):
    """
    Benchmarks show that this is the same speed as py_xor() for small strings
    and much slower for large strings, so don't use it. --Zooko 2002-04-29
    """
    warnings.warn("deprecated", DeprecationWarning)
    precondition(len(str1) == len(str2), "str1 and str2 are required to be of the same length.", str1=str1, str2=str2)

    if bytes != str: # PY3
        return bytes(map(operator.__xor__, str1, str2))
    else: # PY2
        return ''.join(map(chr, map(operator.__xor__, map(ord, str1), map(ord, str2))))
Example #29
    def search_by_ip(self, ipaddr, radius=None, query=None, category=None):
        """
        Search for places near an IP address, within a radius (in
        kilometers).

        The server guesses the latitude and longitude from the
        ipaddr and then does the same thing as search(), using that
        guessed latitude and longitude.
        """
        precondition(is_valid_ip(ipaddr), ipaddr)
        precondition(radius is None or is_numeric(radius), radius)
        precondition(query is None or isinstance(query, basestring), query)
        precondition(category is None or isinstance(category, basestring), category)

        if isinstance(query, unicode):
            query = query.encode('utf-8')
        if isinstance(category, unicode):
            category = category.encode('utf-8')

        kwargs = { }
        if radius:
            kwargs['radius'] = radius
        if query:
            kwargs['q'] = query
        if category:
            kwargs['category'] = category

        endpoint = self._endpoint('search_by_ip', ipaddr=ipaddr)

        result = self._request(endpoint, 'GET', data=kwargs)[1]

        fc = json_decode(result)
        return [Feature.from_dict(f) for f in fc['features']]
Example #30
    def search_by_ip(self, ipaddr, radius=None, query=None, category=None):
        """
        Search for places near an IP address, within a radius (in
        kilometers).

        The server guesses the latitude and longitude from the
        ipaddr and then does the same thing as search(), using that
        guessed latitude and longitude.
        """
        precondition(is_valid_ip(ipaddr), ipaddr)
        precondition(radius is None or is_numeric(radius), radius)
        precondition(query is None or isinstance(query, basestring), query)
        precondition(category is None or isinstance(category, basestring),
                     category)

        if isinstance(query, unicode):
            query = query.encode('utf-8')
        if isinstance(category, unicode):
            category = category.encode('utf-8')

        kwargs = {}
        if radius:
            kwargs['radius'] = radius
        if query:
            kwargs['q'] = query
        if category:
            kwargs['category'] = category

        endpoint = self._endpoint('search_by_ip', ipaddr=ipaddr)

        result = self._request(endpoint, 'GET', data=kwargs)[1]

        fc = json_decode(result)
        return [Feature.from_dict(f) for f in fc['features']]
Example #31
def decompress_to_fileobj(zbuf, fileobj, maxlen=(65 * (2**20)), maxmem=(65 * (2**20))):
    """
    Decompress zbuf so that it decompresses to <= maxlen bytes, while using
    <= maxmem memory, or else raise an exception.  If zbuf contains
    uncompressed data an exception will be raised.

    This function guards against memory allocation attacks.

    Note that this assumes that data written to fileobj still occupies memory,
    so such data counts against maxmem as well as against maxlen.

    @param maxlen the resulting text must not be greater than this
    @param maxmem the execution of this function must not use more than this
        amount of memory in bytes;  The higher this number is (optimally
        1032 * maxlen, or even greater), the faster this function can
        complete.  (Actually I don't fully understand the workings of zlib, so
        this function might use a *little* more than this memory, but not a
        lot more.)  (Also, this function will raise an exception if the amount
        of memory required even *approaches* maxmem.  Another reason to make
        it large.)  (Hence the default value which would seem to be
        exceedingly large until you realize that it means you can decompress
        64 KB chunks of compressiontext at a bite.)
    @param fileobj a file object to which the decompressed text will be written
    """
    precondition(hasattr(fileobj, 'write') and callable(fileobj.write), "fileobj is required to have a write() method.", fileobj=fileobj)
    precondition(isinstance(maxlen, (int, long,)) and maxlen > 0, "maxlen is required to be a real maxlen, geez!", maxlen=maxlen)
    precondition(isinstance(maxmem, (int, long,)) and maxmem > 0, "maxmem is required to be a real maxmem, geez!", maxmem=maxmem)
    precondition(maxlen <= maxmem, "maxlen is required to be <= maxmem.  All data that is written out to fileobj is counted against maxmem as well as against maxlen, so it is impossible to return a result bigger than maxmem, even if maxlen is bigger than maxmem.  See decompress_to_spool() if you want to spool a large text out while limiting the amount of memory used during the process.", maxlen=maxlen, maxmem=maxmem)

    lenzbuf = len(zbuf)
    offset = 0
    decomplen = 0
    availmem = maxmem - (76 * 2**10) # zlib can take around 76 KB RAM to do decompression

    decomp = zlib.decompressobj()
    while offset < lenzbuf:
        # How much compressedtext can we safely attempt to decompress now without going over maxmem?  zlib docs say that theoretical maximum for the zlib format would be 1032:1.
        lencompbite = availmem / 1032 # XXX TODO: The biggest compression ratio zlib can have for whole files is 1032:1.  Unfortunately I don't know if small chunks of compressiontext *within* a file can expand to more than that.  I'll assume not...  --Zooko 2001-05-12
        if lencompbite < 128:
            # If we can't safely attempt even a few bytes of compression text, let us give up.  Either maxmem was too small or this compressiontext is actually a decompression bomb.
            raise UnsafeDecompressError, "used up roughly maxmem memory. maxmem: %s, len(zbuf): %s, offset: %s, decomplen: %s" % tuple(map(hr, [maxmem, len(zbuf), offset, decomplen,]))
        # I wish the following were a local function like this:
        # def proc_decomp_bite(tmpstr, lencompbite=0, decomplen=decomplen, maxlen=maxlen, availmem=availmem, decompstrlist=decompstrlist, offset=offset, zbuf=zbuf):
        # ...but we can't conveniently and efficiently update the integer variables like offset in the outer scope.  Oh well.  --Zooko 2003-06-26
        try:
            if (offset == 0) and (lencompbite >= lenzbuf):
                tmpstr = decomp.decompress(zbuf)
            else:
                tmpstr = decomp.decompress(zbuf[offset:offset+lencompbite])
        except zlib.error, le:
            raise ZlibError, (offset, lencompbite, decomplen, le, )
        lentmpstr = len(tmpstr)
        decomplen = decomplen + lentmpstr
        if decomplen > maxlen:
            raise TooBigError, "length of resulting data > maxlen. maxlen: %s, len(zbuf): %s, offset: %s, decomplen: %s" % tuple(map(hr, [maxlen, len(zbuf), offset, decomplen,]))
        availmem = availmem - lentmpstr
        offset = offset + lencompbite
        fileobj.write(tmpstr)
        tmpstr = ''
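
A hypothetical usage sketch for the guard above, written for Python 2 to match the except-comma syntax in the function body; StringIO stands in for any object with a write() method. It decompresses a small zlib buffer into memory while capping both the output size and the working memory.

import zlib
from StringIO import StringIO

zbuf = zlib.compress("hello, hello, hello " * 200)
out = StringIO()
decompress_to_fileobj(zbuf, out, maxlen=1024 * 1024, maxmem=65 * (2 ** 20))
plaintext = out.getvalue()  # at most maxlen bytes; otherwise an exception was raised
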
Example #32
def a2b_l_long(cs, lengthinbits):
    precondition(could_be_base32_encoded_l(cs, lengthinbits),
                 "cs is required to be possibly base32 encoded data.",
                 lengthinbits=lengthinbits,
                 cs=cs)

    qs = [ord(v) for v in string.translate(cs, c2vtranstable)]

    # print "lengthinbits: ", lengthinbits
    numoctets = (lengthinbits + 7) / 8
    # print "numoctets: ", numoctets
    numquintetsofdata = (lengthinbits + 4) / 5
    # print "numquintetsofdata: ", numquintetsofdata
    # strip trailing quintets that won't be used
    del qs[numquintetsofdata:]
    # zero out any unused bits in the final quintet
    if lengthinbits % 5 != 0:
        qs[-1] = qs[-1] >> (5 - (lengthinbits % 5))
        qs[-1] = qs[-1] << (5 - (lengthinbits % 5))
    # append zero quintets for padding if needed
    numquintetsneeded = (numoctets * 8 + 4) / 5 + 7  # append 7 extra zero quintets so that I can read in 40-bit (8-quintet) chunks
    qs.extend([0] * (numquintetsneeded - len(qs)))

    octets = []
    i = 0
    CUTOFF = 2L**32
    while len(octets) < numoctets:
        # take the next 8 quintets and turn them into 5 octets
        num = 0L  # i am a LONG!  hear me roar
        for j in range(8):
            num = num * 32
            num = num + qs[i]
            i = i + 1
        for j in range(5):
            octet = num / CUTOFF
            octets.append(octet)
            num = num - (octet * CUTOFF)
            num = num * 256
    octets = octets[:numoctets]
    res = ''.join([chr(o) for o in octets])
    precondition(
        b2a_l(res, lengthinbits) == cs,
        "cs is required to be the canonical base-32 encoding of some data.",
        b2a(res),
        res=res,
        cs=cs)
    return res
Example #33
 def get_feature(self, simplegeohandle, zoom=None):
     """Return the GeoJSON representation of a feature. Zoom needs to be
     between 1-20, or None for the full polygon."""
     if not is_simplegeohandle(simplegeohandle):
         raise TypeError(
             "simplegeohandle is required to match the regex %s, but it was %s :: %r"
             %
             (SIMPLEGEOHANDLE_RSTR, type(simplegeohandle), simplegeohandle))
     kwargs = {}
     if zoom:
         precondition(zoom >= 1 and zoom <= 20, zoom)
         kwargs['zoom'] = zoom
     endpoint = self._endpoint('feature', simplegeohandle=simplegeohandle)
     return Feature.from_json(
         self._request(endpoint, 'GET', data=kwargs)[1])
Example #34
    def from_dict(cls, data, strict_lon_validation=False):
        """
        data is a GeoJSON standard data structure, including that the
        coordinates are in GeoJSON order (lon, lat) instead of
        SimpleGeo order (lat, lon)
        """
        assert isinstance(data, dict), (type(data), repr(data))
        coordinates = deep_swap(data["geometry"]["coordinates"])
        precondition(deep_validate_lat_lon(coordinates, strict_lon_validation=strict_lon_validation), coordinates)
        feature = cls(
            simplegeohandle=data.get("id"),
            coordinates=coordinates,
            geomtype=data["geometry"]["type"],
            properties=data.get("properties"),
        )

        return feature
Example #35
 def get_context_async(self, lat, lon, **kwds):
     """
     Starts an asynchronous request using GAE's URL Fetch service.
     Returns an RPC object that can be subsequently passed to get_context_result()
     
     **kwds
         Keyword arguments to pass when creating the RPC object.
         See http://code.google.com/appengine/docs/python/urlfetch/asynchronousrequests.html#create_rpc
     """
     
     rpc = urlfetch.create_rpc(**kwds)
     precondition(is_valid_lat(lat), lat)
     precondition(is_valid_lon(lon), lon)
     endpoint = self._endpoint('context', lat=lat, lon=lon)
     headers = self._headers(endpoint, 'GET')
     urlfetch.make_fetch_call(rpc, endpoint, headers=headers)
     return rpc
Example #36
    def __init__(self, tcpch, broker_id, host=None, port=None, asyncsock=None, commstratseqno=None):
        """
        @param tcpch: the TCPCommsHandler
        @param asyncsock: an instance of TCPConnection

        @precondition: asyncsock must be an instance of TCPConnection or nothing.: (asyncsock is None) or isinstance(asyncsock, TCPConnection.TCPConnection)
        @precondition: broker_id must be an id or None.: (broker_id is None) or idlib.is_id(broker_id)
        """
        precondition((asyncsock is None) or isinstance(asyncsock, TCPConnection.TCPConnection), "asyncsock must be an instance of TCPConnection or nothing.", asyncsock=asyncsock)
        precondition((broker_id is None) or idlib.is_id(broker_id), "broker_id must be an id or None.", broker_id=broker_id)

        CommStrat.__init__(self, broker_id, commstratseqno=commstratseqno)

        self._tcpch = tcpch
        self.host = host
        self.port = port
        self.asyncsock = asyncsock
Example #37
    def watch(self, path, mask=IN_WATCH_MASK, autoAdd=False, callbacks=None, recursive=False):
        precondition(isinstance(autoAdd, bool), autoAdd=autoAdd)
        precondition(isinstance(recursive, bool), recursive=recursive)
        assert autoAdd == False

        path_u = path.path
        if not isinstance(path_u, unicode):
            path_u = path_u.decode('utf-8')
            _assert(isinstance(path_u, unicode), path_u=path_u)

        if path_u not in self._callbacks.keys():
            self._callbacks[path_u] = callbacks or []
            self._watches[path_u] = self._observer.schedule(
                INotifyEventHandler(path_u, mask, self._callbacks[path_u], self._pending_delay),
                path=path_u,
                recursive=False,
            )
Example #38
 def test_bad_precond(self):
     adict = 23
     try:
         assertutil.precondition(isinstance(adict, dict),
                                 "adict is required to be a dict.",
                                 23,
                                 adict=adict,
                                 foo=None)
     except AssertionError as le:
         if sys.version_info[0] == 2:
             self.assertEqual(
                 le.args[0],
                 "precondition: 'adict is required to be a dict.' <type 'str'>, 23 <type 'int'>, 'adict': 23 <type 'int'>, 'foo': None <type 'NoneType'>"
             )
         else:
             self.assertEqual(
                 le.args[0],
                 "precondition: 'adict is required to be a dict.' <class 'str'>, 23 <class 'int'>, 'adict': 23 <class 'int'>, 'foo': None <class 'NoneType'>"
             )
Example #39
def should_ignore_file(path_u):
    precondition(isinstance(path_u, unicode), path_u=path_u)

    for suffix in IGNORE_SUFFIXES:
        if path_u.endswith(suffix):
            return True

    while path_u != u"":
        oldpath_u = path_u
        path_u, tail_u = os.path.split(path_u)
        if tail_u.startswith(u"."):
            return True
        if path_u == oldpath_u:
            return True  # the path was absolute
        _assert(len(path_u) < len(oldpath_u),
                path_u=path_u,
                oldpath_u=oldpath_u)

    return False
Example #40
def a2b_l(cs, lengthinbits):
    """
    @param lengthinbits the number of bits of data encoded in cs

    a2b_l() will return a result big enough to hold lengthinbits bits.  So for example if cs is
    4 characters long (encoding at least 15 and up to 20 bits) and lengthinbits is 16, then a2b_l()
    will return a string of length 2 (since 2 bytes is sufficient to store 16 bits).  If cs is 4
    characters long and lengthinbits is 20, then a2b_l() will return a string of length 3 (since
    3 bytes is sufficient to store 20 bits).

    Please see the warning in the docstring of b2a_l() regarding the use of b2a() versus b2a_l().

    @return the data encoded in cs
    """
    precondition(could_be_base32_encoded_l(cs, lengthinbits), "cs is required to be possibly base32 encoded data.", cs=cs, lengthinbits=lengthinbits)

    qs = [ord(v) for v in string.translate(cs, c2vtranstable)]

    numoctets = div_ceil(lengthinbits, 8)
    numquintetsofdata = div_ceil(lengthinbits, 5)
    # append zero quintets for padding if needed
    numquintetsneeded = div_ceil(numoctets*8, 5)
    qs.extend([0]*(numquintetsneeded-len(qs)))

    octets = []
    pos = 2048
    num = qs[0] * pos
    readybits = 5
    i = 1
    while len(octets) < numoctets:
        while pos > 256:
            pos = pos / 32
            num = num + (qs[i] * pos)
            i = i + 1
        octet = num / 256
        octets.append(octet)
        num = num - (octet * 256)
        num = num * 256
        pos = pos * 256
    assert len(octets) == numoctets, "len(octets): %s, numoctets: %s, octets: %s" % (len(octets), numoctets, octets,)
    res = ''.join([chr(o) for o in octets])
    precondition(b2a_l(res, lengthinbits) == cs, "cs is required to be the canonical base-32 encoding of some data.", b2a(res), res=res, cs=cs)
    return res
Example #41
    def search_by_address(self,
                          address,
                          radius=None,
                          query=None,
                          category=None):
        """
        Search for places near the given address, within a radius (in
        kilometers).

        The server figures out the latitude and longitude from the
        street address and then does the same thing as search(), using
        that deduced latitude and longitude.
        """
        precondition(isinstance(address, basestring), address)
        precondition(address != '', address)
        precondition(radius is None or is_numeric(radius), radius)
        precondition(query is None or isinstance(query, basestring), query)
        precondition(category is None or isinstance(category, basestring),
                     category)

        if isinstance(address, unicode):
            address = address.encode('utf-8')
        if isinstance(query, unicode):
            query = query.encode('utf-8')
        if isinstance(category, unicode):
            category = category.encode('utf-8')

        kwargs = {'address': address}
        if radius:
            kwargs['radius'] = radius
        if query:
            kwargs['q'] = query
        if category:
            kwargs['category'] = category

        endpoint = self._endpoint('search_by_address')

        result = self._request(endpoint, 'GET', data=kwargs)[1]

        fc = json_decode(result)
        return [Feature.from_dict(f) for f in fc['features']]
Example #42
def py_xor(str1, str2):
    warnings.warn("deprecated", DeprecationWarning)
    precondition(len(str1) == len(str2), "str1 and str2 are required to be of the same length.", str1=str1, str2=str2)

    if len(str1)%4 == 0:
        a1 = array.array('i', str1)
        a2 = array.array('i', str2)
        for i in range(len(a1)):
            a2[i] = a2[i]^a1[i]
    elif len(str1)%2 == 0:
        a1 = array.array('h', str1)
        a2 = array.array('h', str2)
        for i in range(len(a1)):
            a2[i] = a2[i]^a1[i]
    else:
        a1 = array.array('c', str1)
        a2 = array.array('c', str2)
        for i in range(len(a1)):
            a2[i] = chr(ord(a2[i])^ord(a1[i]))

    return a2.tostring()
Example #43
    def search_by_my_ip(self, radius=None, query=None, category=None):
        """
        Search for places near your IP address, within a radius (in
        kilometers).

        The server gets the IP address from the HTTP connection (this
        may be the IP address of your device or of a firewall, NAT, or
        HTTP proxy device between you and the server), and then does
        the same thing as search_by_ip(), using that IP address.
        """
        precondition(radius is None or is_numeric(radius), radius)
        precondition(query is None or isinstance(query, basestring), query)
        precondition(category is None or isinstance(category, basestring), category)

        if isinstance(query, unicode):
            query = query.encode('utf-8')
        if isinstance(category, unicode):
            category = category.encode('utf-8')

        kwargs = { }
        if radius:
            kwargs['radius'] = radius
        if query:
            kwargs['q'] = query
        if category:
            kwargs['category'] = category

        endpoint = self._endpoint('search_by_my_ip')

        result = self._request(endpoint, 'GET', data=kwargs)[1]

        fc = json_decode(result)
        return [Feature.from_dict(f) for f in fc['features']]
Example #44
    def search_by_my_ip(self, radius=None, query=None, category=None):
        """
        Search for places near your IP address, within a radius (in
        kilometers).

        The server gets the IP address from the HTTP connection (this
        may be the IP address of your device or of a firewall, NAT, or
        HTTP proxy device between you and the server), and then does
        the same thing as search_by_ip(), using that IP address.
        """
        precondition(radius is None or is_numeric(radius), radius)
        precondition(query is None or isinstance(query, basestring), query)
        precondition(category is None or isinstance(category, basestring),
                     category)

        if isinstance(query, unicode):
            query = query.encode('utf-8')
        if isinstance(category, unicode):
            category = category.encode('utf-8')

        kwargs = {}
        if radius:
            kwargs['radius'] = radius
        if query:
            kwargs['q'] = query
        if category:
            kwargs['category'] = category

        endpoint = self._endpoint('search_by_my_ip')

        result = self._request(endpoint, 'GET', data=kwargs)[1]

        fc = json_decode(result)
        return [Feature.from_dict(f) for f in fc['features']]
Example #45
def a2b_l_long(cs, lengthinbits):
    precondition(could_be_base32_encoded_l(cs, lengthinbits), "cs is required to be possibly base32 encoded data.", lengthinbits=lengthinbits, cs=cs)

    qs = map(ord, string.translate(cs, c2vtranstable))

    # print "lengthinbits: ", lengthinbits
    numoctets = (lengthinbits+7)/8
    # print "numoctets: ", numoctets
    numquintetsofdata = (lengthinbits+4)/5
    # print "numquintetsofdata: ", numquintetsofdata
    # strip trailing quintets that won't be used
    del qs[numquintetsofdata:]
    # zero out any unused bits in the final quintet
    if lengthinbits % 5 != 0:
        qs[-1] = qs[-1] >> (5-(lengthinbits % 5))
        qs[-1] = qs[-1] << (5-(lengthinbits % 5))
    # append zero quintets for padding if needed
    numquintetsneeded = (numoctets*8+4)/5+7 # append 7 extra zero quintets so that I can read in 40-bit (8-quintet) chunks
    qs.extend([0]*(numquintetsneeded-len(qs)))

    octets = []
    i = 0
    CUTOFF = 2L**32
    while len(octets) < numoctets:
        # take the next 8 quintets and turn them into 5 octets
        num = 0L # i am a LONG!  hear me roar
        for j in range(8):
            num = num * 32
            num = num + qs[i]
            i = i + 1
        for j in range(5):
            octet = num / CUTOFF
            octets.append(octet)
            num = num - (octet * CUTOFF)
            num = num * 256
    octets = octets[:numoctets]
    res = ''.join(map(chr, octets))
    precondition(b2a_l(res, lengthinbits) == cs, "cs is required to be the canonical base-32 encoding of some data.", b2a(res), res=res, cs=cs)
    return res
Example #46
def import_all_python_files(packages):
    precondition(not isinstance(packages, basestring), "packages is required to be a sequence.", packages=packages) # common mistake
    for package in packages:
        packagedir = '/'.join(package.split('.'))

        for (dirpath, dirnames, filenames) in os.walk(packagedir):
            for filename in (filename for filename in filenames if filename.endswith('.py')):
                dirs = dirpath.split("/")
                if filename != "__init__.py":
                    dirs.append(filename[:-3])
                import_str = "%s" % ".".join(dirs)
                if import_str not in ("setup", __name__):
                    try:
                        __import__(import_str)
                    except ImportError, le:
                        if 'No module named' in str(le):
                            # Oh whoops I guess that Python file we found isn't a module of this package. Nevermind.
                            pass
                        else:
                            sys.stderr.write("WARNING, importing %s resulted in an ImportError %s. I'm ignoring this ImportError, as I was trying to import it only for the purpose of marking its import-time statements as covered for code-coverage accounting purposes.\n" % (import_str, le,))
                    except Exception, le:
                        sys.stderr.write("WARNING, importing %s resulted in an Exception %s. I'm ignoring this Exception, as I was trying to import it only for the purpose of marking its import-time statements as covered for code-coverage accounting purposes.\n" % (import_str, le,))
Example #47
def dict_to_strategy(dict, mtm, broker_id=None, commstratseqno=None):
    """
    @raises UnsupportedTypeError: if `dict' is not either a TCP, Relay, Crypto, or Pickup
    
    @precondition: broker_id must be an id or None.: (broker_id is None) or (idlib.is_id(broker_id))
    """
    precondition ((broker_id is None) or (idlib.is_sloppy_id(broker_id)), "broker_id must be an id or None.", broker_id=broker_id)

    if not (dict.get('comm strategy type') in ("TCP", "relay", "Relay", "crypto", "Crypto", "pickup", "Pickup",)):
        raise UnsupportedTypeError, "dict must be either a TCP, Relay, Crypto or Pickup." + " -- " + "dict: %s" % humanreadable.hr(dict)

    dictbroker_id = dict.get('broker id')
    if (broker_id is not None) and (dictbroker_id is not None):
        assert idlib.equal(broker_id, dictbroker_id)
    if broker_id is None:
        broker_id = dictbroker_id

    if dict.get('comm strat sequence num') is not None:
        try:
            DataTypes.check_template(dict.get('comm strat sequence num'), DataTypes.INTEGER)
        except DataTypes.BadFormatError, le:
            raise DataTypes.BadFormatError, { 'cause': le, 'explanation': "comm strat sequence number is not an INTEGER", 'comm strat sequence number': dict.get('comm strat sequence num'), 'dict': dict, }
        commstratseqno = dict.get('comm strat sequence num')
Example #48
def py_xor(str1, str2):
    warnings.warn("deprecated", DeprecationWarning)
    precondition(len(str1) == len(str2),
                 "str1 and str2 are required to be of the same length.",
                 str1=str1,
                 str2=str2)

    if len(str1) % 4 == 0:
        a1 = array.array('i', str1)
        a2 = array.array('i', str2)
        for i in range(len(a1)):
            a2[i] = a2[i] ^ a1[i]
    elif len(str1) % 2 == 0:
        a1 = array.array('h', str1)
        a2 = array.array('h', str2)
        for i in range(len(a1)):
            a2[i] = a2[i] ^ a1[i]
    else:
        a1 = array.array('c', str1)
        a2 = array.array('c', str2)
        for i in range(len(a1)):
            a2[i] = chr(ord(a2[i]) ^ ord(a1[i]))

    return a2.tostring()
Example #49
    def search(self, lat, lon, radius=None, query=None, category=None):
        """Search for places near a lat/lon."""
        precondition(is_valid_lat(lat), lat)
        precondition(is_valid_lon(lon), lon)
        precondition(radius is None or is_numeric(radius), radius)
        precondition(query is None or isinstance(query, basestring), query)
        precondition(category is None or isinstance(category, basestring), category)

        kwargs = {}
        if radius:
            kwargs["radius"] = radius
        if query:
            kwargs["q"] = query
        if category:
            kwargs["category"] = category
        quargs = urllib.urlencode(kwargs)
        if quargs:
            quargs = "?" + quargs
        endpoint = self._endpoint("search", lat=lat, lon=lon, quargs=quargs)

        result = self._request(endpoint, "GET")[1]

        fc = json_decode(result)
        return [Feature.from_dict(f) for f in fc["features"]]
Example #50
    def search_by_address(self, address, radius=None, query=None, category=None):
        """
        Search for places near the given address, within a radius (in
        kilometers).

        The server figures out the latitude and longitude from the
        street address and then does the same thing as search(), using
        that deduced latitude and longitude.
        """
        precondition(isinstance(address, basestring), address)
        precondition(address != '', address)
        precondition(radius is None or is_numeric(radius), radius)
        precondition(query is None or isinstance(query, basestring), query)
        precondition(category is None or isinstance(category, basestring), category)

        if isinstance(address, unicode):
            address = address.encode('utf-8')
        if isinstance(query, unicode):
            query = query.encode('utf-8')
        if isinstance(category, unicode):
            category = category.encode('utf-8')

        kwargs = { 'address': address }
        if radius:
            kwargs['radius'] = radius
        if query:
            kwargs['q'] = query
        if category:
            kwargs['category'] = category
 
        endpoint = self._endpoint('search_by_address')

        result = self._request(endpoint, 'GET', data=kwargs)[1]

        fc = json_decode(result)
        return [Feature.from_dict(f) for f in fc['features']]
Example #51
    def __init__(self, coordinates, geomtype='Point', simplegeohandle=None, properties=None):
        """
        The simplegeohandle and the record_id are both optional -- you
        can have one or the other or both or neither.

        A simplegeohandle is globally unique and is assigned by the
        Places service. It is returned from the Places service in the
        response to a request to add a place to the Places database
        (the add_feature method).

        The simplegeohandle is passed in as an argument to the
        constructor, named "simplegeohandle", and is stored in the
        "id" attribute of the Feature instance.

        A record_id is scoped to your particular user account and is
        chosen by you. The only use for the record_id is in case you
        call add_feature and you have already previously added that
        feature to the database -- if there is already a feature from
        your user account with the same record_id then the Places
        service will return that feature to you, along with that
        feature's simplegeohandle, instead of making a second, duplicate
        feature.

        A record_id is passed in as a value in the properties dict
        named "record_id".

        geomtype is a GeoJSON geometry type such as "Point",
        "Polygon", or "Multipolygon". coordinates is a GeoJSON
        coordinates *except* that each lat/lon pair is written in
        order lat, lon instead of the GeoJSON order of lon, lat.

        When txsimplegeo.shared is constructing a Feature object from
        the result of an HTTP query to the SimpleGeo service, it will
        stash a reference to the twisted.web.client.Response object in
        the "._http_response" member variable of the Feature
        object. This could be useful for debugging, investigating the
        performance of the SimpleGeo service, etc.
        """
        precondition(simplegeohandle is None or is_simplegeohandle(simplegeohandle), "simplegeohandle is required to be None or to match the regex %s" % SIMPLEGEOHANDLE_RSTR, simplegeohandle=simplegeohandle)
        record_id = properties and properties.get('record_id') or None
        precondition(record_id is None or isinstance(record_id, basestring), "record_id is required to be None or a string.", record_id=record_id, properties=properties)
        precondition(deep_validate_lat_lon(coordinates), coordinates)

        self.id = simplegeohandle
        self.coordinates = coordinates
        self.geomtype = geomtype
        self.properties = {}
        if properties:
            self.properties.update(properties)
Example #52
    def watch(self, path, mask=IN_WATCH_MASK, autoAdd=False, callbacks=None, recursive=False):
        precondition(self._state == NOT_STARTED, "watch() can only be called before startReading()", state=self._state)
        precondition(self._filter is None, "only one watch is supported")
        precondition(isinstance(autoAdd, bool), autoAdd=autoAdd)
        precondition(isinstance(recursive, bool), recursive=recursive)
        #precondition(autoAdd == recursive, "need autoAdd and recursive to be the same", autoAdd=autoAdd, recursive=recursive)

        self._path = path
        path_u = path.path
        if not isinstance(path_u, unicode):
            path_u = path_u.decode(sys.getfilesystemencoding())
            _assert(isinstance(path_u, unicode), path_u=path_u)

        self._filter = FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME | FILE_NOTIFY_CHANGE_LAST_WRITE

        if mask & (IN_ACCESS | IN_CLOSE_NOWRITE | IN_OPEN):
            self._filter = self._filter | FILE_NOTIFY_CHANGE_LAST_ACCESS
        if mask & IN_ATTRIB:
            self._filter = self._filter | FILE_NOTIFY_CHANGE_ATTRIBUTES | FILE_NOTIFY_CHANGE_SECURITY

        self._recursive = TRUE if recursive else FALSE
        self._callbacks = callbacks or []
        self._hDirectory = _open_directory(path_u)
Example #53
    def __init__(self, pubkey, lowerstrategy, broker_id=None):
        """
        @param lowerstrategy: the lower-level comms strategy, either given
            by meta-tracking or suggested by the way that the last message
            arrived (e.g. For TCP, the suggested strategy is to send a
            message back down the connection over which the last message
            arrived.)

        @precondition: pubkey must be a well-formed keyutil.: keyutil.publicKeyForCommunicationSecurityIsWellFormed(pubkey)
        @precondition: lowerstrategy must be a CommStrat.: isinstance(lowerstrategy, CommStrat)
        @precondition: broker_id must be the id of pubkey, or else it must be None.: (broker_id is None) or (idlib.equal(idlib.make_id(pubkey, 'broker'), broker_id))
        """
        precondition(keyutil.publicKeyForCommunicationSecurityIsWellFormed(pubkey), "pubkey must be a well-formed keyutil.", pubkey=pubkey)
        precondition(isinstance(lowerstrategy, CommStrat), "lowerstrategy must be a CommStrat.", lowerstrategy=lowerstrategy)
        precondition((broker_id is None) or (idlib.equal(idlib.make_id(pubkey, 'broker'), broker_id)), "broker_id must be the id of pubkey, or else it must be `None'.", broker_id=broker_id)

        CommStrat.__init__(self, idlib.make_id(pubkey, 'broker'))

        self._pubkey = pubkey
        self._lowerstrategy = lowerstrategy
Example #54
 def get_context(self, lat, lon):
     precondition(is_valid_lat(lat), lat)
     precondition(is_valid_lon(lon), lon)
     endpoint = self._endpoint('context', lat=lat, lon=lon)
     return json_decode(self._request(endpoint, "GET")[1])
Example #55
def b2a_l(os, lengthinbits):
    """
    @param os the data to be encoded (a string)
    @param lengthinbits the number of bits of data in os to be encoded

    b2a_l() will generate a base-32 encoded string big enough to encode
    lengthinbits bits.  So for example if os is 2 bytes long and lengthinbits is
    15, then b2a_l() will generate a 3-character-long base-32 encoded string
    (since 3 quintets is sufficient to encode 15 bits).  If os is 2 bytes long
    and lengthinbits is 16 (or None), then b2a_l() will generate a 4-character
    string.  Note that if os is 2 bytes long and lengthinbits is 15, then the
    least-significant bit of os is ignored.

    Warning: if you generate a base-32 encoded string with b2a_l(), and then someone else tries to
    decode it by calling a2b() instead of  a2b_l(), then they will (probably) get a different
    string than the one you encoded!  So only use b2a_l() when you are sure that the encoding and
    decoding sides know exactly which lengthinbits to use.  If you do not have a way for the
    encoder and the decoder to agree upon the lengthinbits, then it is best to use b2a() and
    a2b().  The only drawback to using b2a() over b2a_l() is that when you have a number of
    bits to encode that is not a multiple of 8, b2a() can sometimes generate a base-32 encoded
    string that is one or two characters longer than necessary.

    @return the contents of os in base-32 encoded form
    """
    precondition(isinstance(lengthinbits, (int, long)),
                 "lengthinbits is required to be an integer.",
                 lengthinbits=lengthinbits)
    precondition(
        div_ceil(lengthinbits, 8) == len(os),
        "lengthinbits is required to specify a number of bits storable in exactly len(os) octets.",
        lengthinbits=lengthinbits,
        lenos=len(os))
    # precondition((lengthinbits % 8==0) or ((ord(os[-1]) % (2**(8-(lengthinbits%8))))==0), "Any unused least-significant bits in os are required to be zero bits.", ord(os[-1]), lengthinbits=lengthinbits) # removing this precondition, because I like to use it with random os, like this: base32.b2a_l(file("/dev/urandom", "r").read(9), 65)

    os = [ord(o) for o in os]

    numquintets = div_ceil(lengthinbits, 5)
    numoctetsofdata = div_ceil(lengthinbits, 8)
    # print "numoctetsofdata: %s, len(os): %s, lengthinbits: %s, numquintets: %s" % (numoctetsofdata, len(os), lengthinbits, numquintets,)
    # zero out any unused bits in the final octet
    if lengthinbits % 8 != 0:
        os[-1] >>= (8 - (lengthinbits % 8))
        os[-1] <<= (8 - (lengthinbits % 8))
    # append zero octets for padding if needed
    numoctetsneeded = div_ceil(numquintets * 5, 8) + 1
    os.extend([0] * (numoctetsneeded - len(os)))

    quintets = []
    cutoff = 256
    num = os[0]
    i = 0
    while len(quintets) < numquintets:
        i = i + 1
        assert len(os) > i, (
            "len(os): %s, i: %s, len(quintets): %s, numquintets: %s, lengthinbits: %s, numoctetsofdata: %s, numoctetsneeded: %s, os: %s"
            % (len(os), i, len(quintets), numquintets, lengthinbits, numoctetsofdata, numoctetsneeded, os))
        num = num * 256
        num = num + os[i]
        if cutoff == 1:
            cutoff = 256
            continue
        cutoff = cutoff * 8
        quintet = num / cutoff
        quintets.append(quintet)
        num = num - (quintet * cutoff)

        cutoff = cutoff / 32
        quintet = num / cutoff
        quintets.append(quintet)
        num = num - (quintet * cutoff)

    if len(quintets) > numquintets:
        assert len(quintets) == (numquintets + 1), "len(quintets): %s, numquintets: %s, quintets: %s" % (len(quintets), numquintets, quintets)
        quintets = quintets[:numquintets]
    res = string.translate(''.join([chr(q) for q in quintets]), v2ctranstable)
    assert could_be_base32_encoded_l(res, lengthinbits), "lengthinbits: %s, res: %s" % (lengthinbits, res)
    return res
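
A small sketch of the pairing rule that the docstring above warns about (it assumes b2a_l(), a2b_l(), b2a() and a2b() all come from the same base-32 module shown in these examples): text produced by b2a_l() must be decoded with a2b_l() using the very same lengthinbits, while b2a() pairs with a2b().

two_octets = '\xff\xfe'           # 16 bits of data, of which only 15 are encoded
encoded = b2a_l(two_octets, 15)   # 3 characters; encoder and decoder must agree on 15
decoded = a2b_l(encoded, 15)      # round-trips the 15 significant bits
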
Example #56
class Feature:
    def __init__(self, coordinates, geomtype='Point', simplegeohandle=None, properties=None, strict_lon_validation=False):
        """
        The simplegeohandle and the record_id are both optional -- you
        can have one or the other or both or neither.

        A simplegeohandle is globally unique and is assigned by the
        Places service. It is returned from the Places service in the
        response to a request to add a place to the Places database
        (the add_feature method).

        The simplegeohandle is passed in as an argument to the
        constructor, named "simplegeohandle", and is stored in the
        "id" attribute of the Feature instance.

        A record_id is scoped to your particular user account and is
        chosen by you. The only use for the record_id is in case you
        call add_feature and you have already previously added that
        feature to the database -- if there is already a feature from
        your user account with the same record_id then the Places
        service will return that feature to you, along with that
        feature's simplegeohandle, instead of making a second, duplicate
        feature.

        A record_id is passed in as a value in the properties dict
        named "record_id".

        geomtype is a GeoJSON geometry type such as "Point",
        "Polygon", or "Multipolygon". coordinates is a GeoJSON
        coordinates *except* that each lat/lon pair is written in
        order lat, lon instead of the GeoJSON order of lon, lat.

        When a Feature is being submitted to the SimpleGeo Places
        database, if there is a key 'private' in the properties dict
        which is set to True, then the Feature is intended to be
        visible only to your user account. If there is no 'private'
        key or if there is a 'private' key which is set to False, then
        the Feature is intended to be merged into the publicly visible
        Places Database.

        Note that even if it is intended to be merged into the public
        Places Database the actual process of merging it into the
        public shared database may take some time, and the newly added
        Feature will be visible to your account right away even if it
        isn't (yet) visible to the public.

        For the meaning of strict_lon_validation, please see the
        function is_valid_lon().
        """
        try:
            deep_validate_lat_lon(coordinates, strict_lon_validation=strict_lon_validation)
        except TypeError as le:
            raise TypeError("The first argument, 'coordinates' is required to be a 2-element sequence of lon, lat for a point (or a more complicated set of coordinates for polygons or multipolygons), but it was %s :: %r. The error that was raised from validating this was: %s" % (type(coordinates), coordinates, le))

        if not (simplegeohandle is None or is_simplegeohandle(simplegeohandle)):
            raise TypeError("The third argument, 'simplegeohandle' is required to be None or to match this regex: %s, but it was %s :: %r" % (SIMPLEGEOHANDLE_RSTR, type(simplegeohandle), simplegeohandle))

        record_id = properties and properties.get('record_id') or None
        if not (record_id is None or isinstance(record_id, basestring)):
            raise TypeError("record_id is required to be None or a string, but it was: %r :: %s." % (type(record_id), record_id))
        self.strict_lon_validation = strict_lon_validation
        precondition(coordinates)
        self.id = simplegeohandle
        self.coordinates = coordinates
        self.geomtype = geomtype
        self.properties = {'private': False}
        if properties:
            self.properties.update(properties)
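
# A hedged usage sketch, assuming the Feature class above and its validators
# (deep_validate_lat_lon, is_simplegeohandle, precondition) are importable from
# the surrounding SimpleGeo client module.  The record_id value is made up for
# illustration.
f = Feature(
    coordinates=[37.7749, -122.4194],        # lat, lon order, per the docstring
    geomtype='Point',
    properties={'record_id': 'my-cafe-001',  # caller-chosen id, scoped to your account
                'private': True},            # keep it out of the public Places database
)
assert f.id is None                  # no simplegeohandle until the service assigns one
assert f.properties['private'] is True
assert f.properties['record_id'] == 'my-cafe-001'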
Example #57
def decompress_to_spool(zbuf,
                        fileobj,
                        maxlen=(65 * (2**20)),
                        maxmem=(65 * (2**20))):
    """
    Decompress zbuf so that it decompresses to <= maxlen bytes, while using
    <= maxmem memory, or else raise an exception.  If zbuf contains
    uncompressed data an exception will be raised.

    This function guards against memory allocation attacks.

    Note that this assumes that data written to fileobj does *not* continue to
    occupy memory, so such data doesn't count against maxmem, although of
    course it still counts against maxlen.

    @param maxlen the resulting text must not be greater than this
    @param maxmem the execution of this function must not use more than this
        amount of memory in bytes;  The higher this number is (optimally
        1032 * maxlen, or even greater), the faster this function can
        complete.  (Actually I don't fully understand the workings of zlib, so
        this function might use a *little* more than this memory, but not a
        lot more.)  (Also, this function will raise an exception if the amount
        of memory required even *approaches* maxmem.  Another reason to make
        it large.)  (Hence the default value which would seem to be
        exceedingly large until you realize that it means you can decompress
        64 KB chunks of compressiontext at a bite.)
    @param fileobj the decompressed text will be written to it
    """
    precondition(hasattr(fileobj, 'write') and callable(fileobj.write),
                 "fileobj is required to have a write() method.",
                 fileobj=fileobj)
    precondition(isinstance(maxlen, int) and maxlen > 0,
                 "maxlen is required to be a real maxlen, geez!",
                 maxlen=maxlen)
    precondition(isinstance(maxmem, int) and maxmem > 0,
                 "maxmem is required to be a real maxmem, geez!",
                 maxmem=maxmem)

    tmpstr = ''
    lenzbuf = len(zbuf)
    offset = 0
    decomplen = 0
    availmem = maxmem - (
        76 * 2**10)  # zlib can take around 76 KB RAM to do decompression

    decomp = zlib.decompressobj()
    while offset < lenzbuf:
        # How much compressedtext can we safely attempt to decompress now without going over `maxmem'?  zlib docs say that theoretical maximum for the zlib format would be 1032:1.
        lencompbite = availmem / 1032  # XXX TODO: The biggest compression ratio zlib can have for whole files is 1032:1.  Unfortunately I don't know if small chunks of compressiontext *within* a file can expand to more than that.  I'll assume not...  --Zooko 2001-05-12
        if lencompbite < 128:
            # If we can't safely attempt even a few bytes of compression text, let us give up.  Either `maxmem' was too small or this compressiontext is actually a decompression bomb.
            raise UnsafeDecompressError(
                "used up roughly `maxmem' memory. maxmem: %s, len(zbuf): %s, offset: %s, decomplen: %s"
                % tuple(map(hr, [
                    maxmem,
                    len(zbuf),
                    offset,
                    decomplen,
                ])))
        # I wish the following were a local function like this:
        # def proc_decomp_bite(tmpstr, lencompbite=0, decomplen=decomplen, maxlen=maxlen, availmem=availmem, decompstrlist=decompstrlist, offset=offset, zbuf=zbuf):
        # ...but we can't conveniently and efficiently update the integer variables like offset in the outer scope.  Oh well.  --Zooko 2003-06-26
        try:
            if (offset == 0) and (lencompbite >= lenzbuf):
                tmpstr = decomp.decompress(zbuf)
            else:
                tmpstr = decomp.decompress(zbuf[offset:offset + lencompbite])
        except zlib.error as le:
            raise ZlibError(
                offset,
                lencompbite,
                decomplen,
                le,
            )
        lentmpstr = len(tmpstr)
        decomplen = decomplen + lentmpstr
        if decomplen > maxlen:
            raise TooBigError(
                "length of resulting data > `maxlen'. maxlen: %s, len(zbuf): %s, offset: %s, decomplen: %s"
                % tuple(map(hr, [
                    maxlen,
                    len(zbuf),
                    offset,
                    decomplen,
                ])))
        offset = offset + lencompbite
        fileobj.write(tmpstr)
        tmpstr = ''

    try:
        tmpstr = decomp.flush()
    except zlib.error as le:
        raise ZlibError(
            offset,
            lencompbite,
            decomplen,
            le,
        )
    lentmpstr = len(tmpstr)
    decomplen = decomplen + lentmpstr
    if decomplen > maxlen:
        raise TooBigError(
            "length of resulting data > `maxlen'. maxlen: %s, len(zbuf): %s, offset: %s, decomplen: %s"
            % tuple(map(hr, [
                maxlen,
                len(zbuf),
                offset,
                decomplen,
            ])))
    offset = offset + lencompbite
    fileobj.write(tmpstr)
    tmpstr = ''
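
# A quick usage sketch, assuming decompress_to_spool() above (and its exception
# classes) are in scope.  StringIO stands in for the spool file object, matching
# the Python 2 idioms of the example.
import zlib
from StringIO import StringIO

plaintext = "hello, bounded decompression " * 1000
zbuf = zlib.compress(plaintext)

spool = StringIO()
decompress_to_spool(zbuf, spool, maxlen=2**20, maxmem=2**20)
assert spool.getvalue() == plaintext
# With a much smaller maxlen the same call raises TooBigError instead of
# buffering an unbounded amount of decompressed data.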
Example #58
 def __init__(self, filecap, all_contents):
     precondition(isinstance(filecap, (uri.CHKFileURI, uri.LiteralFileURI)),
                  filecap)
     self.all_contents = all_contents
     self.my_uri = filecap
     self.storage_index = self.my_uri.get_storage_index()
Example #59
def a2b_l(cs, lengthinbits):
    """
    @param lengthinbits the number of bits of data encoded into cs

    a2b_l() will return a result big enough to hold lengthinbits bits.  So for example if cs is
    4 characters long (encoding at least 15 and up to 20 bits) and lengthinbits is 16, then a2b_l()
    will return a string of length 2 (since 2 bytes is sufficient to store 16 bits).  If cs is 4
    characters long and lengthinbits is 20, then a2b_l() will return a string of length 3 (since
    3 bytes is sufficient to store 20 bits).  Note that b2a_l() does not mask off unused least-
    significant bits, so for example if cs is 4 characters long and lengthinbits is 17, then you
    must ensure that all three of the unused least-significant bits of cs are zero bits or you will
    get the wrong result.  This precondition is tested by assertions if assertions are enabled.
    (Generally you just require the encoder to ensure this consistency property between the least
    significant zero bits and value of lengthinbits, and reject strings that have a length-in-bits
    which isn't a multiple of 8 and yet don't have trailing zero bits, as improperly encoded.)

    Please see the warning in the docstring of b2a_l() regarding the use of b2a() versus b2a_l().

    @return the data encoded in cs
    """
    precondition(could_be_base32_encoded_l(cs, lengthinbits),
                 "cs is required to be possibly base32 encoded data.",
                 cs=cs,
                 lengthinbits=lengthinbits)

    qs = map(ord, string.translate(cs, c2vtranstable))

    numoctets = (lengthinbits + 7) / 8
    numquintetsofdata = (lengthinbits + 4) / 5
    # strip trailing quintets that won't be used
    del qs[numquintetsofdata:]
    # zero out any unused bits in the final quintet
    if lengthinbits % 5 != 0:
        qs[-1] = qs[-1] >> (5 - (lengthinbits % 5))
        qs[-1] = qs[-1] << (5 - (lengthinbits % 5))
    # append zero quintets for padding if needed
    numquintetsneeded = (numoctets * 8 + 4) / 5
    qs.extend([0] * (numquintetsneeded - len(qs)))

    octets = []
    pos = 2048
    num = qs[0] * pos
    readybits = 5
    i = 1
    while len(octets) < numoctets:
        while pos > 256:
            pos = pos / 32
            num = num + (qs[i] * pos)
            i = i + 1
        octet = num / 256
        octets.append(octet)
        num = num - (octet * 256)
        num = num * 256
        pos = pos * 256
    assert len(
        octets) == numoctets, "len(octets): %s, numoctets: %s, octets: %s" % (
            len(octets),
            numoctets,
            octets,
        )
    res = ''.join(map(chr, octets))
    precondition(
        b2a_l(res, lengthinbits) == cs,
        "cs is required to be the canonical base-32 encoding of some data.",
        b2a(res),
        res=res,
        cs=cs)
    return res
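
# The sizing rule from the docstring is plain ceiling division by 8; a throwaway
# check with a hypothetical helper (not part of this module):
def _decoded_len(lengthinbits):
    return (lengthinbits + 7) // 8   # octets returned by a2b_l()

# 4 base-32 characters carry between 15 and 20 bits of payload:
print(_decoded_len(16))  # -> 2 octets
print(_decoded_len(20))  # -> 3 octets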
Example #60
def b2a_l(os, lengthinbits):
    """
    @param os the data to be encoded (a string)
    @param lengthinbits the number of bits of data in os to be encoded

    b2a_l() will generate a base-32 encoded string big enough to encode lengthinbits bits.  So for
    example if os is 2 bytes long and lengthinbits is 15, then b2a_l() will generate a 3-character-
    long base-32 encoded string (since 3 quintets is sufficient to encode 15 bits).  If os is
    2 bytes long and lengthinbits is 16 (or None), then b2a_l() will generate a 4-character string.
    Note that b2a_l() does not mask off unused least-significant bits, so for example if os is
    2 bytes long and lengthinbits is 15, then you must ensure that the unused least-significant bit
    of os is a zero bit or you will get the wrong result.  This precondition is tested by assertions
    if assertions are enabled.

    Warning: if you generate a base-32 encoded string with b2a_l(), and then someone else tries to
    decode it by calling a2b() instead of a2b_l(), then they will (probably) get a different
    string than the one you encoded!  So only use b2a_l() when you are sure that the encoding and
    decoding sides know exactly which lengthinbits to use.  If you do not have a way for the
    encoder and the decoder to agree upon the lengthinbits, then it is best to use b2a() and
    a2b().  The only drawback to using b2a() over b2a_l() is that when you have a number of
    bits to encode that is not a multiple of 8, b2a() can sometimes generate a base-32 encoded
    string that is one or two characters longer than necessary.

    @return the contents of os in base-32 encoded form
    """
    precondition(isinstance(lengthinbits, (
        int,
        long,
    )),
                 "lengthinbits is required to be an integer.",
                 lengthinbits=lengthinbits)
    precondition(
        (lengthinbits + 7) / 8 == len(os),
        "lengthinbits is required to specify a number of bits storable in exactly len(os) octets.",
        lengthinbits=lengthinbits,
        lenos=len(os))
    precondition(
        (lengthinbits % 8 == 0)
        or ((ord(os[-1]) % (2**(8 - (lengthinbits % 8)))) == 0),
        "Any unused least-significant bits in os are required to be zero bits.",
        ord(os[-1]),
        lengthinbits=lengthinbits)

    os = map(ord, os)

    numquintets = (lengthinbits + 4) / 5
    numoctetsofdata = (lengthinbits + 7) / 8
    # print "numoctetsofdata: %s, len(os): %s, lengthinbits: %s, numquintets: %s" % (numoctetsofdata, len(os), lengthinbits, numquintets,)
    # strip trailing octets that won't be used
    del os[numoctetsofdata:]
    # zero out any unused bits in the final octet
    if lengthinbits % 8 != 0:
        os[-1] = os[-1] >> (8 - (lengthinbits % 8))
        os[-1] = os[-1] << (8 - (lengthinbits % 8))
    # append zero octets for padding if needed
    numoctetsneeded = (numquintets * 5 + 7) / 8 + 1
    os.extend([0] * (numoctetsneeded - len(os)))

    quintets = []
    cutoff = 256
    num = os[0]
    i = 0
    while len(quintets) < numquintets:
        i = i + 1
        assert len(
            os
        ) > i, "len(os): %s, i: %s, len(quintets): %s, numquintets: %s, lengthinbits: %s, numoctetsofdata: %s, numoctetsneeded: %s, os: %s" % (
            len(os),
            i,
            len(quintets),
            numquintets,
            lengthinbits,
            numoctetsofdata,
            numoctetsneeded,
            os,
        )
        num = num * 256
        num = num + os[i]
        if cutoff == 1:
            cutoff = 256
            continue
        cutoff = cutoff * 8
        quintet = num / cutoff
        quintets.append(quintet)
        num = num - (quintet * cutoff)

        cutoff = cutoff / 32
        quintet = num / cutoff
        quintets.append(quintet)
        num = num - (quintet * cutoff)

    if len(quintets) > numquintets:
        assert len(quintets) == (
            numquintets +
            1), "len(quintets): %s, numquintets: %s, quintets: %s" % (
                len(quintets),
                numquintets,
                quintets,
            )
        quintets = quintets[:numquintets]
    res = string.translate(string.join(map(chr, quintets), ''), v2ctranstable)
    assert could_be_base32_encoded_l(
        res, lengthinbits), "lengthinbits: %s, res: %s" % (
            lengthinbits,
            res,
        )
    return res
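
# A hedged round-trip sketch, assuming b2a_l() and a2b_l() above (and their
# translation tables) are in scope; Python 2, since the module builds byte
# strings with ord()/chr().
data = chr(0xAB) + chr(0xC0)        # 15 bits of data in 2 octets; the single
                                    # unused least-significant bit is zero, as
                                    # b2a_l()'s precondition requires.
encoded = b2a_l(data, 15)
assert len(encoded) == 3            # ceil(15 / 5) base-32 characters
assert a2b_l(encoded, 15) == data   # same lengthinbits on both sides round-trips
# Decoding with a2b() instead of a2b_l() would probably not return `data`,
# which is exactly the mismatch the docstring above warns about.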