Example #1
def curve25519_donna_scalarmult_int(n, p=9):
  """Performs scalarmult on the CV25519 elliptic curve.

  Args:
    n: An integer (scalar of an EC point).
    p: An integer containing the x coordinate of a group element (point on the EC).
  """
  k = (n & ~(1 << 255 | 7)) | 1 << 254  # Clamp the scalar as in RFC 7748.
  # ql is the field prime 2**255 - 19; (x2, z2) and (x3, z3) are the two
  # projective points tracked by the Montgomery ladder.
  ql, x1, x2, z2, x3, z3, do_swap = 2 ** 255 - 19, p, 1, 0, p, 1, 0
  for t in xrange(254, -1, -1):  # One ladder step per scalar bit.
    kt = (k >> t) & 1
    if do_swap ^ kt:
      x2, x3, z2, z3 = x3, x2, z3, z2
    do_swap = kt
    a, b = (x2 + z2) % ql, (x2 - z2) % ql
    aa, bb = (a * a) % ql, (b * b) % ql
    c, d = (x3 + z3) % ql, (x3 - z3) % ql
    da, cb = d * a % ql, c * b % ql
    d1, d2 = da + cb, da - cb
    x3, z3 = d1 * d1 % ql, x1 * d2 * d2 % ql
    x2, e = aa * bb % ql, (aa - bb) % ql
    z2 = e * (aa + 121665 * e) % ql
  if do_swap:
    x2, x3, z2, z3 = x3, x2, z3, z2
  return (x2 * pow(z2, ql - 2, ql)) % ql  # x2/z2 via Fermat inversion.
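
A quick way to sanity-check the function above without external test vectors is the Diffie-Hellman property: applying two scalars to the base point in either order must yield the same shared x coordinate. A minimal sketch (the scalar values below are arbitrary placeholders):

a, b = 0x123456789abcdef, 0xfedcba987654321
pub_a = curve25519_donna_scalarmult_int(a)  # a times the base point (x == 9).
pub_b = curve25519_donna_scalarmult_int(b)
assert (curve25519_donna_scalarmult_int(a, pub_b) ==
        curve25519_donna_scalarmult_int(b, pub_a))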
Example #2
def aes_key_wrap(keytable, data, is_slow_cipher=False, _pack=struct.pack, _unpack=struct.unpack):
  # https://tools.ietf.org/html/rfc3394
  if len(data) & 7:
    raise ValueError('Input data size of aes_key_wrap must be divisible by 8, got: %d' % len(data))
  r, n = [None], len(data) >> 3  # r[0] is a placeholder for the final A.
  n1 = n + 1
  for i in xrange(1, n1):
    r.append(data[(i << 3) - 8 : (i << 3)])
  a = b'\xa6' * 8  # Default IV from RFC 3394.
  encrypt_func = gpgs.get_cipher_cons('aes-256', is_slow_cipher, 0)[0](keytable).encrypt
  for j in xrange(6):
    for i in xrange(1, n1):
      b = encrypt_func(a + r[i])  # B = AES(K, A | R[i]).
      # A = MSB64(B) ^ t, where t = n * j + i; R[i] = LSB64(B).
      a, r[i] = _pack('>Q', _unpack('>Q', b[:8])[0] ^ (n * j + i)), b[8:]
  r[0] = a
  return b''.join(r)
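
A hedged usage sketch for aes_key_wrap (it assumes gpgs.get_cipher_cons accepts a raw 32-byte AES-256 key as keytable, which this snippet does not show; the key and data below are placeholders):

kek = b'\x00' * 32  # Hypothetical 256-bit key-encryption key.
cek = b'\x11' * 16  # Hypothetical 128-bit content key to wrap.
wrapped = aes_key_wrap(kek, cek)
assert len(wrapped) == len(cek) + 8  # RFC 3394 adds one 8-byte integrity block.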
Example #3
def _add_encrypted(self, cdata, _crc24=gpgs.crc24):
     cpre, _b2a, _bufcap, fwrite = self._cpre, self._b2a, self.bufcap, self._fwrite
     cpre += cdata  # Long copy. TODO(pts): Avoid it with multiple writes.
     del cdata
     lcpb = len(cpre) - len(cpre) % _bufcap
     #if lcpb < _bufcap:  # May happen only if called from .close().
     if _b2a:
         abuf, asize, acrc, _buffer = self._abuf, self._asize, self._acrc, buffer
         for i in xrange(0, lcpb, _bufcap):  # Usually only once.
             if self._ciphp:
                 # TODO(pts): Reset state on fwrite failure (everywhere).
                 header = self._packet_hc + self._qhc  # packet_type in (9, 18).
                 self._ciphp = False
             else:
                 header = self._qhc
             # We copy it to make crc24 faster on Python 2.7.
             data = cpre[i:i + _bufcap]
             acrc = _crc24(data, _crc24(header, acrc))  # Very slow.
             abuf.append(header)
             abuf.append(data)
             asize += len(header) + len(data)
             if asize >= 48:
                 adata, lam = b''.join(abuf), asize % 48
                 lal = asize - lam
                 # Loop so that '\n' is emitted after every 48 raw bytes
                 # (each 48-byte chunk encodes to 64 base64 characters).
                 for j in xrange(0, lal, 48):
                     fwrite(_b2a(_buffer(adata, j, 48)))  # Has trailing '\n'.
                 abuf[:] = (adata[lal:],)
                 asize = lam
         self._cpre, self._asize, self._acrc = cpre[lcpb:], asize, acrc
     else:
         for i in xrange(0, lcpb, _bufcap):  # Usually only once.
             if self._ciphp:
                 # TODO(pts): Reset state on fwrite failure (everywhere).
                 fwrite(self._packet_hc + self._qhc)  # packet_type in (9, 18).
                 self._ciphp = False
             else:
                 fwrite(self._qhc)
             fwrite(buffer(cpre, i, _bufcap))
         self._cpre = cpre[lcpb:]
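
Why the chunks of 48 bytes above: base64 expands 3 raw bytes into 4 characters, so 48 raw bytes become exactly one 64-character ASCII-armor line. A quick standard-library check (illustration only; _b2a above is whatever encoder the instance was constructed with):

import binascii
assert len(binascii.b2a_base64(b'x' * 48)) == 65  # 64 chars plus the '\n'.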
Example #4
def slow_sha512_process(block,
                        hh,
                        _izip=izip,
                        _rotr64=_sha512_rotr64,
                        _k=_sha512_k):
    w = [0] * 80
    w[:16] = struct.unpack('>16Q', block)
    for i in xrange(16, 80):
        # Message schedule expansion with sigma0 and sigma1 (FIPS 180-4).
        w[i] = (w[i - 16] + (_rotr64(w[i - 15], 1) ^ _rotr64(w[i - 15], 8) ^
                             (w[i - 15] >> 7)) + w[i - 7] +
                (_rotr64(w[i - 2], 19) ^ _rotr64(w[i - 2], 61) ^
                 (w[i - 2] >> 6))) & 0xffffffffffffffff
    a, b, c, d, e, f, g, h = hh
    for i in xrange(80):
        # t1 = h + Sigma1(e) + Ch(e, f, g) + K[i] + W[i];
        # t2 = Sigma0(a) + Maj(a, b, c).
        t1 = (h + (_rotr64(e, 14) ^ _rotr64(e, 18) ^ _rotr64(e, 41)) +
              ((e & f) ^ ((~e) & g)) + _k[i] + w[i])
        t2 = ((_rotr64(a, 28) ^ _rotr64(a, 34) ^ _rotr64(a, 39)) +
              ((a & b) ^ (a & c) ^ (b & c)))
        a, b, c, d, e, f, g, h = ((t1 + t2) & 0xffffffffffffffff, a, b, c,
                                  (d + t1) & 0xffffffffffffffff, e, f, g)
    return [(x + y) & 0xffffffffffffffff
            for x, y in _izip(hh, (a, b, c, d, e, f, g, h))]
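
A hedged single-block check for slow_sha512_process: pad a short message to one 128-byte block as FIPS 180-4 prescribes and compare against hashlib. The initial hash values are the standard SHA-512 ones (an assumption: the surrounding module presumably defines them next to _sha512_k):

import hashlib, struct

_sha512_iv = (0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b,
              0xa54ff53a5f1d36f1, 0x510e527fade682d1, 0x9b05688c2b3e6c1f,
              0x1f83d9abfb41bd6b, 0x5be0cd19137e2179)
msg = b'abc'
block = msg + b'\x80' + b'\x00' * (128 - len(msg) - 1 - 16)
block += struct.pack('>QQ', 0, len(msg) * 8)  # 128-bit message bit length.
hh = slow_sha512_process(block, _sha512_iv)
assert b''.join(struct.pack('>Q', h) for h in hh) == hashlib.sha512(msg).digest()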
Example #5
def slow_sha1_process(block, hh, _izip=izip, _rotl=_sha1_rotl32):
    w = [0] * 80
    w[:16] = struct.unpack('>16L', block)
    for i in xrange(16, 80):
        w[i] = _rotl(w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16], 1)
    a, b, c, d, e = hh
    for i in xrange(0, 20):
        f = (b & c) | ((~b) & d)  # Ch(b, c, d).
        a, b, c, d, e = ((_rotl(a, 5) + f + e + 0x5a827999 + w[i]) & 0xffffffff,
                         a, _rotl(b, 30), c, d)
    for i in xrange(20, 40):
        f = b ^ c ^ d  # Parity(b, c, d).
        a, b, c, d, e = ((_rotl(a, 5) + f + e + 0x6ed9eba1 + w[i]) & 0xffffffff,
                         a, _rotl(b, 30), c, d)
    for i in xrange(40, 60):
        f = (b & c) | (b & d) | (c & d)  # Maj(b, c, d).
        a, b, c, d, e = ((_rotl(a, 5) + f + e + 0x8f1bbcdc + w[i]) & 0xffffffff,
                         a, _rotl(b, 30), c, d)
    for i in xrange(60, 80):
        f = b ^ c ^ d  # Parity(b, c, d).
        a, b, c, d, e = ((_rotl(a, 5) + f + e + 0xca62c1d6 + w[i]) & 0xffffffff,
                         a, _rotl(b, 30), c, d)
    return [(x + y) & 0xffffffff for x, y in _izip(hh, (a, b, c, d, e))]
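
The same kind of hedged check for slow_sha1_process, with the standard SHA-1 initial state (again an assumption; the snippet only shows the compression function):

import hashlib, struct

_sha1_iv = (0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0)
msg = b'abc'
block = msg + b'\x80' + b'\x00' * (64 - len(msg) - 1 - 8)
block += struct.pack('>Q', len(msg) * 8)  # 64-bit message bit length.
hh = slow_sha1_process(block, _sha1_iv)
assert b''.join(struct.pack('>L', h) for h in hh) == hashlib.sha1(msg).digest()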
Example #6
def slow_sha256_process(block,
                        hh,
                        _izip=izip,
                        _rotr32=_sha256_rotr32,
                        _k=_sha256_k):
    w = [0] * 64
    w[:16] = struct.unpack('>16L', block)
    for i in xrange(16, 64):
        # Message schedule expansion with sigma0 and sigma1 (FIPS 180-4).
        w[i] = (w[i - 16] + (_rotr32(w[i - 15], 7) ^ _rotr32(w[i - 15], 18) ^
                             (w[i - 15] >> 3)) + w[i - 7] +
                (_rotr32(w[i - 2], 17) ^ _rotr32(w[i - 2], 19) ^
                 (w[i - 2] >> 10))) & 0xffffffff
    a, b, c, d, e, f, g, h = hh
    for i in xrange(64):
        # t1 = h + Sigma1(e) + Ch(e, f, g) + K[i] + W[i];
        # t2 = Sigma0(a) + Maj(a, b, c).
        t1 = (h + (_rotr32(e, 6) ^ _rotr32(e, 11) ^ _rotr32(e, 25)) +
              ((e & f) ^ ((~e) & g)) + _k[i] + w[i])
        t2 = ((_rotr32(a, 2) ^ _rotr32(a, 13) ^ _rotr32(a, 22)) +
              ((a & b) ^ (a & c) ^ (b & c)))
        a, b, c, d, e, f, g, h = ((t1 + t2) & 0xffffffff, a, b, c,
                                  (d + t1) & 0xffffffff, e, f, g)
    return [(x + y) & 0xffffffff
            for x, y in _izip(hh, (a, b, c, d, e, f, g, h))]
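
And the corresponding hedged check for slow_sha256_process, assuming the standard SHA-256 initial hash values:

import hashlib, struct

_sha256_iv = (0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
              0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19)
msg = b'abc'
block = msg + b'\x80' + b'\x00' * (64 - len(msg) - 1 - 8)
block += struct.pack('>Q', len(msg) * 8)  # 64-bit message bit length.
hh = slow_sha256_process(block, _sha256_iv)
assert b''.join(struct.pack('>L', h) for h in hh) == hashlib.sha256(msg).digest()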
Example #7
def update(self, m):
     if not isinstance(m, binary_types):
         raise TypeError('update() argument 1 must be string, not %s' %
                         (type(m).__name__))
     if not m:
         return
     buf, process = self._buffer, slow_md5_process
     lb, lm = len(buf), len(m)
     self._counter += lm
     self._buffer = None
     if lb + lm < 64:
         buf += binary_type(m)
         self._buffer = buf
     else:
         hh, i, _buffer = self._h, 0, buffer
         if lb:
             assert lb < 64
             i = 64 - lb
             hh = process(buf + binary_type(m[:i]), hh)
         for i in xrange(i, lm - 63, 64):  # Process full 64-byte blocks.
             hh = process(_buffer(m, i, 64), hh)
         self._h = hh
         self._buffer = binary_type(m[lm - ((lm - i) & 63):])  # Keep the tail.
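
A hedged illustration of the buffering invariant in update: after every call, self._buffer holds the sub-block tail and is always shorter than 64 bytes. (SlowMd5 is a hypothetical name for the class this method belongs to, which the snippet does not show.)

h = SlowMd5()          # Hypothetical constructor of the enclosing class.
h.update(b'x' * 100)   # One 64-byte block processed; 36 bytes buffered.
h.update(b'y' * 28)    # 36 + 28 == 64: one more block; buffer empties.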
Example #8
def _flush_pbuf(self, prem, _pack=struct.pack, _buffer=buffer):
     if prem <= 0:
         pbuf, qbuf, ze_compress, _bufcap, qhc = (
             self._pbuf, self._qbuf, self._ze_compress, self.bufcap, self._qhc)
         if not prem:
             # Avoiding this long copy and doing `for data in pbuf:' instead
             # makes it a bit slower with bufcap == 8192. That's probably because
             # calling ze_compress twice is slow.
             data = b''.join(pbuf)  # Long copy.
             assert prem == _bufcap - len(data) + len(pbuf[0])  # Consistency.
             dataq = ze_compress(data)
             if dataq:
                 qbuf.append(dataq)
                 self._qsize += len(dataq)
             if self._qsize >= _bufcap:
                 self._flush_qbuf()
             pbuf[:] = (qhc, )
             self.write_hint = _bufcap  # Must be last for canary.
         elif prem > -_bufcap:  # This is just speedup, could be commented out.
             data = b''.join(pbuf)
             assert prem == _bufcap - len(data) + len(pbuf[0])  # Consistency.
             dataq = ze_compress(data[:prem])  # Relies on prem < 0 here.
             if dataq:
                 qbuf.append(dataq)
                 self._qsize += len(dataq)
             if self._qsize >= _bufcap:
                 self._flush_qbuf()
             pbuf[:] = (qhc, data[prem:])  # Long copy. Relies on prem < 0.
             self.write_hint = prem + _bufcap  # Must be last for canary.
         else:
             lda = sum(len(s) for s in pbuf) - len(pbuf[0])
             assert prem == _bufcap - lda  # Consistency.
             ldam = lda % _bufcap
             #assert lda >= (_bufcap << 1)  # Follows from `prem > -_bufcap'.
             datap = pbuf.pop()
             i = _bufcap - (lda - len(datap))
             assert 0 < i < _bufcap  # Only last one is too long.
             #assert len(datap) - ldam - i == lda - _bufcap - ldam  # True but slow.
             #assert not (len(datap) - ldam - i) % _bufcap  # True but slow.
             pbuf.append(datap[:i])
             dataq = b''.join(pbuf)
             assert len(dataq) - len(pbuf[0]) == _bufcap  # Consistency.
             dataq = ze_compress(dataq)
             if dataq:
                 qbuf.append(dataq)
                 self._qsize += len(dataq)
             if self._qsize >= _bufcap:
                 self._flush_qbuf()
             for i in xrange(i, len(datap) - ldam, _bufcap):
                 dataq = ze_compress(qhc)
                 if dataq:
                     qbuf.append(dataq)
                     self._qsize += len(dataq)
                 dataq = ze_compress(_buffer(datap, i, _bufcap))
                 if dataq:
                     qbuf.append(dataq)
                     self._qsize += len(dataq)
                 if self._qsize >= _bufcap:
                     self._flush_qbuf()
             if ldam:
                 pbuf[:] = (qhc, datap[-ldam:])  # Long copy, unavoidable.
                 self.write_hint = _bufcap - ldam  # Must be last for canary.
             else:
                 pbuf[:] = (qhc, )
                 self.write_hint = _bufcap  # Must be last for canary.
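
A reading of the prem bookkeeping above (inferred from the consistency asserts, not stated elsewhere in the snippet): write_hint, passed in as prem, counts how many payload bytes still fit into the current bufcap-sized partial packet, so prem == bufcap - buffered_payload holds on entry, with pbuf[0] (the packet header) excluded from the payload count. prem == 0 means exactly one full packet is buffered (first branch); -bufcap < prem < 0 means between one and two packets' worth (middle branch); anything lower falls through to the final branch, which peels off full bufcap-sized packets in the xrange(...) loop.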
Example #9
def _done_pbuf(self,
               _pack=struct.pack,
               _crc24=gpgs.crc24,
               _glnph=gpgs.get_last_nonpartial_packet_header):
     # Further .write()s will fail on tuple.append.
     pbuf, prem, self._pbuf, self.write_hint, _bufcap = (
         self._pbuf, self.write_hint, (), 0, self.bufcap)
     if not pbuf:
         return  # Canary not found, don't flush.
     lda = sum(len(s) for s in pbuf) - len(pbuf[0])
     if prem != _bufcap - lda:
         return  # Canary not found, don't flush.
     pbuf[0] = _glnph(lda, len(pbuf[0]) > 1 and 11)
     pbuf = b''.join(pbuf)
     qbuf, qsize = self._qbuf, self._qsize
     qbuf.append(self._ze_compress(pbuf))
     del pbuf  # Save memory.
     qsize += len(qbuf[-1])
     if self._ze:
         qbuf.append(self._ze.flush())
         qsize += len(qbuf[-1])
         self._ze = ()
     if self._mdc_obj:
         qbuf.append(b'\xd3\x14')  # MDC packet header: tag 19, length 20.
         qsize += 2
     data = b''.join(qbuf)
     assert len(data) == qsize
     self._qbuf, qbuf, self._qsize = (), (), 0  # Further .write()s will fail.
     if self._mdc_obj:
         self._mdc_update(data)
         data += self._mdc_obj.digest()  # Long copy, once.
         self._mdc_obj = ()
     self._mdc_update = ()
     ldbs = len(data) % self._bs
     j = len(data) - ldbs
     if j:
         self._add_encrypted(self._cfb_encrypt(buffer(data, 0, j)))
     if ldbs:  # Encrypt the last partial block.
         self._add_encrypted(
             self._cfb_encrypt(data[j:] + b'\0' * (self._bs - ldbs))[:ldbs])
     data = self._cpre  # Flush the last encrypted partial packet.
     self._cpre = ()
     header = _glnph(len(data), self._ciphp and self._packet_type)
     fwrite = self._fwrite
     if self._b2a:
         abuf, asize, acrc, _b2a = self._abuf, self._asize, self._acrc, self._b2a
         self._abuf, self._asize, self._acrc, self._b2a = (), 0, (), ()
         acrc = _crc24(data, _crc24(header, acrc))
         abuf.append(header)
         abuf.append(data)
         asize += len(header) + len(data)
         adata, lam = b''.join(abuf), asize % 48
         assert len(adata) == asize
         lal, _buffer = asize - lam, buffer
         for i in xrange(0, lal, 48):
             fwrite(_b2a(_buffer(adata, i, 48)))  # Contains trailing b'\n'.
         abuf[:] = (adata[lal:], )
         adata, asize = (), lam
         fwrite(gpgs.get_gpg_armor_trailer(abuf, asize, acrc, _b2a))
     else:
         fwrite(header)
         fwrite(data)
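
To summarize the teardown sequence implemented above: the final partial packet gets a definitive (non-partial) header, the compressor is flushed, the 22-byte MDC packet (header b'\xd3\x14' plus a 20-byte SHA-1 digest, per RFC 4880) is appended when modification detection is enabled, the tail is CFB-encrypted with a zero-padded final block truncated back to its true size, and the last encrypted chunk is written out under a non-partial packet header, followed by the CRC-24 armor trailer when ASCII armor is active.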