def scalarmult_key(dst, P, s):
    dst = _ensure_dst_key(dst)
    crypto.decodepoint_into(tmp_pt_1, P)
    crypto.decodeint_into_noreduce(tmp_sc_1, s)
    crypto.scalarmult_into(tmp_pt_2, tmp_pt_1, tmp_sc_1)
    crypto.encodepoint_into(dst, tmp_pt_2)
    return dst

def sc_mul(dst, a, b):
    dst = _ensure_dst_key(dst)
    crypto.decodeint_into_noreduce(tmp_sc_1, a)
    crypto.decodeint_into_noreduce(tmp_sc_2, b)
    crypto.sc_mul_into(tmp_sc_3, tmp_sc_1, tmp_sc_2)
    crypto.encodeint_into(dst, tmp_sc_3)
    return dst

def _acc(self, scalar, point):
    crypto.decodeint_into_noreduce(tmp_sc_1, scalar)
    crypto.decodepoint_into(tmp_pt_2, point)
    crypto.scalarmult_into(tmp_pt_3, tmp_pt_2, tmp_sc_1)
    crypto.point_add_into(self.acc, self.acc, tmp_pt_3)
    self.current_idx += 1
    self.size += 1

def add_keys2(dst, a, b, B):
    dst = _ensure_dst_key(dst)
    crypto.decodeint_into_noreduce(tmp_sc_1, a)
    crypto.decodeint_into_noreduce(tmp_sc_2, b)
    crypto.decodepoint_into(tmp_pt_1, B)
    crypto.add_keys2_into(tmp_pt_2, tmp_sc_1, tmp_sc_2, tmp_pt_1)
    crypto.encodepoint_into(dst, tmp_pt_2)
    return dst

def test_bpp_bprime(self):
    N, M = 64, 4
    MN = N * M
    y = unhexlify(
        b'60421950bee0aab949e63336db1eb9532dba6b4599c5cd9fb1dbde909114100e'
    )
    z = unhexlify(
        b'e0408b528e9d35ccb8386b87f39b85c724740644f4db412483a8852cdb3ceb00'
    )
    zc = crypto.decodeint_into(None, z)
    z_sq = bp._sc_mul(None, z, z)
    sv = [1234, 8789, 4455, 6697]
    sv = [crypto.encodeint_into(None, crypto.Scalar(x)) for x in sv]
    num_inp = len(sv)
    sc_zero = crypto.decodeint_into_noreduce(None, bp._ZERO)
    sc_mone = crypto.decodeint_into_noreduce(None, bp._MINUS_ONE)

    def e_xL(idx, d=None):
        j, i = idx // bp._BP_N, idx % bp._BP_N
        r = None
        if j >= num_inp:
            r = sc_mone
        elif sv[j][i // 8] & (1 << i % 8):
            r = sc_zero
        else:
            r = sc_mone
        if d:
            return crypto.sc_copy(d, r)
        return r

    aR = bp.KeyVEval(MN, lambda i, d: e_xL(i, d), raw=True)
    d_vct = bp.VctD(N, M, z_sq, raw=True)
    ypow_back = bp.KeyVPowersBackwards(MN + 1, y, raw=True)
    aR1_sc1 = crypto.Scalar()

    def aR1_fnc(i, d):
        crypto.sc_add_into(aR1_sc1, aR.to(i), zc)
        crypto.sc_muladd_into(aR1_sc1, d_vct[i], ypow_back[MN - i], aR1_sc1)
        return crypto.encodeint_into(d, aR1_sc1)

    bprime = bp.KeyVEval(MN, aR1_fnc, raw=False)  # aR1

    b64 = bp._copy_key(None, bprime.to(64))
    b65 = bp._copy_key(None, bprime.to(65))
    b128 = bp._copy_key(None, bprime.to(128))
    b65_2 = bp._copy_key(None, bprime.to(65))
    b64_2 = bp._copy_key(None, bprime.to(64))
    _ = bprime[89]
    b128_2 = bp._copy_key(None, bprime.to(128))

    self.assertEqual(b64, b64_2)
    self.assertEqual(b65, b65_2)
    self.assertEqual(b128, b128_2)

def vector_z_two_i(logN, zpow, twoN, i, dst_sc=None):
    """
    0...N|N+1...2N|2N+1...3N|....

    zt[i] = z^b * 2^c, where
      b = 2 + blockNumber; blockNumber is the index of the N-block
      c = i % N = i - N * blockNumber
    """
    j = i >> logN
    crypto.decodeint_into_noreduce(tmp_sc_1, zpow.to(j + 2))
    crypto.decodeint_into_noreduce(tmp_sc_2, twoN.to(i & ((1 << logN) - 1)))
    crypto.sc_mul_into(dst_sc, tmp_sc_1, tmp_sc_2)
    return dst_sc

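# Illustrative sketch (not part of the module): the index arithmetic above on plain
# Python ints. The helper name _example_z_two_index and the explicit Ed25519 group
# order are assumptions added for illustration only; the shift/mask form used by
# vector_z_two_i is equivalent to the division/modulo form of the docstring.
def _example_z_two_index():
    L_ORDER = 2**252 + 27742317777372353535851937790883648493  # Ed25519 group order
    logN, z = 6, 7                      # toy challenge z = 7, block size N = 64
    N = 1 << logN
    for i in (0, 63, 64, 70, 200):
        j = i >> logN                   # block number, as computed above
        c = i & (N - 1)                 # position inside the block, i.e. i % N
        assert (pow(z, j + 2, L_ORDER) * pow(2, c, L_ORDER)) % L_ORDER == (
            z ** (i // N + 2) * 2 ** (i % N)
        ) % L_ORDER
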
def inner_product(a, b, dst=None):
    if len(a) != len(b):
        raise ValueError("Incompatible sizes of a and b")

    dst = _ensure_dst_key(dst)
    crypto.sc_init_into(tmp_sc_1, 0)

    for i in range(len(a)):
        crypto.decodeint_into_noreduce(tmp_sc_2, a.to(i))
        crypto.decodeint_into_noreduce(tmp_sc_3, b.to(i))
        crypto.sc_muladd_into(tmp_sc_1, tmp_sc_2, tmp_sc_3, tmp_sc_1)
        gc_iter(i)

    crypto.encodeint_into(dst, tmp_sc_1)
    return dst

def vector_exponent_custom(A, B, a, b, dst=None):
    dst = _ensure_dst_key(dst)
    crypto.identity_into(tmp_pt_2)

    for i in range(len(a)):
        crypto.decodeint_into_noreduce(tmp_sc_1, a.to(i))
        crypto.decodepoint_into(tmp_pt_3, A.to(i))
        crypto.decodeint_into_noreduce(tmp_sc_2, b.to(i))
        crypto.decodepoint_into(tmp_pt_4, B.to(i))
        crypto.add_keys3_into(tmp_pt_1, tmp_sc_1, tmp_pt_3, tmp_sc_2, tmp_pt_4)
        crypto.point_add_into(tmp_pt_2, tmp_pt_2, tmp_pt_1)
        gc_iter(i)

    crypto.encodepoint_into(dst, tmp_pt_2)
    return dst

def vector_powers(x, n, dst=None, dynamic=False, **kwargs):
    if dynamic:
        return KeyVPowers(n, x, **kwargs)

    dst = _ensure_dst_keyvect(dst, n)
    if n == 0:
        return dst
    dst.read(0, _ONE)
    if n == 1:
        return dst
    dst.read(1, x)

    crypto.decodeint_into_noreduce(tmp_sc_1, x)
    crypto.decodeint_into_noreduce(tmp_sc_2, x)
    for i in range(2, n):
        crypto.sc_mul_into(tmp_sc_1, tmp_sc_1, tmp_sc_2)
        crypto.encodeint_into(tmp_bf_0, tmp_sc_1)
        dst.read(i, tmp_bf_0)
        gc_iter(i)
    return dst

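# Illustrative sketch (not part of the module): the same running-product idea on
# plain ints modulo the group order; vector_powers above yields the encoded sequence
# [1, x, x^2, ..., x^(n-1)]. _example_powers is a hypothetical helper for
# illustration only.
def _example_powers(x, n):
    L_ORDER = 2**252 + 27742317777372353535851937790883648493
    out, cur = [], 1
    for _ in range(n):
        out.append(cur)
        cur = (cur * x) % L_ORDER       # running product, mirrors sc_mul_into in the loop
    return out

# _example_powers(3, 4) == [1, 3, 9, 27]
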
def hadamard_fold(v, a, b, into=None, into_offset=0):
    """
    Folds a curvepoint array using a two-way scaled Hadamard product.

    ln = len(v); h = ln // 2
    v[i] = a * v[i] + b * v[h + i]
    """
    h = len(v) // 2
    crypto.decodeint_into_noreduce(tmp_sc_1, a)
    crypto.decodeint_into_noreduce(tmp_sc_2, b)
    into = into if into else v

    for i in range(h):
        crypto.decodepoint_into(tmp_pt_1, v.to(i))
        crypto.decodepoint_into(tmp_pt_2, v.to(h + i))
        crypto.add_keys3_into(tmp_pt_3, tmp_sc_1, tmp_pt_1, tmp_sc_2, tmp_pt_2)
        crypto.encodepoint_into(tmp_bf_0, tmp_pt_3)
        into.read(i + into_offset, tmp_bf_0)
        gc_iter(i)

    return into

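# Illustrative sketch (not part of the module): the fold halves the vector length,
# writing into[i] = a * v[i] + b * v[h + i]. Below the same combination is shown on
# plain ints modulo the group order, standing in for the scalar-multiplied curve
# points; _example_fold is a hypothetical helper for illustration only.
def _example_fold(v, a, b):
    L_ORDER = 2**252 + 27742317777372353535851937790883648493
    h = len(v) // 2
    return [(a * v[i] + b * v[h + i]) % L_ORDER for i in range(h)]

# _example_fold([1, 2, 3, 4], a=10, b=100) == [310, 420]; repeated folds with halved
# inputs reduce the vector to a single element, as in the prover's while-loop.
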
def sc_mulsub(dst, a, b, c):
    dst = _ensure_dst_key(dst)
    crypto.decodeint_into_noreduce(tmp_sc_1, a)
    crypto.decodeint_into_noreduce(tmp_sc_2, b)
    crypto.decodeint_into_noreduce(tmp_sc_3, c)
    crypto.sc_mulsub_into(tmp_sc_4, tmp_sc_1, tmp_sc_2, tmp_sc_3)
    crypto.encodeint_into(dst, tmp_sc_4)
    return dst

def cross_inner_product(l0, r0, l1, r1):
    """
    t1_1 = l0 . r1,  t1_2 = l1 . r0
    t1 = t1_1 + t1_2,  t2 = l1 . r1
    """
    sc_t1_1, sc_t1_2, sc_t2 = alloc_scalars(3)
    cl0, cr0, cl1, cr1 = alloc_scalars(4)

    for i in range(len(l0)):
        crypto.decodeint_into_noreduce(cl0, l0.to(i))
        crypto.decodeint_into_noreduce(cr0, r0.to(i))
        crypto.decodeint_into_noreduce(cl1, l1.to(i))
        crypto.decodeint_into_noreduce(cr1, r1.to(i))

        crypto.sc_muladd_into(sc_t1_1, cl0, cr1, sc_t1_1)
        crypto.sc_muladd_into(sc_t1_2, cl1, cr0, sc_t1_2)
        crypto.sc_muladd_into(sc_t2, cl1, cr1, sc_t2)
        gc_iter(i)

    crypto.sc_add_into(sc_t1_1, sc_t1_1, sc_t1_2)
    return crypto.encodeint(sc_t1_1), crypto.encodeint(sc_t2)

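# Illustrative sketch (not part of the module): the two values returned above are the
# cross coefficients of t(x) = <l0 + x*l1, r0 + x*r1> = t0 + t1*x + t2*x^2, namely
# t1 = <l0, r1> + <l1, r0> and t2 = <l1, r1>. The check below uses plain ints modulo
# the group order; _example_cross_terms is a hypothetical helper for illustration.
def _example_cross_terms():
    L_ORDER = 2**252 + 27742317777372353535851937790883648493

    def ip(u, v):
        return sum(a * b for a, b in zip(u, v)) % L_ORDER

    l0, r0, l1, r1 = [1, 2], [3, 4], [5, 6], [7, 8]
    t1 = (ip(l0, r1) + ip(l1, r0)) % L_ORDER
    t2 = ip(l1, r1)

    x = 9
    lx = [(a + x * b) % L_ORDER for a, b in zip(l0, l1)]
    rx = [(a + x * b) % L_ORDER for a, b in zip(r0, r1)]
    # <l(x), r(x)> equals t0 + t1*x + t2*x^2
    assert ip(lx, rx) == (ip(l0, r0) + t1 * x + t2 * x * x) % L_ORDER
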
def scalar_fold(v, a, b, into=None, into_offset=0):
    """
    ln = len(v); h = ln // 2
    v[i] = v[i] * a + v[h + i] * b
    """
    h = len(v) // 2
    crypto.decodeint_into_noreduce(tmp_sc_1, a)
    crypto.decodeint_into_noreduce(tmp_sc_2, b)
    into = into if into else v

    for i in range(h):
        crypto.decodeint_into_noreduce(tmp_sc_3, v.to(i))
        crypto.decodeint_into_noreduce(tmp_sc_4, v.to(h + i))
        crypto.sc_mul_into(tmp_sc_3, tmp_sc_3, tmp_sc_1)
        crypto.sc_mul_into(tmp_sc_4, tmp_sc_4, tmp_sc_2)
        crypto.sc_add_into(tmp_sc_3, tmp_sc_3, tmp_sc_4)
        crypto.encodeint_into(tmp_bf_0, tmp_sc_3)
        into.read(i + into_offset, tmp_bf_0)
        gc_iter(i)

    return into

def invert(dst, x):
    dst = _ensure_dst_key(dst)
    crypto.decodeint_into_noreduce(tmp_sc_1, x)
    crypto.sc_inv_into(tmp_sc_2, tmp_sc_1)
    crypto.encodeint_into(dst, tmp_sc_2)
    return dst

def scalarmult_base(dst, x):
    dst = _ensure_dst_key(dst)
    crypto.decodeint_into_noreduce(tmp_sc_1, x)
    crypto.scalarmult_base_into(tmp_pt_1, tmp_sc_1)
    crypto.encodepoint_into(dst, tmp_pt_1)
    return dst

def _prove_batch_main(
    self, V, gamma, aL, aR, hash_cache, logM, logN, M, N, proof_v8=False
):
    logMN = logM + logN
    MN = M * N
    hash_vct_to_scalar(hash_cache, V)

    # Extended precomputed GiHi
    Gprec = self._gprec_aux(MN)
    Hprec = self._hprec_aux(MN)

    # PAPER LINES 38-39
    alpha = sc_gen()
    ve = _ensure_dst_key()
    A = _ensure_dst_key()
    vector_exponent_custom(Gprec, Hprec, aL, aR, ve)
    add_keys(A, ve, scalarmult_base(tmp_bf_1, alpha))
    if not proof_v8:
        scalarmult_key(A, A, _INV_EIGHT)
    self.gc(11)

    # PAPER LINES 40-42
    sL = self.sL_vct(MN)
    sR = self.sR_vct(MN)
    rho = sc_gen()
    vector_exponent_custom(Gprec, Hprec, sL, sR, ve)
    S = _ensure_dst_key()
    add_keys(S, ve, scalarmult_base(tmp_bf_1, rho))
    if not proof_v8:
        scalarmult_key(S, S, _INV_EIGHT)
    del ve
    self.gc(12)

    # PAPER LINES 43-45
    y = _ensure_dst_key()
    hash_cache_mash(y, hash_cache, A, S)
    if y == _ZERO:
        return (0,)

    z = _ensure_dst_key()
    hash_to_scalar(hash_cache, y)
    copy_key(z, hash_cache)
    if z == _ZERO:
        return (0,)

    # Polynomial construction by coefficients
    zMN = const_vector(z, MN)
    l0 = _ensure_dst_keyvect(None, MN)
    vector_subtract(aL, zMN, l0)
    l1 = sL
    self.gc(13)

    # This computes the ugly sum/concatenation from PAPER LINE 65
    # r0 = aR + z
    r0 = vector_add(aR, zMN)
    del zMN
    self.gc(14)

    # r0 = r0 \odot yMN => r0[i] = r0[i] * y^i
    # r1 = sR \odot yMN => r1[i] = sR[i] * y^i
    yMN = vector_powers(y, MN, dynamic=False)
    hadamard(r0, yMN, dst=r0)
    self.gc(15)

    # r0 = r0 + zero_twos
    zpow = vector_powers(z, M + 2)
    twoN = self._two_aux(MN)
    zero_twos = vector_z_two(N, logN, M, zpow, twoN, dynamic=True, raw=True)
    vector_gen(
        r0,
        len(r0),
        lambda i, d: crypto.encodeint_into(
            d,
            crypto.sc_add_into(
                tmp_sc_1,
                zero_twos[i],  # noqa: F821
                crypto.decodeint_into_noreduce(tmp_sc_2, r0.to(i)),  # noqa: F821
            ),
        ),
    )
    del (zero_twos, twoN)
    self.gc(15)

    # Polynomial construction before PAPER LINE 46
    # r1 = KeyVEval(MN, lambda i, d: sc_mul(d, yMN[i], sR[i]))
    # r1 optimization possible, but has clashing sc registers.
    # Moreover, max memory complexity is 4MN as below (while loop).
    r1 = hadamard(yMN, sR, yMN)  # re-use yMN vector for r1
    del (yMN, sR)
    self.gc(16)

    # Inner products
    # l0 = aL - z           r0 = ((aR + z) \cdot ypow) + zt
    # l1 = sL               r1 = sR \cdot ypow
    # t1_1 = l0 . r1,       t1_2 = l1 . r0
    # t1 = t1_1 + t1_2,     t2 = l1 . r1
    # l = l0 \odot x*l1     r = r0 \odot x*r1
    t1, t2 = cross_inner_product(l0, r0, l1, r1)
    self.gc(17)

    # PAPER LINES 47-48
    tau1, tau2 = sc_gen(), sc_gen()
    T1, T2 = _ensure_dst_key(), _ensure_dst_key()

    add_keys(T1, scalarmultH(tmp_bf_1, t1), scalarmult_base(tmp_bf_2, tau1))
    if not proof_v8:
        scalarmult_key(T1, T1, _INV_EIGHT)

    add_keys(T2, scalarmultH(tmp_bf_1, t2), scalarmult_base(tmp_bf_2, tau2))
    if not proof_v8:
        scalarmult_key(T2, T2, _INV_EIGHT)
    del (t1, t2)
    self.gc(17)

    # PAPER LINES 49-51
    x = _ensure_dst_key()
    hash_cache_mash(x, hash_cache, z, T1, T2)
    if x == _ZERO:
        return (0,)

    # PAPER LINES 52-53
    taux = _ensure_dst_key()
    copy_key(taux, _ZERO)
    sc_mul(taux, tau1, x)
    xsq = _ensure_dst_key()
    sc_mul(xsq, x, x)
    sc_muladd(taux, tau2, xsq, taux)
    del (xsq, tau1, tau2)
    for j in range(1, len(V) + 1):
        sc_muladd(taux, zpow.to(j + 1), gamma[j - 1], taux)
    del zpow
    self.gc(18)

    mu = _ensure_dst_key()
    sc_muladd(mu, x, rho, alpha)
    del (rho, alpha)

    # PAPER LINES 54-57
    # l = l0 \odot x*l1, has to be evaluated as it becomes aprime in the loop
    l = vector_gen(
        l0,
        len(l0),
        lambda i, d: sc_add(d, d, sc_mul(tmp_bf_1, l1.to(i), x)),  # noqa: F821
    )
    del (l0, l1, sL)
    self.gc(19)

    # r = r0 \odot x*r1, has to be evaluated as it becomes bprime in the loop
    r = vector_gen(
        r0,
        len(r0),
        lambda i, d: sc_add(d, d, sc_mul(tmp_bf_1, r1.to(i), x)),  # noqa: F821
    )
    t = inner_product(l, r)
    del (r1, r0)
    self.gc(19)

    # PAPER LINES 32-33
    x_ip = hash_cache_mash(None, hash_cache, x, taux, mu, t)
    if x_ip == _ZERO:
        return 0, None

    # PHASE 2
    # These are used in the inner product rounds
    nprime = MN
    Gprime = _ensure_dst_keyvect(None, MN)
    Hprime = _ensure_dst_keyvect(None, MN)
    aprime = l
    bprime = r
    yinv = invert(None, y)
    yinvpow = init_key(_ONE)
    self.gc(20)

    for i in range(0, MN):
        Gprime.read(i, Gprec.to(i))
        scalarmult_key(tmp_bf_0, Hprec.to(i), yinvpow)
        Hprime.read(i, tmp_bf_0)
        sc_mul(yinvpow, yinvpow, yinv)
        gc_iter(i)
    self.gc(21)

    L = _ensure_dst_keyvect(None, logMN)
    R = _ensure_dst_keyvect(None, logMN)
    cL = _ensure_dst_key()
    cR = _ensure_dst_key()
    winv = _ensure_dst_key()
    w_round = _ensure_dst_key()
    tmp = _ensure_dst_key()
    round = 0
    _tmp_k_1 = _ensure_dst_key()

    # PAPER LINE 13
    while nprime > 1:
        # PAPER LINE 15
        npr2 = nprime
        nprime >>= 1
        self.gc(22)

        # PAPER LINES 16-17
        inner_product(
            aprime.slice_view(0, nprime), bprime.slice_view(nprime, npr2), cL
        )
        inner_product(
            aprime.slice_view(nprime, npr2), bprime.slice_view(0, nprime), cR
        )
        self.gc(23)

        # PAPER LINES 18-19
        vector_exponent_custom(
            Gprime.slice_view(nprime, npr2),
            Hprime.slice_view(0, nprime),
            aprime.slice_view(0, nprime),
            bprime.slice_view(nprime, npr2),
            tmp_bf_0,
        )

        sc_mul(tmp, cL, x_ip)
        add_keys(tmp_bf_0, tmp_bf_0, scalarmultH(_tmp_k_1, tmp))
        if not proof_v8:
            scalarmult_key(tmp_bf_0, tmp_bf_0, _INV_EIGHT)
        L.read(round, tmp_bf_0)
        self.gc(24)

        vector_exponent_custom(
            Gprime.slice_view(0, nprime),
            Hprime.slice_view(nprime, npr2),
            aprime.slice_view(nprime, npr2),
            bprime.slice_view(0, nprime),
            tmp_bf_0,
        )

        sc_mul(tmp, cR, x_ip)
        add_keys(tmp_bf_0, tmp_bf_0, scalarmultH(_tmp_k_1, tmp))
        if not proof_v8:
            scalarmult_key(tmp_bf_0, tmp_bf_0, _INV_EIGHT)
        R.read(round, tmp_bf_0)
        self.gc(25)

        # PAPER LINES 21-22
        hash_cache_mash(w_round, hash_cache, L.to(round), R.to(round))
        if w_round == _ZERO:
            return (0,)

        # PAPER LINES 24-25
        invert(winv, w_round)
        self.gc(26)

        hadamard_fold(Gprime, winv, w_round)
        self.gc(27)

        hadamard_fold(Hprime, w_round, winv, Gprime, nprime)
        Hprime.realloc_init_from(nprime, Gprime, nprime, round < 2)
        self.gc(28)

        # PAPER LINES 28-29
        scalar_fold(aprime, w_round, winv, Gprime, nprime)
        aprime.realloc_init_from(nprime, Gprime, nprime, round < 2)
        self.gc(29)

        scalar_fold(bprime, winv, w_round, Gprime, nprime)
        bprime.realloc_init_from(nprime, Gprime, nprime, round < 2)
        self.gc(30)

        # Finally resize Gprime which was buffer for all ops
        Gprime.resize(nprime, realloc=True)
        round += 1

    from apps.monero.xmr.serialize_messages.tx_rsig_bulletproof import Bulletproof

    return (
        1,
        Bulletproof(
            V=V,
            A=A,
            S=S,
            T1=T1,
            T2=T2,
            taux=taux,
            mu=mu,
            L=L,
            R=R,
            a=aprime.to(0),
            b=bprime.to(0),
            t=t,
        ),
    )
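

# Illustrative sketch (not part of the module): the while-loop above halves nprime
# each round, so a length MN = 2^logMN vector collapses to a single scalar after
# exactly logMN rounds, which is why L and R are allocated with logMN entries.
# Plain ints modulo the group order stand in for the encoded scalars, and
# _example_rounds is a hypothetical helper added only for illustration.
def _example_rounds():
    L_ORDER = 2**252 + 27742317777372353535851937790883648493
    a = list(range(1, 9))                   # MN = 8 = 2^3
    w = 5
    winv = pow(w, L_ORDER - 2, L_ORDER)     # modular inverse (L_ORDER is prime), as invert() does
    rounds = 0
    while len(a) > 1:
        h = len(a) // 2
        # same combination as scalar_fold(aprime, w_round, winv, ...)
        a = [(w * a[i] + winv * a[h + i]) % L_ORDER for i in range(h)]
        rounds += 1
    assert rounds == 3                      # logMN rounds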