Example #1
def run_one_test(nInBits, nCopies):
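    # End-to-end check of CircuitProver on a random one-layer circuit: bind
    # the copy variables (w3) and the two input points (w1, w2) round by
    # round, then compare the final outputs against multilinear extensions
    # of the flattened inputs at (w1, w3) and (w2, w3).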
    nOutBits = nInBits

    (in0v, in1v, typv) = randutil.rand_ckt(nOutBits, nInBits)
    inputs = randutil.rand_inputs(nInBits, nCopies)

    circuit = CircuitProver(nCopies, 2**nInBits, [in0v], [in1v], [typv])
    circuit.set_inputs(inputs)

    z1 = [Defs.gen_random() for _ in range(0, nOutBits)]
    z2 = [Defs.gen_random() for _ in range(0, circuit.nCopyBits)]

    circuit.set_z(z1, z2)

    # mlExt of outputs
    outflat = util.flatten(circuit.ckt_outputs)
    inLayer_mults = LayerComputeBeta(nOutBits + circuit.nCopyBits, z1 + z2)
    assert len(outflat) == len(inLayer_mults.outputs)
    inLayermul = util.mul_vecs(inLayer_mults.outputs, outflat)
    inLayerExt = sum(inLayermul) % Defs.prime

    w1 = [Defs.gen_random() for _ in range(0, nInBits)]
    w2 = [Defs.gen_random() for _ in range(0, nInBits)]
    w3 = [Defs.gen_random() for _ in range(0, circuit.nCopyBits)]

    initOutputs = circuit.get_outputs()

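    # initOutputs holds the coefficients of the first sumcheck round's
    # polynomial, so the claimed output mlext must equal
    # p(0) + p(1) = initOutputs[0] + sum(initOutputs)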
    assert inLayerExt == (initOutputs[0] + sum(initOutputs)) % Defs.prime

    for i in range(0, len(w3)):
        circuit.next_round(w3[i])
        circuit.get_outputs()

    for i in range(0, len(w1)):
        circuit.next_round(w1[i])
        circuit.get_outputs()

    for i in range(0, len(w2)):
        circuit.next_round(w2[i])
        finalOutputs = circuit.get_outputs()

    # check the outputs by computing mlext of layer input directly

    inflat = util.flatten(inputs)

    v1_mults = LayerComputeBeta(circuit.layers[0].nOutBits + circuit.nCopyBits,
                                w1 + w3)
    assert len(inflat) == len(v1_mults.outputs)
    v1inmul = util.mul_vecs(v1_mults.outputs, inflat)
    v1 = sum(v1inmul) % Defs.prime

    v2_mults = LayerComputeBeta(circuit.layers[0].nOutBits + circuit.nCopyBits,
                                w2 + w3)
    assert len(inflat) == len(v2_mults.outputs)
    v2inmul = util.mul_vecs(v2_mults.outputs, inflat)
    v2 = sum(v2inmul) % Defs.prime

    assert v1 == finalOutputs[0]
    assert v2 == sum(finalOutputs) % Defs.prime
Example #2
def run_one_test(nbits, squawk, nbins, pattern):
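    # Evaluate the multilinear extension of inv at the random point z four
    # ways (LayerComputeBeta dot product, VerifierIOMLExt.compute,
    # LayerComputeV round by round, VerifierIOMLExt.compute_sqrtbits),
    # check that all four agree, and optionally print the field-op counts.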
    z = [ Defs.gen_random() for _ in range(0, nbits) ]

    inv = [ Defs.gen_random() for _ in range(0, (2 ** nbits) - nbins) ]
    if pattern == 0:
        inv += [ 0 for _ in range(0, nbins) ]
    elif pattern == 1:
        inv += [ 1 for _ in range(0, nbins) ]
    elif pattern == 2:
        inv += [ (i % 2) for i in range(0, nbins) ]
    elif pattern == 3:
        inv += [ ((i + 1) % 2) for i in range(0, nbins) ]
    else:
        inv += [ random.randint(0, 1) for _ in range(0, nbins) ]

    assert len(inv) == (2 ** nbits)

    fa = FArith()
    oldrec = fa.new_cat("old")
    newrec = fa.new_cat("new")
    nw2rec = fa.new_cat("nw2")
    nw3rec = fa.new_cat("nw3")

    oldbeta = LayerComputeBeta(nbits, z, oldrec)
    oldval = sum(util.mul_vecs(oldbeta.outputs, inv)) % Defs.prime
    oldrec.did_mul(len(inv))
    oldrec.did_add(len(inv)-1)

    newcomp = VerifierIOMLExt(z, newrec)
    newval = newcomp.compute(inv)

    nw2comp = LayerComputeV(nbits, nw2rec)
    nw2comp.other_factors = []
    nw2comp.set_inputs(inv)
    for zz in z:
        nw2comp.next_round(zz)
    nw2val = nw2comp.prevPassValue

    nw3comp = VerifierIOMLExt(z, nw3rec)
    nw3val = nw3comp.compute_sqrtbits(inv)

    assert oldval == newval, "error for inputs (new) %s : %s" % (str(z), str(inv))
    assert oldval == nw2val, "error for inputs (nw2) %s : %s" % (str(z), str(inv))
    assert oldval == nw3val, "error for inputs (nw3) %s : %s" % (str(z), str(inv))

    if squawk:
        print
        print "nbits: %d" % nbits
        print "OLD: %d mul %d add %d sub" % oldrec.get_counts()
        print "NEW: %d mul %d add %d sub" % newrec.get_counts()
        print "NW2: %d mul %d add %d sub" % nw2rec.get_counts()
        print "NW3: %d mul %d add %d sub" % nw3rec.get_counts()

    return newrec.get_counts()
Example #3
def run_test():
    # pylint: disable=global-variable-undefined,redefined-outer-name
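    # Check LayerComputeV (lcv) round by round against a reference that
    # folds `scratch` in half each round by interpolating adjacent pairs
    # at tau, and confirm the final prevPassValue matches scratch[0].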
    tinputs = [Defs.gen_random() for _ in range(0, nOutBits)]
    taus = [Defs.gen_random() for _ in range(0, nOutBits)]
    lcv.set_inputs(tinputs)

    inputs = [
        util.chi(util.numToBin(x, nOutBits), tinputs)
        for x in range(0, 2**nOutBits)
    ]

    global scratch
    global outputs

    scratch = list(inputs)
    outputs = list(inputs)

    def compute_next_value(tau):
        global scratch
        global outputs

        nscratch = []
        tauInv = (1 - tau) % Defs.prime

        for i in range(0, len(scratch) / 2):
            val = ((scratch[2 * i] * tauInv) +
                   (scratch[2 * i + 1] * tau)) % Defs.prime
            nscratch.append(val)

        del val
        scratch = nscratch

        #ndups = len(outputs) / len(scratch)
        #nouts = [ [val] * ndups for val in scratch ]
        outputs = scratch
        #outputs = [item for sublist in nouts for item in sublist]

    for i in range(0, nOutBits):
        assert lcv.inputs == inputs
        assert lcv.outputs == outputs
        assert lcv.scratch == scratch

        compute_next_value(taus[i])
        lcv.next_round(taus[i])

        assert outputs == lcv.outputs
        assert scratch == lcv.scratch

    assert lcv.prevPassValue == scratch[0]
    assert all([lcv.prevPassValue == elm[0] for elm in lcv.outputs_fact])
Example #4
def run_test(nOutBits, nValues):
    # pylint: disable=redefined-outer-name,global-variable-undefined
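    # Same round-by-round check as the previous example, but with nValues
    # inputs zero-padded to length 2**nOutBits; here the reference
    # `outputs` duplicates each folded value to keep its original length.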
    inputs = [Defs.gen_random() for _ in range(0, nValues)]
    taus = [Defs.gen_random() for _ in range(0, nOutBits)]

    lcv.set_inputs(inputs)

    global scratch
    global outputs

    inputs += [0] * (2**nOutBits - nValues)
    scratch = list(inputs)
    outputs = list(inputs)

    def compute_next_value(tau):
        global scratch
        global outputs

        nscratch = []
        tauInv = (1 - tau) % Defs.prime

        for i in range(0, len(scratch) / 2):
            val = ((scratch[2 * i] * tauInv) +
                   (scratch[2 * i + 1] * tau)) % Defs.prime
            nscratch.append(val)

        del val
        scratch = nscratch

        ndups = len(outputs) / len(scratch)
        nouts = [[val] * ndups for val in scratch]
        outputs = [item for sublist in nouts for item in sublist]

    for i in range(0, nOutBits):
        assert lcv.inputs == inputs
        assert lcv.outputs == outputs
        assert lcv.scratch == scratch

        compute_next_value(taus[i])
        lcv.next_round(taus[i])

        if i < nOutBits - 1:
            assert outputs == lcv.outputs
            assert scratch == lcv.scratch

    assert lcv.prevPassValue == scratch[0]
Example #5
def rand_inputs(nInBits, nCopies, inLay=None):
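    # Build nCopies input vectors of length 2**nInBits: entries left as
    # None in inLay are filled with fresh random field elements, and a
    # caller-supplied inLay is zero-padded to the next power of two.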
    out = []

    if inLay is None:
        inLay = [None] * (2**nInBits)
    else:
        nInBits = util.clog2(len(inLay))
        inLay += [0] * (2**nInBits - len(inLay))

    for _ in range(0, nCopies):
        out.append(
            [Defs.gen_random() if elm is None else elm for elm in inLay])

    return out
Example #6
    def run(self, inputs, muxbits=None):
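        # Full verifier pass: evaluate the multilinear extension of the
        # outputs at a random point, run one sumcheck with the prover per
        # layer (reducing the two claims v1 and v2 to one with a random
        # tau), and finish by checking the multilinear extension of the
        # inputs against the last expected value.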
        ############
        # 0. Setup #
        ############
        assert self.prover is not None

        # set inputs and outputs
        self.prover.set_inputs(inputs)
        self.inputs = []
        for ins in inputs:
            self.inputs.extend(ins + [0] * (2**self.nInBits - len(ins)))
        self.outputs = util.flatten(self.prover.ckt_outputs)

        # set muxbits
        self.muxbits = muxbits
        if muxbits is not None:
            self.prover.set_muxbits(muxbits)

        ###############################################
        # 1. Compute multilinear extension of outputs #
        ###############################################
        nOutBits = util.clog2(len(self.in0vv[-1]))
        assert util.clog2(len(self.outputs)) == nOutBits + self.nCopyBits

        # pad out to power-of-2 number of copies
        self.outputs += [0] * (2**(nOutBits + self.nCopyBits) -
                               len(self.outputs))

        # generate random point in (z1, z2) \in F^{nOutBits + nCopyBits}
        z1 = [Defs.gen_random() for _ in range(0, nOutBits)]
        z2 = [Defs.gen_random() for _ in range(0, self.nCopyBits)]
        self.prover.set_z(z1, z2)

        # eval mlext of output at (z1,z2)
        output_mlext = VerifierIOMLExt(z1 + z2, self.out_a)
        expectNext = output_mlext.compute(self.outputs)

        ##########################################
        # 2. Interact with prover for each layer #
        ##########################################
        for lay in range(0, len(self.in0vv)):
            nInBits = self.layInBits[lay]
            nOutBits = self.layOutBits[lay]

            # random coins for this round
            w3 = [Defs.gen_random() for _ in range(0, self.nCopyBits)]
            w1 = [Defs.gen_random() for _ in range(0, nInBits)]
            w2 = [Defs.gen_random() for _ in range(0, nInBits)]

            # convenience
            ws = w3 + w1 + w2

            ###################
            ### A. Sumcheck ###
            ###################
            for rd in range(0, 2 * nInBits + self.nCopyBits):
                # get output from prv and check against expected value
                outs = self.prover.get_outputs()
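                # outs holds the coefficients of this round's polynomial,
                # so the running claim must equal
                # p(0) + p(1) = outs[0] + sum(outs)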
                gotVal = (outs[0] + sum(outs)) % Defs.prime
                self.sc_a.did_add(len(outs))

                assert expectNext == gotVal, "Verification failed in round %d of layer %d" % (
                    rd, lay)

                # go to next round
                self.prover.next_round(ws[rd])
                expectNext = util.horner_eval(outs, ws[rd], self.sc_a)

            outs = self.prover.get_outputs()
            v1 = outs[0] % Defs.prime
            v2 = sum(outs) % Defs.prime
            self.tV_a.did_add(len(outs) - 1)

            ############################################
            ### B. Evaluate mlext of wiring predicates #
            ############################################
            tV_eval = self.eval_mlext(lay, z1, z2, w1, w2, w3, v1, v2)

            # check that we got the correct value from the last round of the sumcheck
            assert expectNext == tV_eval, "Verification failed computing tV for layer %d" % lay

            ###############################
            ### C. Extend to next layer ###
            ###############################
            tau = Defs.gen_random()
            if lay < len(self.in0vv) - 1:
                self.prover.next_layer(tau)
            expectNext = util.horner_eval(outs, tau, self.nlay_a)

            # next z values
            # z1 = w1 + ( w2 - w1 ) * tau; z2 is just w3
            z1 = [(elm1 + (elm2 - elm1) * tau) % Defs.prime
                  for (elm1, elm2) in zip(w1, w2)]
            self.nlay_a.did_sub(len(w1))
            self.nlay_a.did_mul(len(w1))
            self.nlay_a.did_add(len(w1))
            z2 = w3

        ##############################################
        # 3. Compute multilinear extension of inputs #
        ##############################################
        # Finally, evaluate mlext of input at z1, z2
        assert util.clog2(len(self.inputs)) == self.nInBits + self.nCopyBits
        self.inputs += [0] * (2**(self.nInBits + self.nCopyBits) -
                              len(self.inputs))
        input_mlext = VerifierIOMLExt(z1 + z2, self.in_a)
        input_mlext_eval = input_mlext.compute(self.inputs)

        assert input_mlext_eval == expectNext, "Verification failed checking input mlext"
Example #7
def run_one_test(nInBits, nCopies):
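    # Same check as Example #1, but driving a single LayerProver directly:
    # the expected outputs come from evaluating an explicit ArithCircuit on
    # each copy's inputs, and the layer's sumcheck rounds are verified
    # against multilinear extensions of the flattened inputs.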
    nOutBits = nInBits

    circuit = _DummyCircuitProver(nCopies)
    inLayer = InputLayer(nOutBits)

    (in0v, in1v, typv) = randutil.rand_ckt(nOutBits, nInBits)
    typc = [tc.cgate for tc in typv]
    inputs = randutil.rand_inputs(nInBits, nCopies)

    # compute outputs
    ckt = ArithCircuit()
    inCktLayer = ArithCircuitInputLayer(ckt, nOutBits)
    outCktLayer = ArithCircuitLayer(ckt, inCktLayer, in0v, in1v, typc)
    ckt.layers = [inCktLayer, outCktLayer]
    outputs = []
    for inp in inputs:
        ckt.run(inp)
        outputs.append(ckt.outputs)

    z1 = [Defs.gen_random() for _ in range(0, nOutBits)]
    z2 = [Defs.gen_random() for _ in range(0, circuit.nCopyBits)]

    outLayer = LayerProver(inLayer, circuit, in0v, in1v, typv)
    outLayer.set_inputs(inputs)
    outLayer.set_z(z1, z2)

    # mlExt of outputs
    outflat = util.flatten(outputs)
    inLayer_mults = LayerComputeBeta(nOutBits + outLayer.circuit.nCopyBits,
                                     z1 + z2)
    assert len(outflat) == len(inLayer_mults.outputs)
    inLayermul = util.mul_vecs(inLayer_mults.outputs, outflat)
    inLayerExt = sum(inLayermul) % Defs.prime

    w3 = [Defs.gen_random() for _ in range(0, circuit.nCopyBits)]
    w1 = [Defs.gen_random() for _ in range(0, nInBits)]
    w2 = [Defs.gen_random() for _ in range(0, nInBits)]

    outLayer.compute_outputs()
    initOutputs = outLayer.output

    assert inLayerExt == (initOutputs[0] + sum(initOutputs)) % Defs.prime

    for i in range(0, len(w3)):
        outLayer.next_round(w3[i])
        outLayer.compute_outputs()

    for i in range(0, len(w1)):
        outLayer.next_round(w1[i])
        outLayer.compute_outputs()

    for i in range(0, len(w2)):
        outLayer.next_round(w2[i])
        outLayer.compute_outputs()

    finalOutputs = outLayer.output

    # check the outputs by computing mlext of layer input directly

    inflat = util.flatten(inputs)

    v1_mults = LayerComputeBeta(
        outLayer.prevL.nOutBits + outLayer.circuit.nCopyBits, w1 + w3)
    assert len(inflat) == len(v1_mults.outputs)
    v1inmul = util.mul_vecs(v1_mults.outputs, inflat)
    v1 = sum(v1inmul) % Defs.prime

    v2_mults = LayerComputeBeta(
        outLayer.prevL.nOutBits + outLayer.circuit.nCopyBits, w2 + w3)
    assert len(inflat) == len(v2_mults.outputs)
    v2inmul = util.mul_vecs(v2_mults.outputs, inflat)
    v2 = sum(v2inmul) % Defs.prime

    assert v1 == finalOutputs[0]
    assert v2 == sum(finalOutputs) % Defs.prime