Example #1
def test_simple_bkz_reduction(block_size=10):
    for n in dimensions:
        set_random_seed(n)
        A = make_integer_matrix(n)
        LLL.reduction(A)
        B = copy(A)
        BKZ.reduction(B, BKZ.Param(block_size=block_size))

        C = copy(A)
        SimpleBKZ(C)(block_size=block_size)

        assert abs(C[0].norm() - B[0].norm()) < 0.1
        assert C[0].norm() < A[0].norm()
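This test compares fpylll's native BKZ against the pure-Python SimpleBKZ teaching implementation. A self-contained sketch of the same comparison, replacing the test helper make_integer_matrix with IntegerMatrix.random (the dimension, seed and block size below are illustrative choices, not taken from the test suite):

from copy import copy
from fpylll import IntegerMatrix, LLL, BKZ, FPLLL
from fpylll.algorithms.simple_bkz import BKZReduction as SimpleBKZ

FPLLL.set_random_seed(1337)                      # illustrative seed
A = IntegerMatrix.random(40, "qary", bits=30, k=20)
LLL.reduction(A)

B = copy(A)
BKZ.reduction(B, BKZ.Param(block_size=10))       # native BKZ

C = copy(A)
SimpleBKZ(C)(block_size=10)                      # pure-Python teaching implementation

print(B[0].norm(), C[0].norm())                  # both should be noticeably shorter than A[0]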
Example #2
def test_gso_io():
    for m, n in dimensions:
        if m <= 2 or n <= 2:
            continue

        A = make_integer_matrix(m, n)
        v = list(A[0])
        LLL.reduction(A)

        for float_type in float_types:
            M = GSO.Mat(copy(A), float_type=float_type)
            M.update_gso()
            w = M.babai(v)
            v_ = IntegerMatrix.from_iterable(1, m, w) * A
            v_ = list(v_[0])
            assert v == v_
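The assertion above checks a Babai round trip: the coefficient vector returned by M.babai, multiplied back into the basis, reproduces the original lattice point. A stand-alone sketch of that check on an assumed random q-ary basis (dimension chosen arbitrarily):

from copy import copy
from fpylll import IntegerMatrix, LLL, GSO

A = IntegerMatrix.random(30, "qary", bits=30, k=15)
v = list(A[0])                  # a known lattice vector
LLL.reduction(A)                # reduce; v is still in the lattice

M = GSO.Mat(copy(A))
M.update_gso()
w = M.babai(v)                  # coefficients of the closest lattice vector to v
v_ = A.multiply_left(w)         # w * A
assert tuple(v_) == tuple(v)    # exact because v itself is a lattice point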
Example #3
    def __init__(self, A):
        """Construct a new BKZ reduction instance.

        :param A: Integer matrix to reduce.

        """
        if not isinstance(A, IntegerMatrix):
            raise TypeError("Matrix must be IntegerMatrix but got type '%s'" %
                            type(A))

        # run LLL first
        wrapper = LLL.Wrapper(A)
        wrapper()

        self.A = A
        self.m = GSO.Mat(A, flags=GSO.ROW_EXPO)
        self.lll_obj = LLL.Reduction(self.m)
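The constructor above runs LLL (via LLL.Wrapper) before wiring up a GSO object with ROW_EXPO and an LLL.Reduction bound to it. A minimal free-standing sketch of that setup, with an arbitrary random basis:

from fpylll import IntegerMatrix, LLL, GSO

A = IntegerMatrix.random(50, "qary", bits=30, k=25)

wrapper = LLL.Wrapper(A)             # heuristic precision management, reduces A in place
wrapper()

M = GSO.Mat(A, flags=GSO.ROW_EXPO)   # row exponents keep "double" usable on larger entries
M.update_gso()
lll_obj = LLL.Reduction(M)           # incremental LLL on the same GSO object
lll_obj()
print(M.get_r(0, 0))                 # squared norm of the first Gram-Schmidt vector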
Example #4
def test_gso_io():
    for int_type in int_types:
        for m, n in dimensions:
            if m <= 2 or n <= 2:
                continue

            A = make_integer_matrix(m, n, int_type=int_type)
            v = list(A[0])
            LLL.reduction(A)

            for float_type in float_types:
                M = GSO.Mat(copy(A), float_type=float_type)
                M.update_gso()
                w = M.babai(v)
                v_ = IntegerMatrix.from_iterable(1, m, w) * A
                v_ = list(v_[0])
                assert v == v_
def main_cleanbkz_mpi_master(filename, bs, cores):

    try:
        with open(filename, "rb") as f:
            mat = pickle.load(f)
            #print "len(mat)", len(mat)
            #if (len(mat) > 1):
            #   mat = mat[0]
        if isinstance(mat, IntegerMatrix):
            Ainput = mat
        else:
            Ainput = IntegerMatrix.from_matrix(mat)
    except:
        Ainput = IntegerMatrix.from_file(filename)

    Ainput_M = GSO.Mat(Ainput, float_type='double')
    Ainput_M.update_gso()
    r = [Ainput_M.get_r(i, i) for i in range(0, Ainput.nrows)]
    L_Ainput_M = LLL.Reduction(Ainput_M)
    L_Ainput_M()

    print(r)

    A = IntegerMatrix.from_matrix(L_Ainput_M.M.B, int_type="long")

    cleanbkz_mpi = CLEANBKZ_MPI(A, cores)
    #cleanbkz_mpi = BKZ2(A)
    cleanbkz_mpi.lll_obj()
    cleanbkz_mpi.M.update_gso()
    r = [
        log(cleanbkz_mpi.M.get_r(i, i)) for i in range(0, cleanbkz_mpi.A.nrows)
    ]
    print "# starting r "
    print r

    params = BKZ.Param(
        block_size=bs,
        max_loops=5000,
        min_success_probability=.01,
        flags=BKZ.BOUNDED_LLL,  #|BKZ.DUMP_GSO,
        dump_gso_filename="gso_output.file",
        strategies="default.json")
    cleanbkz_mpi(params=params, min_row=0)
    #print "# done. found sv", cleanbkz_mpi.M.B[0]

    # done send last end signal
    for i in range(1, size):
        comm.send(1, i, tag=999)  # *** changed to send

    cleanbkz_mpi.M.update_gso()
    r2 = [
        log(cleanbkz_mpi.M.get_r(i, i)) for i in range(0, cleanbkz_mpi.A.nrows)
    ]
    print(cleanbkz_mpi.A[0])
    print("# ending r")
    print(r2)

    return
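The BKZ.Param object above bundles block size, termination conditions and strategies. A hedged sketch of building comparable parameters with the stock strategy file and applying them (all numeric values are illustrative):

from fpylll import BKZ, IntegerMatrix, LLL

params = BKZ.Param(
    block_size=30,                        # SVP block size
    max_loops=8,                          # cap the number of tours
    min_success_probability=0.5,          # pruning target for each SVP call
    strategies=BKZ.DEFAULT_STRATEGY,      # preprocessing/pruning strategies shipped with fplll
    flags=BKZ.MAX_LOOPS | BKZ.AUTO_ABORT | BKZ.VERBOSE)

A = LLL.reduction(IntegerMatrix.random(60, "qary", bits=30, k=30))
A = BKZ.reduction(A, params)              # native BKZ with the parameters above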
Example #6
 def reduce_lattice(self, lattice, block_size):
     if block_size is None:
         self.log("Start LLL.")
         return LLL.reduction(lattice)
     else:
         self.log("Start BKZ-{}.".format(block_size))
         return BKZ.reduction(lattice, BKZ.Param(block_size=block_size,
                                                 strategies=BKZ.DEFAULT_STRATEGY,
                                                 auto_abort=True))
Example #7
def test_bkz_init():
    for m, n in dimensions:
        A = make_integer_matrix(m, n)
        for float_type in float_types:
            M = GSO.Mat(copy(A), float_type=float_type)
            lll_obj = LLL.Reduction(M)
            param = BKZ.Param(block_size=3, strategies=BKZ.DEFAULT_STRATEGY)
            bkz = BKZ.Reduction(M, lll_obj, param)
            del bkz
Example #8
def test_svp_too_large():
    from fpylll.config import max_enum_dim
    m = max_enum_dim + 1
    n = max_enum_dim + 1
    A = make_integer_matrix(m, n)
    A = LLL.reduction(A)
    M = GSO.Mat(A)
    M.update_gso()
    with pytest.raises(NotImplementedError):
        SVP.shortest_vector(A)
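Below max_enum_dim the same call goes through; a minimal sketch of SVP.shortest_vector on a small random basis (the dimension is an arbitrary choice):

from fpylll import IntegerMatrix, LLL, SVP

A = IntegerMatrix.random(30, "qary", bits=30, k=15)
LLL.reduction(A)
v = SVP.shortest_vector(A)              # entries of a shortest non-zero lattice vector
print(sum(x * x for x in v))            # its squared norm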
Example #9
File: bkz3.py Project: malb/yolo
    def __call__(self, params, min_row=0, max_row=-1):
        """Run the BKZ algorithm with parameters `param`.

        :param params: BKZ parameters
        :param min_row: start processing in this row
        :param max_row: stop processing in this row (exclusive)

        """
        self.ith_block = 0
        tracer = BKZTreeTracer(self, verbosity=params.bkz_param.flags & BKZ.VERBOSE, start_clocks=True)
        self.params = params

        self.lll_objs = 20*[None]
        for i in range(20):
            eta = etas[i]
            self.lll_objs[i] = LLL.Reduction(self.M, flags=LLL.DEFAULT, eta=eta)

        cputime_start = time.process_time()

        self.M.discover_all_rows()
        with tracer.context("lll"):
            for i in range(20):
                self.lll_objs[i]()

        if params.rampup:
            with tracer.context("rampup", -1):
                self.preprocessing(params.bkz_param.block_size, min_row, max_row, start=10, step=1, tracer=tracer)

        i = 0
        self.ith_tour = 0
        while True:
            with tracer.context("tour", i):
                self.ith_block = 0
                self.ith_tour += 1
                clean = self.tour(params.bkz_param, min_row, max_row, tracer=tracer, top_level=True)
            print "proba %.4f" % self.tuners[params.bkz_param.block_size].proba
            # for x in sorted(self.tuners[params.bkz_param.block_size].data.keys()):
            #    try:
            #        print x, "\t %d \t %.2f " % (self.tuners[params.bkz_param.block_size].counts[x], self.tuners[params.bkz_param.block_size].data[x])
            #    except:
            #        pass
            print()
            i += 1
            if (not clean) or params.bkz_param.block_size >= self.A.nrows:
                break
            if (params.bkz_param.flags & BKZ.AUTO_ABORT) and auto_abort.test_abort():
                break
            if (params.bkz_param.flags & BKZ.MAX_LOOPS) and i >= params.bkz_param.max_loops:
                break
            if (params.bkz_param.flags & BKZ.MAX_TIME) and time.process_time() - cputime_start >= params.bkz_param.max_time:
                break

        self.trace = tracer.trace
        return clean
Example #10
def iterated_sub_sieve(A, goal):
    n = A.nrows
    M = GSO.Mat(A)
    lll = LLL.Reduction(M)
    lll()
    n = M.d
    d = n // 4
    while M.get_r(0, 0) > goal:
        sub_sieve_plus(lll, d)
        d -= 1
    return d + 1
Example #11
def test_gso_update_gso():
    for m, n in dimensions:
        A = make_integer_matrix(m, n)
        LLL.reduction(A)

        r00 = []
        re00 = []
        g00 = []
        for float_type in float_types:
            M = GSO.Mat(copy(A), float_type=float_type)
            M.update_gso()
            if (m, n) == (0, 0):
                continue
            r00.append(M.get_r(0, 0))
            re00.append(M.get_r_exp(0, 0)[0])
            g00.append(M.get_gram(0, 0))

        for i in range(1, len(r00)):
            assert abs(r00[0]/r00[i] - 1.0) < 0.0001
            assert abs(re00[0]/re00[i] - 1.0) < 0.0001
            assert abs(g00[0]/g00[i] - 1.0) < 0.0001
Example #12
def test_bkz_postprocessing():
    A = IntegerMatrix.random(20, "qary", bits=20, k=10, int_type="long")
    LLL.reduction(A)

    bkz = BKZ(A)
    bkz.M.update_gso()
    tracer = BKZTreeTracer(bkz)

    solution = (2, 2, 0, 3, 4, 5, 7)

    v = A.multiply_left(solution, 3)
    bkz.svp_postprocessing(3, len(solution), solution, tracer)
    w = tuple(A[3])
    assert v == w

    solution = (2, 1, 0, 3, 4, 5, 7)

    v = A.multiply_left(solution, 3)
    bkz.svp_postprocessing(3, len(solution), solution, tracer)
    w = tuple(A[3])
    assert v == w
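The expected vector in this test is computed with IntegerMatrix.multiply_left, which forms an integer combination of basis rows starting at a given offset. A small sketch of just that primitive, with arbitrary coefficients:

from fpylll import IntegerMatrix, LLL

A = IntegerMatrix.random(10, "qary", bits=10, k=5, int_type="long")
LLL.reduction(A)

coeffs = (1, -2, 0, 3)
v = A.multiply_left(coeffs, 2)          # sum_i coeffs[i] * A[2 + i]

w = [0] * A.ncols
for i, c in enumerate(coeffs):
    for j in range(A.ncols):
        w[j] += c * A[2 + i, j]
assert tuple(w) == tuple(v)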
Example #13
 def reduce_lattice(self, lattice, block_size):
     """Reduce the lattice, either using *LLL* if `block_size` is `None` or *BKZ* with the given `block_size`."""
     if block_size is None:
         self.log("Start LLL.")
         return LLL.reduction(lattice)
     else:
         self.log("Start BKZ-{}.".format(block_size))
         return BKZ.reduction(
             lattice,
             BKZ.Param(block_size=block_size,
                       strategies=BKZ.DEFAULT_STRATEGY,
                       auto_abort=True))
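The helper above only chooses between LLL and BKZ. A free-standing version of the same dispatch without the surrounding class (the function name reduce_basis and the defaults are illustrative):

from fpylll import IntegerMatrix, LLL, BKZ

def reduce_basis(A, block_size=None):
    """LLL-reduce A, or BKZ-reduce it when a block size is given."""
    if block_size is None:
        return LLL.reduction(A)
    return BKZ.reduction(
        A, BKZ.Param(block_size=block_size,
                     strategies=BKZ.DEFAULT_STRATEGY,
                     auto_abort=True))

A = IntegerMatrix.random(40, "qary", bits=30, k=20)
A = reduce_basis(A, block_size=10)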
Example #14
def test_gso_update_gso():
    for m, n in dimensions:
        A = make_integer_matrix(m, n)
        LLL.reduction(A)

        r00 = []
        re00 = []
        g00 = []
        for float_type in float_types:
            M = GSO.Mat(copy(A), float_type=float_type)
            M.update_gso()
            if (m, n) == (0, 0):
                continue
            r00.append(M.get_r(0, 0))
            re00.append(M.get_r_exp(0, 0)[0])
            g00.append(M.get_gram(0, 0))

        for i in range(1, len(r00)):
            assert abs(r00[0]/r00[i] - 1.0) < 0.0001
            assert abs(re00[0]/re00[i] - 1.0) < 0.0001
            assert abs(g00[0]/g00[i] - 1.0) < 0.0001
Example #15
def load_matrix_file(filepath,
                     randomize=False,
                     seed=None,
                     doLLL=True,
                     high_prec=False):
    """
    Load matrix from file, LLL reduce (and randomize).

    :param filepath: Load matrix from this file
    :param randomize: Randomize the basis
    :param seed: Seed for randomization
    :returns: lattice basis and BKZ object

    """
    A = IntegerMatrix.from_file(filepath)
    if doLLL:
        A = LLL.reduction(A)
    if not high_prec:
        A = IntegerMatrix.from_matrix(A, int_type="long")

    if not high_prec:
        M = GSO.Mat(A, float_type="double", flags=GSO.ROW_EXPO)
    else:
        M = GSO.Mat(A, float_type="long double", flags=GSO.ROW_EXPO)
    bkz = BKZReduction(M)

    if seed is not None:
        FPLLL.set_random_seed(seed)

    if randomize:
        bkz.randomize_block(0, A.nrows, density=A.ncols // 4)
        LLL.reduction(A)
        bkz = BKZReduction(A)

    if doLLL:
        LLL.reduction(A)
    bkz.lll_obj()  # to initialize bkz.M etc

    return A, bkz
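A plausible way to exercise the loader above: write a random basis to disk in fplll's text format (str of an IntegerMatrix) and pass the file name in. The file name, dimension and seed are made up for illustration:

from fpylll import IntegerMatrix, FPLLL

FPLLL.set_random_seed(0)
A = IntegerMatrix.random(60, "qary", q=2**30, k=30)
with open("example_basis.txt", "w") as fh:
    fh.write(str(A))                      # fplll text format, readable by IntegerMatrix.from_file

# B, bkz = load_matrix_file("example_basis.txt", randomize=True, seed=42)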
def main_pruning(filename, bs, cores):

    try:
        with open(filename, "rb") as f:
            mat = pickle.load(f)
            #print "len(mat)", len(mat)
            #if (len(mat) > 1):
            #   mat = mat[0]
        if isinstance(mat, IntegerMatrix):
            Ainput = mat
        else:
            Ainput = IntegerMatrix.from_matrix(mat)
    except:
        Ainput = IntegerMatrix.from_file(filename)

    Ainput_M = GSO.Mat(Ainput, float_type='double')
    Ainput_M.update_gso()
    r = [Ainput_M.get_r(i, i) for i in range(0, Ainput.nrows)]
    L_Ainput_M = LLL.Reduction(Ainput_M)
    L_Ainput_M()
    #print r

    A = IntegerMatrix.from_matrix(L_Ainput_M.M.B, int_type="long")
    M = GSO.Mat(A, float_type="double")
    bkzobj = BKZ2(M)
    bkzobj.M.update_gso()
    block_size = bs
    r = [M.get_r(i, i) for i in range(0, block_size)]
    radius = r[0] * 0.99
    preproc_cost = 5000**(rank + 1)

    pr0 = Pruning.run(radius,
                      NPS[block_size] * preproc_cost, [r],
                      0.1,
                      metric="probability",
                      float_type="double",
                      flags=Pruning.GRADIENT | Pruning.NELDER_MEAD)

    print(pr0.coefficients)
    """
    pruning = prune(radius, NPS[block_size] * preproc_cost, [r], 0.01,
                        metric="probability", float_type="double",
                        flags=Pruning.GRADIENT|Pruning.NELDER_MEAD)
    cost = sum(pruning.detailed_cost) / NPS[block_size]
    print "# [rank %d] cost %.1f, precost %.1f " % (rank, cost, preproc_cost)
    """

    pr0_linear = pr0.LinearPruningParams(block_size, block_size - 2)
    print(pr0_linear.coefficients)

    return
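The pruner call above can be reproduced without the MPI scaffolding. The sketch below computes pruning coefficients for one block of a freshly reduced basis; the block size, cost estimate and target probability are assumptions:

from fpylll import IntegerMatrix, LLL, GSO, Pruning

A = IntegerMatrix.random(60, "qary", bits=30, k=30)
LLL.reduction(A)
M = GSO.Mat(A, float_type="double")
M.update_gso()

block_size = 40
r = [M.get_r(i, i) for i in range(block_size)]
radius = r[0] * 0.99
preproc_cost = 10**7                              # assumed preprocessing cost in enumeration nodes

pr = Pruning.run(radius, preproc_cost, [r], 0.1,  # aim for success probability 0.1
                 metric="probability",
                 float_type="double",
                 flags=Pruning.GRADIENT | Pruning.NELDER_MEAD)
print(pr.coefficients)
print(pr.expectation)                             # estimated success probability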
Example #17
def test_lll_lll():
    for m, n in dimensions:
        A = make_integer_matrix(m, n)
        b00 = []
        for float_type in float_types:
            B = copy(A)
            M = GSO.Mat(B, float_type=float_type)
            lll = LLL.Reduction(M)
            lll()
            if (m, n) == (0, 0):
                continue
            b00.append(B[0, 0])
        for i in range(1, len(b00)):
            assert b00[0] == b00[i]
Example #18
def load_prebkz(n, s=0, blocksize=40):
    """
    """

    filename = "qarychallenge/prebkz-%02d-dim-%03d-seed-%02d.txt" % (blocksize,
                                                                     n, s)

    if not os.path.isdir("qarychallenge"):
        os.mkdir("qarychallenge")

    if not os.path.isfile(filename):
        set_random_seed(s)
        A = IntegerMatrix.random(n, "qary", q=2**30, k=n // 2)
        print("Did not find '{filename}'. Creating and reducing".format(
            filename=filename))
        print("created, ", end="")
        sys.stdout.flush()
        A = LLL.reduction(A)
        print("LLLed, ", end="")
        sys.stdout.flush()

        if A.nrows >= 200:
            float_type = "dd"
        elif A.nrows >= 160:
            float_type = "long double"
        else:
            float_type = "double"

        M = GSO.Mat(A, float_type=float_type, flags=GSO.ROW_EXPO)

        bkz = BKZReduction(M)

        for b in range(10, blocksize + 1):
            print "\r created, LLLed, BKZed %d" % b,
            sys.stdout.flush()

            par = fplll_bkz.Param(b,
                                  strategies=fplll_bkz.DEFAULT_STRATEGY,
                                  max_loops=1,
                                  flags=fplll_bkz.MAX_LOOPS)
            bkz(par)

        print()

        fn = open(filename, "w")
        fn.write(str(A))
        fn.close()

    return load_matrix_file(filename, randomize=False)
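load_prebkz runs BKZ with steadily increasing block sizes, one tour each, before writing the basis out. The same progressive schedule in isolation might look like this (dimension and final block size are illustrative):

from fpylll import IntegerMatrix, LLL, GSO, BKZ as fplll_bkz
from fpylll.algorithms.bkz2 import BKZReduction

A = IntegerMatrix.random(80, "qary", q=2**30, k=40)
LLL.reduction(A)
M = GSO.Mat(A, flags=GSO.ROW_EXPO)
bkz = BKZReduction(M)

for b in range(10, 31):                    # block size 10, 11, ..., 30, one tour each
    par = fplll_bkz.Param(b,
                          strategies=fplll_bkz.DEFAULT_STRATEGY,
                          max_loops=1,
                          flags=fplll_bkz.MAX_LOOPS)
    bkz(par)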
Example #19
File: yolosvp.py Project: malb/yolo
def test():
    for n in ns:
        print()
        print("++++ Dim ", n)
        A = make_integer_matrix(n)
        M = GSO.Mat(A, flags=GSO.ROW_EXPO)
        lll_obj = LLL.Reduction(M, flags=LLL.DEFAULT)
        lll_obj()

        t_start = time()
        B = copy(A)
        timer = Timer()
        proudly_parrallel(3, yolo_hsvp, (n, B, 1.05**2))

        print "time: %.1f sec" % (time() - t_start)
Example #20
def test_gso_update_gso():
    EPSILON = 0.0001

    for int_type in int_types:
        for m, n in dimensions:
            A = make_integer_matrix(m, n, int_type=int_type)
            LLL.reduction(A)

            r00 = []
            re00 = []
            g00 = []
            for float_type in float_types:
                M = GSO.Mat(copy(A), float_type=float_type)
                M.update_gso()
                if (m, n) == (0, 0):
                    continue
                r00.append(M.get_r(0, 0))
                re00.append(M.get_r_exp(0, 0)[0])
                g00.append(M.get_gram(0, 0))

            for i in range(1, len(r00)):
                assert r00[0] == pytest.approx(r00[i], rel=EPSILON)
                assert re00[0] == pytest.approx(re00[i], rel=EPSILON)
                assert g00[0] == pytest.approx(g00[i], rel=EPSILON)
Example #21
def test_cvp():
    for m, n in dimensions:
        A = make_integer_matrix(m, n)
        A = LLL.reduction(A)
        M = GSO.Mat(A)
        M.update_gso()
        t = list(make_integer_matrix(n, n)[0])
        v0 = CVP.closest_vector(A, t)

        E = Enumeration(M)
        v1, _ = E.enumerate(0, A.nrows, 2, 40, M.from_canonical(t))
        v1 = IntegerMatrix.from_iterable(1, A.nrows, map(lambda x: int(round(x)), v1))
        v1 = tuple((v1*A)[0])

        assert v0 == v1
def mpi_svpchallenge_par3(bs_diff=10,
                          cores=2,
                          start_dim=80,
                          end_dim=80 + 2,
                          BS_RANDOM_RANGE=10):
    dim = start_dim
    A_pre = IntegerMatrix.from_file(
        "/home/shi/suite/sb_fpylll/bench/svpchallenge/svpchallengedim%dseed0.txt"
        % dim)
    if (rank == 0):
        print "# input dim: ", dim
        print "# nrows: ", A_pre.nrows
    ASVP_START = time()
    LLL.reduction(A_pre)
    A = IntegerMatrix.from_matrix(A_pre, int_type="long")
    bkz = BKZReduction(A)
    bkz.lll_obj()
    r = [bkz.M.get_r(i, i) for i in range(dim)]
    goal = (1.05)**2 * gaussian_heuristic(r)
    bs_ulim = dim - bs_diff
    mpi_interacting_parrallel_asvp(A, bs_ulim, goal, cores, BS_RANDOM_RANGE)
    ASVP_TIME = time() - ASVP_START

    # done send signal
    comm.send(99, dest=0, tag=0)
    comm.send(rank, dest=0, tag=1)

    if (rank == 0):
        print(
            "\nSUMMARY", {
                "input dim": dim,
                "bs_range": (bs_ulim - BS_RANDOM_RANGE, bs_ulim),
                "time": ASVP_TIME
            })

    return
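The goal above follows the SVP challenge convention: 1.05 times the Gaussian heuristic length, squared because fpylll works with squared norms. A stand-alone sketch of computing that target:

from fpylll import IntegerMatrix, LLL, GSO
from fpylll.util import gaussian_heuristic

A = IntegerMatrix.random(60, "qary", bits=30, k=30)
LLL.reduction(A)
M = GSO.Mat(A)
M.update_gso()

r = [M.get_r(i, i) for i in range(A.nrows)]    # squared Gram-Schmidt norms
goal = 1.05**2 * gaussian_heuristic(r)         # squared target length
print(M.get_r(0, 0) <= goal)                   # is the first basis vector already short enough?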
Example #23
def test_svp():
    for m, n in dimensions:
        A = make_integer_matrix(m, n)
        A = LLL.reduction(A)
        M = GSO.Mat(A)
        M.update_gso()
        E = Enumeration(M)
        _, v1 = E.enumerate(0, M.d, M.get_r(0, 0), 0)[0]
        v1 = A.multiply_left(v1)
        nv1 = sum([v_**2 for v_ in v1])

        v0 = SVP.shortest_vector(A)
        nv0 = sum([v_**2 for v_ in v0])

        assert nv0 == nv1
Example #24
def test_cvp():
    for m, n in dimensions:
        A = make_integer_matrix(m, n)
        A = LLL.reduction(A)
        M = GSO.Mat(A)
        M.update_gso()
        t = list(make_integer_matrix(n, n)[0])
        v0 = CVP.closest_vector(A, t)

        E = Enumeration(M)
        _, v1 = E.enumerate(0, A.nrows, 2, 40, M.from_canonical(t))[0]
        v1 = IntegerMatrix.from_iterable(1, A.nrows,
                                         map(lambda x: int(round(x)), v1))
        v1 = tuple((v1 * A)[0])

        assert v0 == v1
Example #25
def test_bkz_bkz():
    for m, n in dimensions:
        if m < 2 or n < 2:
            continue
        A = make_integer_matrix(m, n)
        b00 = []
        for float_type in float_types:
            B = copy(A)
            M = GSO.Mat(B, float_type=float_type)
            lll_obj = LLL.Reduction(M)
            param = BKZ.Param(block_size=min(m, 40), strategies="default.json")
            bkz = BKZ.Reduction(M, lll_obj, param)
            bkz()
            b00.append(B[0, 0])
        for i in range(1, len(b00)):
            assert b00[0] == b00[i]
Example #26
 def reduce_lattice(self, lattice, block_size):
     if block_size is None:
         self.log("Start LLL.")
         return LLL.reduction(lattice)
     else:
         if self.sieve:
             #self.log("Start sieving(BKZ-{}).".format(block_size))
             #g6k = Siever(lattice)
             #tracer = SieveTreeTracer(g6k, root_label=("bkz", block_size), start_clocks=True)
             #for _ in range(3):
             #    BKZ_Sieve(g6k, tracer, block_size)
             return lattice
         else:
             self.log("Start BKZ-{}.".format(block_size))
             return BKZ.reduction(
                 lattice,
                 BKZ.Param(block_size=block_size,
                           strategies=BKZ.DEFAULT_STRATEGY,
                           auto_abort=True))
Example #27
def gso_workerf(args):
    import copy

    d, q, seed, params, procrastinating, what = args

    dummy = [1.0] * d

    if procrastinating:
        from impl import BKZReduction
        from simu import ProcrastinatingBKZSimulation as BKZSimulation
        from simu import (
            ProcrastinatingBKZQualitySimulation as BKZQualitySimulation, )
    else:
        from fpylll.algorithms.bkz2 import BKZReduction
        from simu import BKZSimulation
        from simu import BKZQualitySimulation

    FPLLL.set_random_seed(seed)
    A = LLL.reduction(IntegerMatrix.random(d, "qary", k=d // 2, q=q))

    if "qs" in what:
        qsimu_r = BKZQualitySimulation(copy.copy(A))(params)
    else:
        qsimu_r = dummy

    if "fs" in what:
        fsimu_r = BKZSimulation(copy.copy(A))(params)
    else:
        fsimu_r = dummy

    if "r" in what:
        BKZReduction(A)(params)
        M = GSO.Mat(A)
        M.update_gso()
        real_r = M.r()
    else:
        real_r = dummy

    return qsimu_r, fsimu_r, real_r
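The worker above compares simulated and actual Gram-Schmidt profiles. fpylll ships a BKZ simulator in fpylll.tools.bkz_simulator; a small sketch of running it alongside a real reduction (dimension, block size and tour count are illustrative, and the real reduction can take a while):

from copy import copy
from fpylll import IntegerMatrix, LLL, GSO, BKZ, FPLLL
from fpylll.algorithms.bkz2 import BKZReduction
from fpylll.tools.bkz_simulator import simulate

FPLLL.set_random_seed(1)
A = LLL.reduction(IntegerMatrix.random(100, "qary", k=50, q=2**30))
params = BKZ.Param(block_size=40, max_loops=4,
                   strategies=BKZ.DEFAULT_STRATEGY, flags=BKZ.MAX_LOOPS)

M = GSO.Mat(copy(A))
M.update_gso()
sim_r, _ = simulate(list(M.r()), params)   # predicted squared GS norms after BKZ

BKZReduction(A)(params)                    # actual reduction
M = GSO.Mat(A)
M.update_gso()
real_r = list(M.r())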
Example #28
def compare():
    tuners = [Tuner(b) for b in range(100)]
    recycled_tuners = [Tuner(b) for b in range(100)]

    A = make_integer_matrix(n)
    M = GSO.Mat(A, flags=GSO.ROW_EXPO)
    lll_obj = LLL.Reduction(M, flags=LLL.DEFAULT)
    lll_obj()

    for n in ns:
        params = fplll_bkz.Param(block_size=bs,
                                 max_loops=tours,
                                 flags=fplll_bkz.VERBOSE | fplll_bkz.GH_BND,
                                 strategies="default.json")

        print()
        print("======")
        print("dim ", n)
        print("======")

        # print
        # print "yoloBKZ"
        # B = copy(A)
        # YoloBKZ(B, tuners=tuners, recycle=False)(b=bs, tours=tours)

        print()
        print("Recycled yoloBKZ")
        # B = copy(A)
        # YoloBKZ(B, tuners=recycled_tuners)(b=bs, tours=1)
        # print
        # print "Restart"
        # print

        B = copy(A)
        timer = Timer()
        YoloBKZ(B, tuners=recycled_tuners)(b=bs, tours=tours)
        print "Total: %.3f" % timer.elapsed()
Example #29
def silke(A, c, beta, h, m=None, scale=1, float_type="double"):
    """

    :param A:    LWE matrix
    :param c:    LWE vector
    :param beta: BKZ block size
    :param m:    number of samples to consider
    :param scale: scale rhs of lattice by this factor

    """
    from fpylll import BKZ, IntegerMatrix, LLL, GSO
    from fpylll.algorithms.bkz2 import BKZReduction as BKZ2

    if m is None:
        m = A.nrows()

    L = dual_instance1(A, scale=scale)
    L = IntegerMatrix.from_matrix(L)
    L = LLL.reduction(L, flags=LLL.VERBOSE)
    M = GSO.Mat(L, float_type=float_type)
    bkz = BKZ2(M)
    t = 0.0
    param = BKZ.Param(block_size=beta,
                      strategies=BKZ.DEFAULT_STRATEGY,
                      auto_abort=True,
                      max_loops=16,
                      flags=BKZ.VERBOSE|BKZ.AUTO_ABORT|BKZ.MAX_LOOPS)
    bkz(param)
    t += bkz.stats.total_time

    H = copy(L)

    import pickle
    pickle.dump(L, open("L-%d-%d.sobj"%(L.nrows, beta), "wb"))

    E = []
    Y = set()
    V = set()
    y_i = vector(ZZ, tuple(L[0]))
    Y.add(tuple(y_i))
    E.append(apply_short1(y_i, A, c, scale=scale)[1])

    v = L[0].norm()
    v_ = v/sqrt(L.ncols)
    v_r = 3.2*sqrt(L.ncols - A.ncols())*v_/scale
    v_l = sqrt(h)*v_

    fmt = u"{\"t\": %5.1fs, \"log(sigma)\": %5.1f, \"log(|y|)\": %5.1f, \"log(E[sigma]):\" %5.1f}"

    print()
    print(fmt % (t,
                 log(abs(E[-1]), 2),
                 log(L[0].norm(), 2),
                 log(sqrt(v_r**2 + v_l**2), 2)))
    print()
    for i in range(m):
        t = cputime()
        M = GSO.Mat(L, float_type=float_type)
        bkz = BKZ2(M)
        t = cputime()
        bkz.randomize_block(0, L.nrows, stats=None, density=3)
        LLL.reduction(L)
        y_i = vector(ZZ, tuple(L[0]))
        l_n = L[0].norm()
        if L[0].norm() > H[0].norm():
            L = copy(H)
        t = cputime(t)

        Y.add(tuple(y_i))
        V.add(y_i.norm())
        E.append(apply_short1(y_i, A, c, scale=scale)[1])
        if len(V) >= 2:
            fmt =  u"{\"i\": %4d, \"t\": %5.1fs, \"log(|e_i|)\": %5.1f, \"log(|y_i|)\": %5.1f,"
            fmt += u"\"log(sigma)\": (%5.1f,%5.1f), \"log(|y|)\": (%5.1f,%5.1f), |Y|: %5d}"
            print(fmt % (i+2, t, log(abs(E[-1]), 2), log(l_n, 2), log_mean(E), log_var(E), log_mean(V), log_var(V), len(Y)))

    return E
    def svp_reduction_mpi(self,
                          kappa,
                          block_size,
                          params,
                          tracer=dummy_tracer):
        # time
        start_time = time()

        # max preprocessing block size
        bs_diff = BS_DIFF
        bs_max = block_size - bs_diff

        # set goal
        self.M.update_gso()

        # current vector
        #kappa_length = self.M.get_r(kappa, kappa) * self.lll_obj.delta
        kappa_length = self.M.get_r(kappa, kappa)

        # gh length
        r = [self.M.get_r(i, i) for i in range(kappa, kappa + block_size)]
        #gh_length = gaussian_heuristic(r) * params.gh_factor
        gh_length = gaussian_heuristic(r) * 1.1

        goal = gh_length
        if (kappa_length <= goal):
            if (rank == 0 and block_size >= BOUND_SINGLE):
                print "# [rank %d] kappa = %d bs = %d, goal = %d, r = %d (already achieved -- pass)" % \
                  (rank, kappa, block_size, goal, r[0])
                print "gh_factor: ", params.gh_factor
            return 1
        """
        goal = (goal + kappa_length) / 2
        if (goal < r[0] * 0.95):
            goal = r[0] * 0.95
        """

        # info
        #"""
        if (rank == 0 and block_size >= BOUND_SINGLE):
            print "# [rank %d] kappa = %d bs = %d, goal = %d, r = %d" % \
              (rank, kappa, block_size, goal, r[0])

        #"""
        # set matrices
        n = self.A.nrows
        trials = self.ncores * [0]
        As = self.ncores * [None]
        POOL_SIZE = 8 * self.ncores
        POOL_COPIES = 1 + self.ncores // 4

        # randomization matrices
        for i in range(self.ncores):
            As[i] = self.copy_to_IntegerMatrix_long(self.A)
            M = GSO.Mat(As[i], float_type=TYPE)
            bkz = BKZ2_SUB(M)
            bkz.randomize_block(kappa,
                                kappa + block_size,
                                density=block_size // 3)
            del bkz
            del M

        # setup share pools
        sv_pool = SVPool(POOL_SIZE, copies=POOL_COPIES)
        sv_pool.data = []
        workers = self.ncores * [None]
        over = False

        #print "####################################################"
        #print "### Process", rank, " starts cores ", self.ncores

        # list of queues
        list_queue = [Queue() for i in range(self.ncores)]
        fail_list = [0 for i in range(self.ncores)]
        break_flag = [0 for i in range(self.ncores)]
        terminated_by_other = 0
        filenames = ["b" + str(n) + "_n" + str(rank) + \
                    "-c" + str(i) for i in range(self.ncores)]

        while not over:

            state = MPI.Status()
            okay = comm.iprobe(source=MPI.ANY_SOURCE, tag=99, status=state)
            if (okay):
                node = state.Get_source()
                data = comm.recv(source=node, tag=99)
                #print "# [rank %d] receive exit signal from rank %d " % (rank, node)
                if (data == 1):
                    terminated_by_other = 1
                    self.A = As[0]  # need improve
                    self.M = GSO.Mat(self.A, float_type=TYPE)
                    self.lll_obj = LLL.Reduction(self.M, flags=LLL.DEFAULT)
                    for i in range(self.ncores):
                        self.insert_in_IntegerMatrix(self.A, As[i][kappa],
                                                     kappa, block_size)
                    break

            sleep(0.01)

            for i in range(self.ncores):

                if workers[i] is None:
                    v = sv_pool.pop()
                    if v is not None:
                        self.insert_in_IntegerMatrix(As[i], v, kappa,
                                                     block_size)

                    bsi = bs_max - 30
                    bsi += min(20, 2 *
                               trials[i])  # about 10 trials to go up to bs_max
                    bsi -= random.randint(0, 3)

                    norm = self.M.get_r(kappa, kappa)
                    pickle.dump((As[i], norm, False), open(filenames[i], 'wb'))

                    # just use current bsi
                    if (fail_list[i] <= 0):
                        workers[i] = Process(
                            target=self.svp_reduction_mpi_trial,
                            args=(As[i], kappa, block_size, bsi, list_queue[i],
                                  filenames[i]))

                    # increase blocksize bsi
                    elif (fail_list[i] > 0
                          and fail_list[i] <= THRESHOLD_LEVEL1):
                        bsi = min(bs_max, bsi + fail_list[i])
                        workers[i] = Process(
                            target=self.svp_reduction_mpi_trial,
                            args=(As[i], kappa, block_size, bsi, list_queue[i],
                                  filenames[i]))
                    # stablize bsi; but rerandomization
                    elif (fail_list[i] <= THRESHOLD_LEVEL2):
                        M = GSO.Mat(As[i], float_type=TYPE)
                        bkz = BKZ2_SUB(M)
                        bkz.randomize_block(kappa,
                                            kappa + block_size,
                                            density=block_size // 3)
                        del bkz
                        del M
                        workers[i] = Process(
                            target=self.svp_reduction_mpi_trial,
                            args=(As[i], kappa, block_size, bsi, list_queue[i],
                                  filenames[i]))
                    else:
                        workers[i] = Process(
                            target=self.svp_reduction_mpi_trial,
                            args=(As[i], kappa, block_size, bsi, list_queue[i],
                                  filenames[i]))
                        break_flag[i] = True

                    # start woker
                    t = workers[i].start()

                if (workers[i] is not None) and (not workers[i].is_alive()):

                    # get and insert
                    As[i], norm, success = pickle.load(open(
                        filenames[i], 'rb'))
                    """
                    success = list_queue[i].get()
                    norm = list_queue[i].get()
                    j = 0
                    # buffer read for queue
                    while True:
                        if (j >= block_size):
                            break
                        try:
                            v = list_queue[i].get()
                        except Queue.Empty:
                            break
                        for k in range(As[i].ncols):
                            As[i][kappa+j, k] = v[k]
                        j += 1
                    #for j in xrange(THRESHOLD_SEND):
                    #    self.insert_in_IntegerMatrix(As[i], mat[j], kappa, block_size)
                    """

                    # break
                    if (break_flag[i]):
                        workers[i] = None
                        self.A = As[i]
                        self.M = GSO.Mat(self.A, float_type=TYPE)
                        self.lll_obj = LLL.Reduction(self.M, flags=LLL.DEFAULT)
                        over = True
                        break

                    # found smaller norm than goal
                    if norm < goal:
                        #print "# SVP-", n, "SOLUTION :", As[i][kappa]
                        #print "#   [rank %d] found SVP-%d, norm %d < goal %d" % (rank,
                        #            block_size, norm, goal)
                        self.A = As[i]
                        self.M = GSO.Mat(self.A, float_type=TYPE)
                        self.lll_obj = LLL.Reduction(self.M, flags=LLL.DEFAULT)
                        over = True
                        break
                    # found smaller norm but larger than goal
                    if (success):
                        sv_pool.push([x for x in As[i][kappa]], norm)
                        fail_list[i] = 0
                    else:
                        norm = 1e100
                        fail_list[i] += 1

                    # done update
                    workers[i] = None
                    trials[i] += 1

        for w in [w for w in workers if w is not None]:
            w.terminate()

        # terminated by this process -- signal others MPI's to stop
        if (terminated_by_other == 0):
            for i in range(size):
                if (i != rank):
                    comm.isend(1, i, tag=99)

        # sending data to master node
        self.M.update_gso()
        if (rank != 0):
            send_vec = []
            #for i in range(block_size/10):
            #    send_vec.append(list(self.A[kappa+i]))
            send_vec.append(list(self.A[kappa]))
            # self.M.update_gso()
            norm = self.M.get_r(kappa, kappa)
            comm.isend(send_vec, dest=0, tag=11)

        # master node receiving data
        if (rank == 0):
            num = 0
            while (1):
                if (num >= size - 1):
                    break
                sleep(.01)
                state = MPI.Status()
                okay = comm.iprobe(source=MPI.ANY_SOURCE, tag=11, status=state)
                if (okay):
                    num += 1
                    node = state.Get_source()
                    vectors = comm.recv(source=node, tag=11)
                    for i in range(len(vectors)):
                        self.insert_in_IntegerMatrix(self.A, vectors[i], kappa,
                                                     block_size)
                    self.M = GSO.Mat(self.A, float_type=TYPE)
                    self.M.update_gso()
                    self.lll_obj = LLL.Reduction(self.M, flags=LLL.DEFAULT)
                    norm = self.M.get_r(kappa, kappa)

        # message
        self.M.update_gso()
        kappa_length = self.M.get_r(kappa, kappa)
        if (rank == 0 and block_size >= BOUND_SINGLE):
            print "# [rank %d] kappa %d, bs %d, r %d (gh %d), time %s, trials %s " % \
            (rank, kappa, block_size, kappa_length, goal, time()-start_time, trials)

        # check processes
        while True:
            sleep(.01)
            some_alive = False
            for w in [w for w in workers if w is not None]:
                some_alive |= w.is_alive()
            if not some_alive:
                return 1
Example #31
def prepare(n):
    A = IntegerMatrix.random(n, "qary", bits=n / 2, k=n / 2)
    M = GSO.Mat(A)
    L = LLL.Reduction(M)
    L()
    return M
Example #32
        r = [bkz.M.get_r(i, i) for i in range(n)]
        trials += 1

    print "Finished !"
    print_basis_stats(bkz.M, n)
    return trials


START = time()
for dim in range(start_dim, 130, 2):
    for r in range(repeat):
        A_pre = IntegerMatrix.from_file(
            "svpchallenge/svpchallengedim%dseed0.txt" % dim)
        print "---------------------", A_pre.nrows
        ASVP_START = time()
        LLL.reduction(A_pre)

        # bs = int(floor(dim * bs_rat))

        A = IntegerMatrix.from_matrix(A_pre, int_type="long")

        trials = asvp(A, bs, gh_factor=(1.05**2))
        ASVP_TIME = time() - ASVP_START

        print "\n\n Challenge %d Solved" % dim
        print A[0]
        print "SUMMARY", {
            "dim": dim,
            "bs": bs,
            "time": ASVP_TIME,
            "trials": trials
Example #33
    def __call__(self, solver=None, flavor="plain", worst_case=False, sample=True, **kwds):
        """
        Solve the HNP instance.

        :param solver: a uSVP with predicate solver or ``None`` for letting ``usvp_pred_solve`` decide.
        :param sample: if ``True`` a fresh basis is sampled
        :param worst_case: if ``True`` the target norm is chosen to match the maximum of the target, this will be slow.

        """
        if sample:
            self.M = self.gen_lattice()

        tau = max([2 ** (klen - 1) for klen in self.klen_list])

        def predicate(v, standard_basis=True):
            G_powers, A0, A1 = self._data_for_test()
            w = 2 ** (self.klen_list[0] - 1)
            f = Integer((2 ** (max(self.klen_list) - 1)) / w)

            if standard_basis:
                nz = v[-1]
            else:
                nz = sum(round(v[i]) * A1[i] for i in range(len(A1)))  # the last coefficient must be non-zero

            if abs(nz) != tau:
                return False

            if standard_basis:
                kG = G_powers[v[0] // f]
            else:
                kG = sum(round(v[i]) * G_powers[A0[i]] for i in range(len(A0)))

            r = self.r_list[0]
            if (kG + G_powers[w]).xy()[0] == r:
                return True
            elif (-kG + G_powers[w]).xy()[0] == r:
                return True
            else:
                return False

        def invalidate_cache():
            self._data_for_test.clear_cache()

        if worst_case:
            target_norm = self.mvf(self.m, max(self.klen_list), prec=self.ecdsa.nbits // 2)
        else:
            target_norm = self.evf(self.m, max(self.klen_list), prec=self.ecdsa.nbits // 2)

        LLL.Reduction(self.M)()
        invalidate_cache()

        res = flavors[flavor](
            self.M,
            predicate,
            squared_target_norm=target_norm ** 2,
            invalidate_cache=invalidate_cache,
            threads=self.threads,
            solver=solver,
            **kwds
        )

        if res.success:
            key = self.recover_key(res.solution)
        else:
            key = False

        return key, res
Example #34
def dim_error_tradeoff(A,
                       c,
                       u,
                       beta,
                       h,
                       k,
                       alpha=None,
                       tau=None,
                       float_type="mpfr",
                       use_lll=True):
    """

    :param A:    LWE matrix
    :param c:    LWE vector
    :param u:    Uniform vector
    :param beta: BKZ block size
    :param h:    Hamming weight of secret
    :param k:    LWE dim after tradeoff
    :param tau:  number of new samples to generate
    :param use_lll: If True, run BKZ only once and then run LLL.
                    If False, run BKZ iteratively.

    * secret vector s is used to see the error term of new LWE(-like) samples.

    """

    from fpylll import BKZ, IntegerMatrix, LLL, GSO
    from fpylll.algorithms.bkz2 import BKZReduction as BKZ2

    n = A.ncols()
    q = A.base_ring().order()
    K = GF(q, proof=False)

    if alpha is None:
        alpha = 8 / q

    if tau is None:
        tau = 30

    m = A.nrows() // n

    scale = round(alpha * q * sqrt(m) / sqrt(2 * pi * h))
    scale = ZZ(scale)

    count = 0

    A_k = matrix(ZZ, 1, k)
    c_k = []
    u_k = []
    length = 0

    while count < tau:

        r = count * m
        T = A.matrix_from_rows([i + r for i in range(m)])
        ct = c[r:r + m]
        ut = u[r:r + m]

        T1 = T.matrix_from_columns([i for i in range(n - k)])

        L = dual_instance1(T1, scale=scale)
        L = IntegerMatrix.from_matrix(L)
        L = LLL.reduction(L)
        M = GSO.Mat(L, float_type=float_type)
        bkz = BKZ2(M)
        param = BKZ.Param(block_size=beta,
                          strategies=BKZ.DEFAULT_STRATEGY,
                          auto_abort=True,
                          max_loops=16,
                          flags=BKZ.AUTO_ABORT | BKZ.MAX_LOOPS)
        bkz(param)

        H = copy(L)

        y = vector(ZZ, tuple(L[0]))
        length += y.norm()

        T2 = T.matrix_from_columns([n - k + i for i in range(k)])

        A_kt, c_kt, u_kt = apply_short1(y, T2, ct, ut, scale=scale)
        if r == 0:
            A_k[0] = A_kt
        else:
            A_k = A_k.stack(A_kt)
        c_k.append(c_kt)
        u_k.append(u_kt)

        count += 1

    length = float(length / tau)
    A_k = A_k.change_ring(K)
    c_k = vector(K, c_k)
    u_k = vector(K, u_k)

    B = float(2 + 1 / sqrt(2 * pi)) * (alpha * q)
    B = B * B * m / (m + n)
    B = sqrt(B) * length / scale

    print('(A_k, c_k) is k-dim LWE samples (with secret s[-k:]) / (A_k, u_k) is uniform samples. ')

    return A_k, c_k, u_k, B
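Both silke above and this function drive the same inner loop: build an IntegerMatrix, LLL-reduce it, then run the Python BKZ 2.0 implementation with auto-abort and a loop cap. Stripped of the Sage and LWE bookkeeping, that core looks roughly like this (the random q-ary matrix stands in for the dual_instance1 output):

from fpylll import BKZ, IntegerMatrix, LLL, GSO
from fpylll.algorithms.bkz2 import BKZReduction as BKZ2

L = IntegerMatrix.random(40, "qary", bits=20, k=20)   # stand-in for the scaled dual lattice
L = LLL.reduction(L)
M = GSO.Mat(L, float_type="double")
bkz = BKZ2(M)
param = BKZ.Param(block_size=20,
                  strategies=BKZ.DEFAULT_STRATEGY,
                  auto_abort=True,
                  max_loops=16,
                  flags=BKZ.AUTO_ABORT | BKZ.MAX_LOOPS)
bkz(param)
print(list(L[0]))                                      # short vector used to build the new samples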
Example #35
    def svp_reduction_single(self,
                             kappa,
                             block_size,
                             params,
                             tracer=dummy_tracer):
        """
        :param kappa:
        :param block_size:
        :param params:
        :param tracer:
        """
        print(self.lll_obj.delta)
        verbose = 0

        if (params.flags & BKZ.DUMP_GSO):
            verbose = 1

        if (verbose):
            start_time = time()
        self.M.update_gso()
        r = [self.M.get_r(i, i) for i in range(kappa, kappa + block_size)]
        gh_length = gaussian_heuristic(r)
        kappa_length = self.M.get_r(kappa, kappa)
        goal = min(kappa_length, gh_length)

        self.lll_obj.size_reduction(0, kappa + 1)
        old_first, old_first_expo = self.M.get_r_exp(kappa, kappa)
        remaining_probability = 1.0
        rerandomize = False
        trials = 0
        sub_solutions = block_size > SUBSOL_BLOCKSIZE

        # copy old lattice
        if (params.flags & BKZ.DUMP_GSO):
            A_backup = self.copy_to_IntegerMatrix_long(self.A)
            v_old = A_backup[kappa]
            r_old = [
                log(self.M.get_r(i, i))
                for i in range(kappa, kappa + block_size)
            ]

        # main loop
        while remaining_probability > 1. - params.min_success_probability:

            # 1. preprocessing
            preproc_start = time()

            with tracer.context("preprocessing"):
                #self.M.update_gso()
                self.svp_preprocessing(kappa,
                                       block_size,
                                       params,
                                       trials,
                                       tracer=tracer)
            preproc_cost = time() - preproc_start

            with tracer.context("pruner"):
                target = 1 - ((1. - params.min_success_probability) /
                              remaining_probability)

                radius, pruning = self.get_pruning(kappa, block_size, params,
                                                   target * 1.01, preproc_cost,
                                                   tracer)

            # 2. enum
            enum_obj = Enumeration(self.M, sub_solutions=sub_solutions)
            try:
                with tracer.context("enumeration",
                                    enum_obj=enum_obj,
                                    probability=pruning.expectation,
                                    full=block_size == params.block_size):
                    max_dist, solution = enum_obj.enumerate(
                        kappa,
                        kappa + block_size,
                        radius,
                        0,
                        pruning=pruning.coefficients)[0]

                # 3. post processing
                with tracer.context("postprocessing"):
                    preproc_start = time()  # include post_processing time as part of the next pre_processing

                    if not sub_solutions:
                        self.svp_postprocessing(kappa,
                                                block_size,
                                                solution,
                                                tracer=tracer)
                    if sub_solutions:
                        self.insert_sub_solutions(
                            kappa, block_size,
                            enum_obj.sub_solutions[:1 + block_size // 4])
                    self.M.update_gso()

            except EnumerationError:
                preproc_start = time()

            remaining_probability *= (1 - pruning.expectation)

            trials += 1

        # recover basis
        if (params.flags & BKZ.DUMP_GSO):
            r_new = [
                self.M.get_r(i, i) for i in range(kappa, kappa + block_size)
            ]
            current = self.copy_to_vector_long(self.A[kappa])
            # update
            self.copy_from_IntegerMatrix_long(A_backup)
            self.M = GSO.Mat(self.A, float_type=TYPE)
            self.M.update_gso()
            self.lll_obj = LLL.Reduction(self.M, flags=LLL.DEFAULT)
            self.insert_in_IntegerMatrix(self.A, current, kappa, block_size)
            # update again for safe
            self.M = GSO.Mat(self.A, float_type=TYPE)
            self.M.update_gso()
            self.M = GSO.Mat(self.A, float_type=TYPE)
            self.M = GSO.Mat(self.A, float_type=TYPE)
            self.M.update_gso()
            self.lll_obj = LLL.Reduction(self.M, flags=LLL.DEFAULT)
            if (not self.check_compare(A_backup, self.A, kappa, block_size)):
                print "# error exit"
                sys.exit(1)

        self.M.update_gso()
        new_first, new_first_expo = self.M.get_r_exp(kappa, kappa)
        clean = old_first <= new_first * 2**(new_first_expo - old_first_expo)

        if (params.flags & BKZ.DUMP_GSO):
            global stat_update_gh
            r_new = self.M.get_r(kappa, kappa)
            r_newlog = [
                log(self.M.get_r(i, i))
                for i in range(kappa, kappa + block_size)
            ]
            stat_update_gh[stat_tours - 1].append(
                float(sqrt(r_new / gh_length)))

        if (verbose):
            if (rank == 0):
                kappa_length = r_new
                print "# [rank %d] kappa %d, bs %d, r %d (gh %d), time %s, trials %s " % \
                  (rank, kappa, block_size, kappa_length, goal, time()-start_time, trials)

                det_n = float(sum(r_old) / block_size)
                normalized_old = [(r_old[i] - det_n)
                                  for i in range(0, block_size)]
                normalized_new = [(r_newlog[i] - det_n)
                                  for i in range(0, block_size)]
                global stat_old_norm
                global stat_new_norm

                if (block_size == params.block_size):
                    for i in range(block_size):
                        stat_old_norm[i] = stat_old_norm[i] + normalized_old[i]
                        stat_new_norm[i] = stat_new_norm[i] + normalized_new[i]

        return clean
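At its core svp_reduction_single does preprocessing, asks the pruner for coefficients, runs a pruned enumeration and inserts the result. A much-reduced sketch of that sequence using only fpylll's public API (dimension, block size, node budget and success probability are assumptions):

from fpylll import IntegerMatrix, LLL, GSO, Enumeration, EnumerationError, Pruning
from fpylll.util import gaussian_heuristic

A = IntegerMatrix.random(50, "qary", bits=30, k=25)
LLL.reduction(A)
M = GSO.Mat(A)
M.update_gso()

kappa, block_size = 0, 30
r = [M.get_r(i, i) for i in range(kappa, kappa + block_size)]
radius = min(r[0], gaussian_heuristic(r) * 1.1)        # squared enumeration radius

pruning = Pruning.run(radius, 10**7, [r], 0.5,         # target success probability 0.5
                      metric="probability", flags=Pruning.GRADIENT)
try:
    enum_obj = Enumeration(M)
    dist, solution = enum_obj.enumerate(kappa, kappa + block_size, radius, 0,
                                        pruning=pruning.coefficients)[0]
    v = A.multiply_left(solution, kappa)               # candidate short vector
    print(dist, v)
except EnumerationError:
    pass                                               # radius too tight for these pruning bounds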