Example #1
def test_ntru_gsa(n):

	# read from file
	filename = 'lwe_n'+str(n)+'.txt'
	data = open(filename, 'r').readlines()
	q = int(data[0])
	B = eval(",".join([s_.replace('\n','').replace(" ", ", ") for s_ in data[1:n+1]]))
	B = IntegerMatrix.from_matrix(B)
	c = eval(data[n+1].replace(" ", ','))

	beta, nsamples, prep_rt, GSA = find_beta(n, q, n)
	print('beta, nsamples:', beta, nsamples)
	print('GSA predicted:')
	print(GSA)

	print('beta, nsamples: ', beta, nsamples)
	Basis = primal_lattice_basis(B, c, q, nsamples)

	g6k = Siever(Basis)

	d = g6k.full_n
	g6k.lll(0, g6k.full_n)
	print(g6k.M)
	slope = basis_quality(g6k.M)["/"]
	print("Intial Slope = %.5f\n" % slope)

	print('GSA input:')
	print([g6k.M.get_r(i, i) for i in range(d)])

	print('d:', d)
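	# Assumed interpretation: for a uniform ternary vector (entries in {-1, 0, 1})
	# E[x_i^2] = 2/3, so its expected squared norm in dimension d is (2/3)*d; the
	# extra +1 accounts for the Kannan-style embedding coefficient.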
	target_norm = ceil( (2./3)*d + 1) + 1
	print("target_norm:", target_norm)
	#
	#   Preprocessing
	#
	if beta < 50:
		print("Starting a fpylll BKZ-%d tour. " % (beta))
		sys.stdout.flush()
		bkz = BKZReduction(g6k.M)
		par = fplll_bkz.Param(beta,
							  strategies=fplll_bkz.DEFAULT_STRATEGY,
							  max_loops=1)
		bkz(par)

	else:
		print("Starting a pnjBKZ-%d tour. " % (beta))
		pump_n_jump_bkz_tour(g6k, tracer, beta, jump=jump,
								 verbose=verbose,
								 extra_dim4free=extra_dim4free,
								 dim4free_fun=dim4free_fun,
								 goal_r0=target_norm,
								 pump_params=pump_params)

			#T_BKZ = time.time() - T0_BKZ

	print('GSA output:')
	print([g6k.M.get_r(i, i) for i in range(d)])

	return 1
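# Minimal usage sketch (not part of the original snippet): run the GSA test for an
# illustrative n = 101, assuming a file 'lwe_n101.txt' exists in the working directory
# in the format parsed above (q on the first line, then the n basis rows, then the
# target vector on line n+1).
test_ntru_gsa(101)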
Example #2
    def exit(self, **kwds):
        """
        By default CPU and wall time are recorded.  More information is recorded for "enumeration"
        and "tour" labels.  When the label is a tour then the status is printed if verbosity > 0.
        """
        node = self.current
        label = node.label

        node.data["cputime"] += time.clock()
        node.data["walltime"] += time.time()

        if label == "enumeration":
            full = kwds.get("full", True)
            if full:
                node.data["#enum"] = Statistic(kwds["enum_obj"].get_nodes(),
                                               repr="sum") + node.data.get(
                                                   "#enum", None)
                try:
                    node.data["%"] = Statistic(kwds["probability"],
                                               repr="avg") + node.data.get(
                                                   "%", None)
                except KeyError:
                    pass

        if label[0] == "tour":
            data = basis_quality(self.instance.M)
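            # Assumed reading: "/" is the (negative) slope of log ||b_i*||^2, which moves
            # toward zero as the basis gets better reduced, hence "max"; the remaining
            # quality measures improve as they decrease, hence "min".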
            for k, v in data.items():
                if k == "/":
                    node.data[k] = Statistic(v, repr="max")
                else:
                    node.data[k] = Statistic(v, repr="min")

        if self.verbosity and label[0] == "tour":
            report = OrderedDict()
            report["i"] = label[1]
            report["cputime"] = node["cputime"]
            report["walltime"] = node["walltime"]
            try:
                report["preproc"] = node.find("preprocessing", True)["cputime"]
            except KeyError:
                pass
            try:
                report["svp"] = node.find("enumeration", True)["cputime"]
            except KeyError:
                pass
            report["#enum"] = node.sum("#enum")
            report["lll"] = node.sum("cputime", label="lll")
            try:
                report["pruner"] = node.find("pruner", True)["cputime"]
            except KeyError:
                pass
            report["r_0"] = node["r_0"]
            report["/"] = node["/"]

            print(pretty_dict(report))

        self.current = self.current.parent
Example #3
def play(BKZ, A, block_size, tours, progressive_step_size=None):
    """Call ``BKZ`` on ``A`` with ``block_size`` for the given number of ``tours``.

    The given number of tours is used for all block sizes from 2 up to ``block_size`` in increments of
    ``progressive_step_size``. Providing ``None`` for this parameter disables the progressive strategy.

    :param BKZ: a BKZ class whose ``__call__`` accepts a single block size as parameter
    :param A: an integer matrix
    :param block_size: a block size >= 2
    :param tours: number of tours >= 1
    :param progressive_step_size: step size for progressive strategy
    :returns: a trace of the execution using ``BKZTreeTracer``

    ..  note :: This function essentially reimplements ``BKZ.__call__`` but supports the
        progressive strategy.
    """
    bkz = BKZ(copy.copy(A))
    tracer = BKZTreeTracer(bkz, start_clocks=True)

    # this essentially initialises the GSO object, LLL was already run by the constructor, so this
    # is quick.
    with tracer.context("lll"):
        bkz.lll_obj()

    if progressive_step_size is None:
        block_sizes = (block_size, )
    elif int(progressive_step_size) > 0:
        block_sizes = list(range(2, block_size + 1, progressive_step_size))
        if block_sizes[-1] != block_size:
            block_sizes.append(block_size)
    else:
        raise ValueError("Progressive step size of %s not understood." %
                         progressive_step_size)

    for block_size in block_sizes:
        for i in range(tours):
            with tracer.context("tour", (block_size, i)):
                bkz.tour(block_size, tracer=tracer)
    tracer.exit()
    trace = tracer.trace

    quality = basis_quality(bkz.M)
    for k, v in quality.items():
        trace.data[k] = v

    return trace
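# To make the progressive schedule above concrete, here is a small self-contained
# sketch (illustrative helper, not part of fpylll) of how the list of block sizes
# is derived from ``block_size`` and ``progressive_step_size``:
def progressive_block_sizes(block_size, progressive_step_size=None):
    """Return the block sizes that ``play`` iterates over."""
    if progressive_step_size is None:
        return [block_size]
    if int(progressive_step_size) <= 0:
        raise ValueError("Progressive step size of %s not understood." % progressive_step_size)
    sizes = list(range(2, block_size + 1, progressive_step_size))
    if sizes[-1] != block_size:
        sizes.append(block_size)
    return sizes

assert progressive_block_sizes(20, 6) == [2, 8, 14, 20]
assert progressive_block_sizes(20) == [20]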
Example #4
def bkz_call(BKZ, A, block_size, tours, progressive_step_size=None):
    """Call ``BKZ`` on ``A`` with ``block_size`` for the given number of ``tours``.

    :param BKZ: a BKZ class whose ``__call__`` accepts a single block size as parameter
    :param A: an integer matrix
    :param block_size: a block size >= 2
    :param tours: number of tours >= 1
    :param progressive_step_size: step size for the progressive strategy, or ``None`` to disable it

    .. note :: This function essentially reimplements ``BKZ.__call__`` but supports the progressive strategy.

    """
    bkz = BKZ(copy.copy(A))
    tracer = BKZTreeTracer(bkz, start_clocks=True)

    # this essentially initialises the GSO object, LLL was already run by the constructor, so this
    # is quick.
    with tracer.context("lll"):
        bkz.lll_obj()

    if progressive_step_size is None:
        block_sizes = (block_size, )
    elif int(progressive_step_size) > 0:
        block_sizes = list(range(2, block_size + 1, progressive_step_size))
        if block_sizes[-1] != block_size:
            block_sizes.append(block_size)
    else:
        raise ValueError("Progressive step size of %s not understood." %
                         progressive_step_size)

    for block_size in block_sizes:
        for i in range(tours):
            with tracer.context("tour", (block_size, i)):
                bkz.tour(block_size, tracer=tracer)
    tracer.exit()
    trace = tracer.trace

    quality = basis_quality(bkz.M)
    for k, v in quality.items():
        trace.data[k] = v

    return trace
Example #5
    def exit(self, **kwds):
        """
        When the label is a tour then the status is printed if verbosity > 0.
        """
        node = self.current
        label = node.label

        if label[0] == "tour":
            data = basis_quality([2 ** (2 * r_) for r_ in self.instance.r])
            for k, v in data.items():
                if k == "/":
                    node.data[k] = Accumulator(v, repr="max")
                else:
                    node.data[k] = Accumulator(v, repr="min")

        if self.verbosity and label[0] == "tour":
            report = OrderedDict()
            report["i"] = label[1]
            report["#enum"] = node.sum("#enum")
            report["r_0"] = node["r_0"]
            report["/"] = node["/"]
            print(pretty_dict(report))

        self.current = self.current.parent
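# Self-contained illustration of the norm convention assumed above: the simulator
# stores log2 of the GSO norms, while basis_quality expects squared norms, hence
# the ``2 ** (2 * r_)`` conversion.
from math import log2

squared_norms = [16.0, 4.0, 1.0]
log_norms = [log2(x) / 2.0 for x in squared_norms]     # simulator-side representation
assert [2 ** (2 * r_) for r_ in log_norms] == squared_norms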
Example #6
def simulate(r, param):
    """
    BKZ simulation algorithm as proposed by Chen and Nguyen in "BKZ 2.0: Better Lattice Security
    Estimates".  Returns the reduced squared norms of the GSO vectors of the basis and the number of
    BKZ tours simulated.  This version terminates when no substantial progress is made anymore or at
    most ``max_loops`` tours were simulated.  If no ``max_loops`` is given, at most ``d`` tours are
    performed, where ``d`` is the dimension of the lattice.

    :param r: squared norms of the GSO vectors of the basis.
    :param param: BKZ parameters

    EXAMPLE:

        >>> from fpylll import IntegerMatrix, GSO, LLL, FPLLL, BKZ
        >>> FPLLL.set_random_seed(1337)
        >>> A = LLL.reduction(IntegerMatrix.random(100, "qary", bits=30, k=50))
        >>> M = GSO.Mat(A)

        >>> from fpylll.tools.bkz_simulator import simulate
        >>> _ = simulate(M, BKZ.Param(block_size=40, max_loops=4, flags=BKZ.VERBOSE))
        {"i":        0,  "r_0":   2^33.3,  "r_0/gh": 6.110565,  "rhf": 1.018340,  "/": -0.07013,  "hv/hv": 2.424131}
        {"i":        1,  "r_0":   2^32.7,  "r_0/gh": 4.018330,  "rhf": 1.016208,  "/": -0.06161,  "hv/hv": 2.156298}
        {"i":        2,  "r_0":   2^32.3,  "r_0/gh": 2.973172,  "rhf": 1.014679,  "/": -0.05745,  "hv/hv": 2.047014}
        {"i":        3,  "r_0":   2^32.1,  "r_0/gh": 2.583479,  "rhf": 1.013966,  "/": -0.05560,  "hv/hv": 2.000296}


    """

    if isinstance(r, IntegerMatrix):
        r = GSO.Mat(r)
    if isinstance(r, MatGSO):
        r.update_gso()
        r = r.r()

    d = len(r)

    # code uses log2 of norms, FPLLL uses squared norms
    r = list(map(lambda x: log(x, 2) / 2., r))

    r1 = copy(r)
    r2 = copy(r)
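    # rk (used below) is not defined in this snippet: in fpylll's bkz_simulator it is a
    # module-level tuple of 45 values, roughly the average log2 GSO norms of a fully
    # reduced random unit-volume 45-dimensional lattice, used to model the tail blocks.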
    c = [rk[-i] - sum(rk[-i:]) / i for i in range(1, 46)]
    c += [(lgamma(beta / 2. + 1) * (1. / beta) - log(sqrt(pi))) / log(2.)
          for beta in range(46, param.block_size + 1)]

    if param.max_loops:
        max_loops = param.max_loops
    else:
        max_loops = d

    for i in range(max_loops):
        phi = True
        for k in range(d - min(45, param.block_size)):
            beta = min(param.block_size, d - k)
            f = k + beta
            logV = sum(r1[:f]) - sum(r2[:k])
            lma = logV / beta + c[beta - 1]
            if phi:
                if lma < r1[k]:
                    r2[k] = lma
                    phi = False
            else:
                r2[k] = lma

        # early termination
        if phi or r1 == r2:
            break
        else:
            beta = min(45, param.block_size)
            logV = sum(r1) - sum(r2[:-beta])

            if param.block_size < 45:
                tmp = sum(rk[-param.block_size:]) / param.block_size
                rk1 = [r_ - tmp for r_ in rk[-param.block_size:]]
            else:
                rk1 = rk

            for k, r in zip(range(d - beta, d), rk1):
                r2[k] = logV / beta + r
            r1 = copy(r2)

        if param.flags & BKZ.VERBOSE:
            r = OrderedDict()
            r["i"] = i
            for k, v in basis_quality(list(map(lambda x: 2.**(2 * x),
                                               r1))).items():
                r[k] = v
            print(pretty_dict(r))

    r1 = list(map(lambda x: 2.**(2 * x), r1))
    return r1, i + 1
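# The constants c[beta - 1] above can be read as log2 of the Gaussian heuristic of a
# unit-volume lattice of dimension beta (an interpretation assumed here; it is the
# standard one for the Chen--Nguyen simulator).  Self-contained check of the formula:
from math import lgamma, log, sqrt, pi

def log2_gh_unit_volume(beta):
    # log2 of the radius of the beta-dimensional ball of volume 1
    return (lgamma(beta / 2. + 1) * (1. / beta) - log(sqrt(pi))) / log(2.)

print(2 ** log2_gh_unit_volume(100))   # ~2.49 for dimension 100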
Example #7
File: bkz.py  Project: mickeyUcas/g6k
def bkz_kernel(arg0, params=None, seed=None):
    """
    Run the BKZ algorithm with different parameters.

    :param d: the dimension of the lattices to BKZ reduce
    :param params: parameters for BKZ:

        - bkz/alg: choose the underlying BKZ from
          {fpylll, naive, pump_and_jump, slide}

        - bkz/blocksizes: given as low:high:inc, perform BKZ reduction
          with blocksizes in range(low, high, inc) (after some light
          prereduction)

        - bkz/pre_blocksize: prereduce lattice with fpylll BKZ up
          to this blocksize

        - bkz/tours: the number of tours to do for each blocksize

        - bkz/extra_dim4free: lift to indices extra_dim4free earlier in
          the lattice than the currently sieved block

        - bkz/jump: the number of blocks to jump in a BKZ tour after
          each pump

        - bkz/dim4free_fun: in blocksize x, try f(x) dimensions for free,
          give as 'lambda x: f(x)', e.g. 'lambda x: 11.5 + 0.075*x'

        - pump/down_sieve: sieve after each insert in the pump-down
          phase of the pump
        
        - slide/overlap: shift of the dual blocks when running slide reduction

        - challenge_seed: a seed to randomise the generated lattice

        - dummy_tracer: use a dummy tracer which captures less information

        - verbose: print tracer information throughout BKZ run

    """
    # Pool.map only supports a single parameter
    if params is None and seed is None:
        d, params, seed = arg0
    else:
        d = arg0

    # params for underlying BKZ/workout/pump
    dim4free_fun = params.pop("bkz/dim4free_fun")
    extra_dim4free = params.pop("bkz/extra_dim4free")
    jump = params.pop("bkz/jump")
    overlap = params.pop("slide/overlap")
    pump_params = pop_prefixed_params("pump", params)
    workout_params = pop_prefixed_params("workout", params)

    # flow of the bkz experiment
    algbkz = params.pop("bkz/alg")
    blocksizes = params.pop("bkz/blocksizes")
    blocksizes = eval("range(%s)" % re.sub(":", ",", blocksizes))
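    # e.g. a blocksizes string of "60:80:2" becomes range(60, 80, 2); the value here is
    # only illustrative, not a default of this script.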
    pre_blocksize = params.pop("bkz/pre_blocksize")
    tours = params.pop("bkz/tours")

    # misc
    verbose = params.pop("verbose")
    dont_trace = params.pop("dummy_tracer", False)

    if blocksizes[-1] > d:
        print('set a smaller maximum blocksize with --blocksizes')
        return

    challenge_seed = params.pop("challenge_seed")

    A, bkz = load_prebkz(d, s=challenge_seed, blocksize=pre_blocksize)

    MM = GSO.Mat(A,
                 float_type="double",
                 U=IntegerMatrix.identity(A.nrows, int_type=A.int_type),
                 UinvT=IntegerMatrix.identity(A.nrows, int_type=A.int_type))

    g6k = Siever(MM, params, seed=seed)
    if dont_trace:
        tracer = dummy_tracer
    else:
        tracer = SieveTreeTracer(g6k, root_label=("bkz", d), start_clocks=True)

    if algbkz == "fpylll":
        M = bkz.M
    else:
        M = g6k.M

    T0 = time.time()
    for blocksize in blocksizes:

        for t in range(tours):
            with tracer.context("tour", t, dump_gso=True):
                if algbkz == "fpylll":
                    par = BKZ_FPYLLL.Param(
                        blocksize,
                        strategies=BKZ_FPYLLL.DEFAULT_STRATEGY,
                        max_loops=1)
                    bkz(par)

                elif algbkz == "naive":
                    naive_bkz_tour(g6k,
                                   tracer,
                                   blocksize,
                                   extra_dim4free=extra_dim4free,
                                   dim4free_fun=dim4free_fun,
                                   workout_params=workout_params,
                                   pump_params=pump_params)

                elif algbkz == "pump_and_jump":
                    pump_n_jump_bkz_tour(g6k,
                                         tracer,
                                         blocksize,
                                         jump=jump,
                                         dim4free_fun=dim4free_fun,
                                         extra_dim4free=extra_dim4free,
                                         pump_params=pump_params)
                elif algbkz == "slide":
                    slide_tour(g6k,
                               dummy_tracer,
                               blocksize,
                               overlap=overlap,
                               dim4free_fun=dim4free_fun,
                               extra_dim4free=extra_dim4free,
                               workout_params=workout_params,
                               pump_params=pump_params)
                else:
                    raise ValueError("bkz/alg=%s not recognized." % algbkz)

            if verbose:
                slope = basis_quality(M)["/"]
                fmt = "{'alg': '%25s', 'jump':%2d, 'pds':%d, 'extra_d4f': %2d, 'beta': %2d, 'slope': %.5f, 'total walltime': %.3f}"  # noqa
                print(fmt % (algbkz + "+" + ("enum" if algbkz == "fpylll" else
                                             g6k.params.default_sieve), jump,
                             pump_params["down_sieve"], extra_dim4free,
                             blocksize, slope, time.time() - T0))

    tracer.exit()
    slope = basis_quality(M)["/"]
    stat = tracer.trace
    try:
        stat.data["slope"] = np.array(slope)
        return stat
    except AttributeError:
        return None
Example #8
File: stats.py  Project: mickeyUcas/g6k
    def exit(self, **kwds):
        """
        By default CPU and wall time are recorded.  More information is recorded for sieve labels.
        """
        node = self.current

        node.data["cputime"] += process_time()
        node.data["walltime"] += time.time()

        self.instance.M.update_gso()

        if self.is_sieve_node(node.label):
            if isinstance(self.instance, Siever):
                instance = self.instance
            else:
                instance = self.instance.sieve

            node.data["|db|"] = Accumulator(
                len(instance), repr="max") + node.data.get("|db|", None)
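            # Adding node.data.get("|db|", None) merges with any value recorded by an
            # earlier exit of this node; Accumulator is assumed to treat None as a
            # neutral element here.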

            # determine the type of sieve:

            # idstring should be among SieveTreeTraces.recognized_sieves or "all".
            # This is used to look up what statistics to include in Siever.all_statistics

            if isinstance(node.label, str):
                idstring = node.label
            elif isinstance(node.label, tuple):
                idstring = node.label[0]
            else:
                idstring = "all"
                logging.warning("Unrecognized algorithm in Tracer")

            for key in Siever.all_statistics:
                # Siever.all_statistics[key][3] is a list of algorithms for which the
                # statistic indexed by key is meaningful.  instance.get_stat(key) returns
                # None if support for that statistic was not compiled in.
                # Siever.all_statistics[key][1] is a short string identifying the statistic.
                if ((idstring == "all") or
                    (idstring in Siever.all_statistics[key][3])) and (
                        instance.get_stat(key) is not None):
                    if (len(Siever.all_statistics[key]) <= 4):
                        node.data[Siever.all_statistics[key][1]] = Accumulator(
                            0, repr="sum")
                    else:
                        node.data[Siever.all_statistics[key][1]] = Accumulator(
                            0, repr=Siever.all_statistics[key][4])
                    node.data[Siever.all_statistics[key][1]] += node.data.get(
                        Siever.all_statistics[key][1], None)

            try:
                i, length, v = (instance.best_lifts())[0]
                if i == 0:
                    node.data["|v|"] = length
                else:
                    self.instance.update_gso(0, self.instance.full_n)
                    node.data["|v|"] = self.instance.M.get_r(0, 0)
            except (IndexError, AttributeError):
                node.data["|v|"] = None

        data = basis_quality(self.instance.M)
        for k, v in data.items():
            if k == "/":
                node.data[k] = Accumulator(v, repr="max")
            else:
                node.data[k] = Accumulator(v, repr="min")

        if kwds.get("dump_gso", node.level <= 1):
            node.data["r"] = self.instance.M.r()

        verbose_labels = ["tour", "prog_tour"]

        if self.verbosity and node.label[0] in verbose_labels:
            report = OrderedDict()
            report["i"] = node.label[1]
            report["cputime"] = node["cputime"]
            report["walltime"] = node["walltime"]
            try:
                report["preproc"] = node.find("preprocessing", True)["cputime"]
            except KeyError:
                pass
            try:
                report["svp"] = node.find("sieve", True)["cputime"]
                # TODO: re-implement
                # report["sieve sat"] = node.find("sieve", True)["saturation"]
            except KeyError:
                pass

            report["r_0"] = node["r_0"]
            report["/"] = node["/"]

            print(pretty_dict(report))

        self.current = self.current.parent
        return self.trace
Example #9
def simulate_prob(r, param, prng_seed=0xdeadbeef):
    """
    BKZ simulation algorithm as proposed by Bai, Stehlé and Wen in "Measuring, simulating and
    exploiting the head concavity phenomenon in BKZ".  Returns the reduced squared norms of the
    GSO vectors of the basis and the number of BKZ tours simulated.  This version terminates when
    no substantial progress is made anymore or at most ``max_loops`` tours were simulated.
    If no ``max_loops`` is given, at most ``d`` tours are performed, where ``d`` is the dimension
    of the lattice.
    :param r: squared norms of the GSO vectors of the basis.
    :param param: BKZ parameters
    EXAMPLE:
        >>> from fpylll import IntegerMatrix, GSO, LLL, FPLLL, BKZ
        >>> FPLLL.set_random_seed(1337)
        >>> A = LLL.reduction(IntegerMatrix.random(100, "qary", bits=30, k=50))
        >>> M = GSO.Mat(A)
        >>> from fpylll.tools.bkz_simulator import simulate_prob
        >>> _ = simulate_prob(M, BKZ.Param(block_size=40, max_loops=4, flags=BKZ.VERBOSE))
        {"i":        0,  "r_0":   2^33.1,  "r_0/gh": 5.193166,  "rhf": 1.017512,  "/": -0.07022,  "hv/hv": 2.428125}
        {"i":        1,  "r_0":   2^32.7,  "r_0/gh": 3.997766,  "rhf": 1.016182,  "/": -0.06214,  "hv/hv": 2.168460}
        {"i":        2,  "r_0":   2^32.3,  "r_0/gh": 3.020156,  "rhf": 1.014759,  "/": -0.05808,  "hv/hv": 2.059562}
        {"i":        3,  "r_0":   2^32.2,  "r_0/gh": 2.783102,  "rhf": 1.014344,  "/": -0.05603,  "hv/hv": 2.013191}
    """

    if param.block_size <= 2:
        raise ValueError("The BSW18 simulator requires block size >= 3.")

    # fix PRNG seed
    random.seed(prng_seed if prng_seed else FPLLL.randint(0, 2**32 - 1))

    r = _extract_log_norms(r)

    d = len(r)

    r1 = copy(r)
    r2 = copy(r)
    c = [rk[-j] - sum(rk[-j:]) / j for j in range(1, 46)]
    c += [(lgamma(beta / 2.0 + 1) * (1.0 / beta) - log(sqrt(pi))) / log(2.0)
          for beta in range(46, param.block_size + 1)]

    if param.max_loops:
        N = param.max_loops
    else:
        N = d

    t0 = [True for _ in range(d)]
    for i in range(N):
        t1 = [False for _ in range(d)]
        for k in range(d - min(45, param.block_size)):
            beta = min(param.block_size, d - k)
            f = k + beta
            phi = False
            for kp in range(k, f):
                phi |= t0[kp]
            logV = sum(r1[:f]) - sum(r2[:k])
            if phi:
                X = random.expovariate(.5)
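                # Assumed reading (per BSW18): the shortest norm in the block is modelled
                # as the Gaussian heuristic scaled by X**(1/beta), with X exponentially
                # distributed with mean 2.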
                lma = (log(X, 2) + logV) / beta + c[beta - 1]
                if lma < r1[k]:
                    r2[k] = lma
                    r2[k + 1] = r1[k] + log(sqrt(1 - 1. / beta), 2)
                    dec = (r1[k] - lma) + (r1[k + 1] - r2[k + 1])
                    for j in range(k + 2, f):
                        r2[j] = r1[j] + dec / (beta - 2.)
                        t1[j] = True
                    phi = False

            for j in range(k, f):
                r1[j] = r2[j]

        # early termination
        if True not in t1:
            break

        # last block
        beta = min(45, param.block_size)
        logV = sum(r1) - sum(r2[:-beta])
        if param.block_size < 45:
            rk1 = normalize_GSO_unitary(rk[-beta:])
        else:
            rk1 = rk
        K = range(d - beta, d)
        for k, r in zip(K, rk1):
            r2[k] = logV / beta + r
            t1[k] = True

        # early termination
        if (r1 == r2):
            break
        r1 = copy(r2)
        t0 = copy(t1)

        if param.flags & BKZ.VERBOSE:
            r = OrderedDict()
            r["i"] = i
            for k, v in basis_quality(list(map(lambda x: 2.0**(2 * x),
                                               r1))).items():
                r[k] = v
            print(pretty_dict(r))

    r1 = list(map(lambda x: 2.0**(2 * x), r1))
    return r1, i + 1
Example #10
def lwe_kernel(arg0, params=None, seed=None):
    """
    Run the primal attack against Darmstadt LWE instance (n, alpha).

    :param n: the dimension of the LWE-challenge secret
    :param params: parameters for LWE:

        - lwe/alpha: the noise rate of the LWE-challenge

        - lwe/m: the number of samples to use for the primal attack

        - lwe/goal_margin: accept anything that is
          goal_margin * estimate(length of embedded vector)
          as an lwe solution

        - lwe/svp_bkz_time_factor: if > 0, run a larger pump when
          svp_bkz_time_factor * time(BKZ tours so far) is expected
          to be enough time to find a solution

        - bkz/blocksizes: given as low:high:inc, perform BKZ reduction
          with blocksizes in range(low, high, inc) (after some light
          prereduction)

        - bkz/tours: the number of tours to do for each blocksize

        - bkz/jump: the number of blocks to jump in a BKZ tour after
          each pump

        - bkz/extra_dim4free: lift to indices extra_dim4free earlier in
          the lattice than the currently sieved block

        - bkz/fpylll_crossover: use enumeration based BKZ from fpylll
          below this blocksize

        - bkz/dim4free_fun: in blocksize x, try f(x) dimensions for free,
          give as 'lambda x: f(x)', e.g. 'lambda x: 11.5 + 0.075*x'

        - pump/down_sieve: sieve after each insert in the pump-down
          phase of the pump

        - dummy_tracer: use a dummy tracer which captures less information

        - verbose: print information throughout the lwe challenge attempt

    """

    # Pool.map only supports a single parameter
    if params is None and seed is None:
        n, params, seed = arg0
    else:
        n = arg0

    params = copy.copy(params)

    # params for underlying BKZ
    extra_dim4free = params.pop("bkz/extra_dim4free")
    jump = params.pop("bkz/jump")
    dim4free_fun = params.pop("bkz/dim4free_fun")
    pump_params = pop_prefixed_params("pump", params)
    fpylll_crossover = params.pop("bkz/fpylll_crossover")
    blocksizes = params.pop("bkz/blocksizes")
    tours = params.pop("bkz/tours")

    # flow of the lwe solver
    svp_bkz_time_factor = params.pop("lwe/svp_bkz_time_factor")
    goal_margin = params.pop("lwe/goal_margin")

    # generation of lwe instance and Kannan's embedding
    alpha = params.pop("lwe/alpha")
    m = params.pop("lwe/m")
    decouple = svp_bkz_time_factor > 0

    # misc
    dont_trace = params.pop("dummy_tracer")
    verbose = params.pop("verbose")

    A, c, q = load_lwe_challenge(n=n, alpha=alpha)
    print "-------------------------"
    print "Primal attack, LWE challenge n=%d, alpha=%.4f" % (n, alpha)

    if m is None:
        try:
            min_cost_param = gsa_params(n=A.ncols, alpha=alpha, q=q,
                                        samples=A.nrows, decouple=decouple)
            (b, s, m) = min_cost_param
        except TypeError:
            raise TypeError("No winning parameters.")
    else:
        try:
            min_cost_param = gsa_params(n=A.ncols, alpha=alpha, q=q, samples=m,
                                        decouple=decouple)
            (b, s, _) = min_cost_param
        except TypeError:
            raise TypeError("No winning parameters.")
    print "Chose %d samples. Predict solution at bkz-%d + svp-%d" % (m, b, s)
    print

    target_norm = goal_margin * (alpha*q)**2 * m + 1
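    # Each LWE error coordinate has standard deviation alpha*q, so the embedded error
    # vector has expected squared norm about m*(alpha*q)**2, plus 1 for the Kannan
    # embedding coefficient; goal_margin adds slack on top (interpretation assumed).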

    if blocksizes is not None:
        blocksizes = list(range(10, 40)) + list(eval("range(%s)" % re.sub(":", ",", blocksizes))) # noqa
    else:
        blocksizes = list(range(10, 50)) + [b-20, b-17] + list(range(b - 14, b + 25, 2))

    B = primal_lattice_basis(A, c, q, m=m)

    g6k = Siever(B, params)
    print "GSO precision: ", g6k.M.float_type

    if dont_trace:
        tracer = dummy_tracer
    else:
        tracer = SieveTreeTracer(g6k, root_label=("lwe"), start_clocks=True)

    d = g6k.full_n
    g6k.lll(0, g6k.full_n)
    slope = basis_quality(g6k.M)["/"]
    print "Intial Slope = %.5f\n" % slope

    T0 = time.time()
    T0_BKZ = time.time()
    for blocksize in blocksizes:
        for tt in range(tours):
            # BKZ tours

            if blocksize < fpylll_crossover:
                if verbose:
                    print "Starting a fpylll BKZ-%d tour. " % (blocksize),
                    sys.stdout.flush()
                bkz = BKZReduction(g6k.M)
                par = fplll_bkz.Param(blocksize,
                                      strategies=fplll_bkz.DEFAULT_STRATEGY,
                                      max_loops=1)
                bkz(par)

            else:
                if verbose:
                    print "Starting a pnjBKZ-%d tour. " % (blocksize)

                pump_n_jump_bkz_tour(g6k, tracer, blocksize, jump=jump,
                                     verbose=verbose,
                                     extra_dim4free=extra_dim4free,
                                     dim4free_fun=dim4free_fun,
                                     goal_r0=target_norm,
                                     pump_params=pump_params)

            T_BKZ = time.time() - T0_BKZ

            if verbose:
                slope = basis_quality(g6k.M)["/"]
                fmt = "slope: %.5f, walltime: %.3f sec"
                print(fmt % (slope, time.time() - T0))

            g6k.lll(0, g6k.full_n)

            if g6k.M.get_r(0, 0) <= target_norm:
                break

            # overdoing n_max would allocate too much memory, so we are careful
            svp_Tmax = svp_bkz_time_factor * T_BKZ
            n_max = int(58 + 2.85 * log(svp_Tmax * params.threads)/log(2.))
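            # Assumed reading: this inverts a rough sieve-cost model T ~ 2**((n - 58) / 2.85)
            # core-seconds to find the largest pump dimension n_max affordable within svp_Tmax.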

            rr = [g6k.M.get_r(i, i) for i in range(d)]
            for n_expected in range(2, d-2):
                x = (target_norm/goal_margin) * n_expected/(1.*d)
                if 4./3 * gaussian_heuristic(rr[d-n_expected:]) > x:
                    break

            print "Without otf, would expect solution at pump-%d. n_max=%d in the given time." % (n_expected, n_max) # noqa
            if n_expected >= n_max - 1:
                continue

            n_max += 1

            # Larger SVP

            llb = d - blocksize
            while gaussian_heuristic([g6k.M.get_r(i, i) for i in range(llb, d)]) < target_norm * (d - llb)/(1.*d): # noqa
                llb -= 1

            f = d-llb-n_max
            if verbose:
                print "Starting svp pump_{%d, %d, %d}, n_max = %d, Tmax= %.2f sec" % (llb, d-llb, f, n_max, svp_Tmax) # noqa
            pump(g6k, tracer, llb, d-llb, f, verbose=verbose,
                 goal_r0=target_norm * (d - llb)/(1.*d))

            if verbose:
                slope = basis_quality(g6k.M)["/"]
                fmt = "\n slope: %.5f, walltime: %.3f sec"
                print(fmt % (slope, time.time() - T0))
                print()

            g6k.lll(0, g6k.full_n)
            T0_BKZ = time.time()
            if g6k.M.get_r(0, 0) <= target_norm:
                break

        if g6k.M.get_r(0, 0) <= target_norm:
            print "Finished! TT=%.2f sec" % (time.time() - T0)
            print g6k.M.B[0]
            alpha_ = int(alpha*1000)
            filename = 'lwechallenge/%03d-%03d-solution.txt' % (n, alpha_)
            fn = open(filename, "w")
            fn.write(str(g6k.M.B[0]))
            fn.close()
            return

    raise ValueError("No solution found.")
Example #11
    def exit(self, **kwds):  # noqa, shut up linter about this function being too complex
        """
        By default CPU and wall time are recorded.  More information is recorded for "enumeration"
        and "tour" labels.  When the label is a tour then the status is printed if verbosity > 0.
        """
        node = self.current
        label = node.label

        node.data["cputime"] += process_time()
        node.data["walltime"] += time.time()

        if kwds.get("dump_gso", False):
            node.data["r"] = node.data.get("r", []) + [self.instance.M.r()]

        if label == "enumeration":
            full = kwds.get("full", True)
            if full:
                try:
                    node.data["#enum"] = Accumulator(
                        kwds["enum_obj"].get_nodes(), repr="sum"
                    ) + node.data.get(
                        "#enum", None
                    )  # noqa
                except KeyError:
                    pass
                try:
                    node.data["%"] = Accumulator(kwds["probability"], repr="avg") + node.data.get(
                        "%", None
                    )
                except KeyError:
                    pass

        if label[0] == "tour":
            data = basis_quality(self.instance.M)
            for k, v in data.items():
                if k == "/":
                    node.data[k] = Accumulator(v, repr="max")
                else:
                    node.data[k] = Accumulator(v, repr="min")

        if self.verbosity and label[0] == "tour":
            report = OrderedDict()
            report["i"] = label[1]
            report["cputime"] = node["cputime"]
            report["walltime"] = node["walltime"]
            try:
                report["preproc"] = node.find("preprocessing", True)["cputime"]
            except KeyError:
                pass
            try:
                report["svp"] = node.find("enumeration", True)["cputime"]
            except KeyError:
                pass
            report["#enum"] = node.sum("#enum")
            report["lll"] = node.sum("cputime", label="lll")
            try:
                report["pruner"] = node.find("pruner", True)["cputime"]
            except KeyError:
                pass
            report["r_0"] = node["r_0"]
            report["/"] = node["/"]

            print(pretty_dict(report))

        self._pop()
Example #12
def ntru_kernel(arg0, params=None, seed=None):
    """
    Run the hybrid attack against an NTRU instance of dimension n.

    :param n: the dimension of the NTRU instance
    :param params: parameters for the attack:

        - lwe/alpha: the noise rate of the LWE-challenge

        - lwe/m: the number of samples to use for the primal attack

        - lwe/goal_margin: accept anything that is
          goal_margin * estimate(length of embedded vector)
          as an lwe solution

        - lwe/svp_bkz_time_factor: if > 0, run a larger pump when
          svp_bkz_time_factor * time(BKZ tours so far) is expected
          to be enough time to find a solution

        - bkz/blocksizes: given as low:high:inc, perform BKZ reduction
          with blocksizes in range(low, high, inc) (after some light
          prereduction)

        - bkz/tours: the number of tours to do for each blocksize

        - bkz/jump: the number of blocks to jump in a BKZ tour after
          each pump

        - bkz/extra_dim4free: lift to indices extra_dim4free earlier in
          the lattice than the currently sieved block

        - bkz/fpylll_crossover: use enumeration based BKZ from fpylll
          below this blocksize

        - bkz/dim4free_fun: in blocksize x, try f(x) dimensions for free,
          give as 'lambda x: f(x)', e.g. 'lambda x: 11.5 + 0.075*x'

        - pump/down_sieve: sieve after each insert in the pump-down
          phase of the pump

        - dummy_tracer: use a dummy tracer which captures less information

        - verbose: print information throughout the lwe challenge attempt

    """

    # Pool.map only supports a single parameter
    if params is None and seed is None:
        n, params, seed = arg0
    else:
        n = arg0

    params = copy.copy(params)

    # params for underlying BKZ
    extra_dim4free = params.pop("bkz/extra_dim4free")
    jump = params.pop("bkz/jump")
    dim4free_fun = params.pop("bkz/dim4free_fun")
    pump_params = pop_prefixed_params("pump", params)
    fpylll_crossover = params.pop("bkz/fpylll_crossover")
    blocksizes = params.pop("bkz/blocksizes")
    tours = params.pop("bkz/tours")

    # flow of the lwe solver
    svp_bkz_time_factor = params.pop("lwe/svp_bkz_time_factor")
    goal_margin = params.pop("lwe/goal_margin")

    # generation of lwe instance and Kannan's embedding
    alpha = params.pop("lwe/alpha")
    m = params.pop("lwe/m")
    decouple = svp_bkz_time_factor > 0

    # misc
    dont_trace = params.pop("dummy_tracer")
    verbose = params.pop("verbose")

    filename = 'ntru_n_'+str(n)+'.txt'
    H, q = read_ntru_from_file(filename)

    print("-------------------------")
    print("Hybrid attack on NTRU n=%d" %n)


    # compute the attack parameters
    paramset_NTRU1 = {'n': n, 'q': q, 'w': 2*(n//3)}
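    # Assumed convention: 'w' is the ternary weight of the NTRU secret, i.e. roughly
    # 2/3 of the n coefficients are nonzero (integer division keeps it an int).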
    print(paramset_NTRU1)
    beta, g, rt = plain_hybrid_compleixty(paramset_NTRU1, verbose=True)

    B = ntru_plain_hybrid_basis(H, g, q, m=m)

    g6k = Siever(B, params)
    print("GSO precision: ", g6k.M.float_type)

    if dont_trace:
        tracer = dummy_tracer
    else:
        tracer = SieveTreeTracer(g6k, root_label=("ntru"), start_clocks=True)

    d = g6k.full_n
    g6k.lll(0, g6k.full_n)
    slope = basis_quality(g6k.M)["/"]
    print("Intial Slope = %.5f\n" % slope)

    T0 = time.time()
    T0_BKZ = time.time()
    for blocksize in blocksizes:
        for tt in range(tours):
            # BKZ tours

            if blocksize < fpylll_crossover:
                if verbose:
                    print("Starting a fpylll BKZ-%d tour. " % (blocksize), end=' ')
                    sys.stdout.flush()
                bkz = BKZReduction(g6k.M)
                par = fplll_bkz.Param(blocksize,
                                      strategies=fplll_bkz.DEFAULT_STRATEGY,
                                      max_loops=1)
                bkz(par)

            else:
                if verbose:
                    print("Starting a pnjBKZ-%d tour. " % (blocksize))

                pump_n_jump_bkz_tour(g6k, tracer, blocksize, jump=jump,
                                     verbose=verbose,
                                     extra_dim4free=extra_dim4free,
                                     dim4free_fun=dim4free_fun,
                                     goal_r0=target_norm,
                                     pump_params=pump_params)

            T_BKZ = time.time() - T0_BKZ

            if verbose:
                slope = basis_quality(g6k.M)["/"]
                fmt = "slope: %.5f, walltime: %.3f sec"
                print(fmt % (slope, time.time() - T0))

            g6k.lll(0, g6k.full_n)

            if g6k.M.get_r(0, 0) <= target_norm:
                break

            # overdoing n_max would allocate too much memory, so we are careful
            svp_Tmax = svp_bkz_time_factor * T_BKZ
            n_max = int(58 + 2.85 * log(svp_Tmax * params.threads)/log(2.))

            rr = [g6k.M.get_r(i, i) for i in range(d)]
            for n_expected in range(2, d-2):
                x = (target_norm/goal_margin) * n_expected/(1.*d)
                if 4./3 * gaussian_heuristic(rr[d-n_expected:]) > x:
                    break

            print("Without otf, would expect solution at pump-%d. n_max=%d in the given time." % (n_expected, n_max)) # noqa
            if n_expected >= n_max - 1:
                continue

            n_max += 1

            # Larger SVP

            llb = d - blocksize
            while gaussian_heuristic([g6k.M.get_r(i, i) for i in range(llb, d)]) < target_norm * (d - llb)/(1.*d): # noqa
                llb -= 1

            f = d-llb-n_max
            if verbose:
                print("Starting svp pump_{%d, %d, %d}, n_max = %d, Tmax= %.2f sec" % (llb, d-llb, f, n_max, svp_Tmax)) # noqa
            pump(g6k, tracer, llb, d-llb, f, verbose=verbose,
                 goal_r0=target_norm * (d - llb)/(1.*d))

            if verbose:
                slope = basis_quality(g6k.M)["/"]
                fmt = "\n slope: %.5f, walltime: %.3f sec"
                print(fmt % (slope, time.time() - T0))
                print()

            g6k.lll(0, g6k.full_n)
            T0_BKZ = time.time()
            if g6k.M.get_r(0, 0) <= target_norm:
                break

        if g6k.M.get_r(0, 0) <= target_norm:
            print("Finished! TT=%.2f sec" % (time.time() - T0))
            print(g6k.M.B[0])
            alpha_ = int(alpha*1000)
            filename = 'lwechallenge/%03d-%03d-solution.txt' % (n, alpha_)
            fn = open(filename, "w")
            fn.write(str(g6k.M.B[0]))
            fn.close()
            return
	"""
    raise ValueError("No solution found.")

def ntru():
    """
    Attempt to solve an ntru instance.

    """
    description = ntru.__doc__

    args, all_params = parse_args(description,
                                  ntru__m=None,
                                  lwe__goal_margin=1.5,
                                  lwe__svp_bkz_time_factor=1,
                                  bkz__blocksizes=None,
                                  bkz__tours=1,
                                  bkz__jump=1,
                                  bkz__extra_dim4free=12,
                                  bkz__fpylll_crossover=51,
                                  bkz__dim4free_fun="default_dim4free_fun",
                                  pump__down_sieve=True,
                                  dummy_tracer=True,  # set to control memory
                                  verbose=True
                                  )

    stats = run_all(ntru_kernel, list(all_params.values()), # noqa
                    lower_bound=args.lower_bound,
                    upper_bound=args.upper_bound,
                    step_size=args.step_size,
                    trials=args.trials,
                    workers=args.workers,
                    seed=args.seed)