Example #1
    def wait_on(self, outputs, todo, sleep=1):
        """Wait for jobs in ``todo`` to return and store results in ``outputs``.

        :param outputs: dictionary in which to store results
        :param todo: set of ``(tag, result)`` pairs for running jobs
        :param sleep: seconds to sleep before checking if new results are available

        """

        fmtstr = self._majorminor_format_str()

        while todo:
            collect = [(tag, res) for (tag, res) in todo if res.ready()]

            for tag, res in collect:
                major, minor = tag
                try:
                    res = res.get()
                    if major not in outputs:
                        outputs[major] = []
                    outputs[major].append((minor, res))
                    self.logger.debug(fmtstr %
                                      (major, minor, pretty_dict(res.data)))

                    if self.pickle_jar is not None:
                        Conductor.dump(self.outputs, self.pickle_jar)

                except ReductionError:
                    self.logger.debug("ReductionError for %s(%s)." %
                                      (major, minor))

            todo = todo.difference(collect)
            time.sleep(sleep)

        return outputs
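The polling loop above does not depend on the surrounding class: it repeatedly checks a set of ``(tag, AsyncResult)`` pairs, harvests the finished ones, and sleeps. A minimal, self-contained sketch of the same pattern using only the standard library (``square`` is a hypothetical stand-in for the real worker):

from multiprocessing import Pool
import time

def square(x):  # hypothetical worker standing in for ``play``
    return x * x

if __name__ == "__main__":
    pool = Pool(processes=2)
    todo = set((("grp", i), pool.apply_async(square, (i,))) for i in range(4))
    outputs = {}
    while todo:
        done = [(tag, res) for (tag, res) in todo if res.ready()]
        for (major, minor), res in done:
            outputs.setdefault(major, []).append((minor, res.get()))
        todo = todo.difference(done)
        time.sleep(0.1)
    print(outputs)  # e.g. {'grp': [(0, 0), (2, 4), (1, 1), (3, 9)]},
                    # order depends on completion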
Example #2
    def log_averages(self, tags, outputs):
        """
        Log average values for all entries tagged as ``tags`` in ``outputs``.
        """
        fmtstr = self._majorminor_format_str()
        avg = OrderedDict()

        for major, minor in tags:
            if major in avg:
                continue
            avg[major] = OrderedDict()
            n = len(outputs[major])
            for minor, output in outputs[major]:
                for k, v in output.data.items():
                    avg[major][k] = avg[major].get(k, 0.0) + float(v) / n

            self.logger.info(fmtstr % (major, "avg", pretty_dict(avg[major])))
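``log_averages`` assumes the layout produced by ``wait_on``: each ``major`` tag maps to a list of ``(minor, result)`` pairs whose results carry a ``data`` dict of statistics. A self-contained sketch of the same averaging over plain dicts (the values are made up):

from collections import OrderedDict

# major -> [(minor, {"stat": value}), ...], as filled in by wait_on
outputs = {"BKZ2": [(0, {"cputime": 1.0, "#enum": 10.0}),
                    (1, {"cputime": 3.0, "#enum": 30.0})]}

avg = OrderedDict()
for major, entries in outputs.items():
    n = len(entries)
    avg[major] = OrderedDict()
    for _minor, data in entries:
        for k, v in data.items():
            avg[major][k] = avg[major].get(k, 0.0) + float(v) / n

print(dict(avg["BKZ2"]))  # {'cputime': 2.0, '#enum': 20.0}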
Example #3
    def exit(self, **kwds):
        """
        When the label is a tour, the status is printed if verbosity > 0.
        """
        node = self.current
        label = node.label

        if label[0] == "tour":
            data = basis_quality([2 ** (2 * r_) for r_ in self.instance.r])
            for k, v in data.items():
                if k == "/":
                    node.data[k] = Accumulator(v, repr="max")
                else:
                    node.data[k] = Accumulator(v, repr="min")

        if self.verbosity and label[0] == "tour":
            report = OrderedDict()
            report["i"] = label[1]
            report["#enum"] = node.sum("#enum")
            report["r_0"] = node["r_0"]
            report["/"] = node["/"]
            print(pretty_dict(report))

        self.current = self.current.parent
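Both call sites of ``basis_quality`` in these snippets pass either a ``MatGSO`` object or a plain list of squared Gram-Schmidt norms; the ``2 ** (2 * r_)`` above converts log2 norms back to squared norms. A standalone illustration on a synthetic profile (assuming fpylll is installed and that ``basis_quality`` accepts a list of squared norms, as at both call sites; the slope value is made up):

from fpylll.tools.quality import basis_quality
from fpylll.tools.bkz_stats import pretty_dict

# synthetic profile with slope -0.1 in log2 of the squared norms
d = 60
r = [2.0 ** (-0.1 * i) for i in range(d)]
print(pretty_dict(basis_quality(r)))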
Example #4
def simulate(r, param):
    """
    BKZ simulation algorithm as proposed by Chen and Nguyen in "BKZ 2.0: Better Lattice Security
    Estimates".  Returns the reduced squared norms of the GSO vectors of the basis and the number of
    BKZ tours simulated.  This version terminates when no substantial progress is made anymore or
    after at most ``max_loops`` tours.  If no ``max_loops`` is given, at most ``d`` tours are
    performed, where ``d`` is the dimension of the lattice.

    :param r: squared norms of the GSO vectors of the basis.
    :param param: BKZ parameters

    EXAMPLE:

        >>> from fpylll import IntegerMatrix, GSO, LLL, FPLLL, BKZ
        >>> FPLLL.set_random_seed(1337)
        >>> A = LLL.reduction(IntegerMatrix.random(100, "qary", bits=30, k=50))
        >>> M = GSO.Mat(A)

        >>> from fpylll.tools.bkz_simulator import simulate
        >>> _ = simulate(M, BKZ.Param(block_size=40, max_loops=4, flags=BKZ.VERBOSE))
        {"i":        0,  "r_0":   2^33.3,  "r_0/gh": 6.110565,  "rhf": 1.018340,  "/": -0.07013,  "hv/hv": 2.424131}
        {"i":        1,  "r_0":   2^32.7,  "r_0/gh": 4.018330,  "rhf": 1.016208,  "/": -0.06161,  "hv/hv": 2.156298}
        {"i":        2,  "r_0":   2^32.3,  "r_0/gh": 2.973172,  "rhf": 1.014679,  "/": -0.05745,  "hv/hv": 2.047014}
        {"i":        3,  "r_0":   2^32.1,  "r_0/gh": 2.583479,  "rhf": 1.013966,  "/": -0.05560,  "hv/hv": 2.000296}


    """

    if isinstance(r, IntegerMatrix):
        r = GSO.Mat(r)
    if isinstance(r, MatGSO):
        r.update_gso()
        r = r.r()

    d = len(r)

    # code uses log2 of norms, FPLLL uses squared norms
    r = list(map(lambda x: log(x, 2) / 2., r))

    r1 = copy(r)
    r2 = copy(r)
    # ``rk`` is a module-level constant of fpylll.tools.bkz_simulator (the
    # average log2 GSO norms of HKZ-reduced random unit-volume lattices of
    # dimension 45); it is not defined in this snippet.
    c = [rk[-i] - sum(rk[-i:]) / i for i in range(1, 46)]
    c += [(lgamma(beta / 2. + 1) * (1. / beta) - log(sqrt(pi))) / log(2.)
          for beta in range(46, param.block_size + 1)]

    if param.max_loops:
        max_loops = param.max_loops
    else:
        max_loops = d

    for i in range(max_loops):
        phi = True
        for k in range(d - min(45, param.block_size)):
            beta = min(param.block_size, d - k)
            f = k + beta
            logV = sum(r1[:f]) - sum(r2[:k])
            lma = logV / beta + c[beta - 1]
            if phi:
                if lma < r1[k]:
                    r2[k] = lma
                    phi = False
            else:
                r2[k] = lma

        # early termination
        if phi or r1 == r2:
            break
        else:
            beta = min(45, param.block_size)
            logV = sum(r1) - sum(r2[:-beta])

            if param.block_size < 45:
                tmp = sum(rk[-param.block_size:]) / param.block_size
                rk1 = [r_ - tmp for r_ in rk[-param.block_size:]]
            else:
                rk1 = rk

            for k, r in zip(range(d - beta, d), rk1):
                r2[k] = logV / beta + r
            r1 = copy(r2)

        if param.flags & BKZ.VERBOSE:
            r = OrderedDict()
            r["i"] = i
            for k, v in basis_quality(list(map(lambda x: 2.**(2 * x),
                                               r1))).items():
                r[k] = v
            print(pretty_dict(r))

    r1 = list(map(lambda x: 2.**(2 * x), r1))
    return r1, i + 1
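The constant ``c[beta - 1]`` in both simulators is the log2 Gaussian heuristic of a unit-volume lattice of dimension ``beta``: each block's shortest vector is predicted at that length. A quick standalone check of the formula (nothing below is fpylll API):

from math import lgamma, log, sqrt, pi, e

def log2_gh(n):
    # log2 of GH(n) = Gamma(n/2 + 1)**(1/n) / sqrt(pi), the Gaussian
    # heuristic length of the shortest vector in a unit-volume lattice
    return (lgamma(n / 2. + 1) / n - log(sqrt(pi))) / log(2.)

for n in (45, 60, 100):
    # approaches sqrt(n / (2*pi*e)) as n grows
    print(n, 2 ** log2_gh(n), sqrt(n / (2 * pi * e)))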
Example #5
    def exit(self, **kwds):
        """
        By default CPU and wall time are recorded.  More information is recorded for sieve labels.
        """
        node = self.current

        node.data["cputime"] += process_time()
        node.data["walltime"] += time.time()

        self.instance.M.update_gso()

        if self.is_sieve_node(node.label):
            if isinstance(self.instance, Siever):
                instance = self.instance
            else:
                instance = self.instance.sieve

            node.data["|db|"] = Accumulator(
                len(instance), repr="max") + node.data.get("|db|", None)

            # determine the type of sieve:

            # idstring should be among SieveTreeTraces.recognized_sieves or "all".
            # This is used to look up what statistics to include in Siever.all_statistics

            if isinstance(node.label, str):
                idstring = node.label
            elif isinstance(node.label, tuple):
                idstring = node.label[0]
            else:
                idstring = "all"
                logging.warning("Unrecognized algorithm in Tracer")

            for key in Siever.all_statistics:
                # Siever.all_statistics[key][3] is a list of algorithms for which the statistic
                # indexed by key is meaningful.  instance.get_stat(key) returns None if support
                # for the statistic was not compiled in.  Siever.all_statistics[key][1] is a
                # short string that identifies the statistic.
                if ((idstring == "all") or
                    (idstring in Siever.all_statistics[key][3])) and (
                        instance.get_stat(key) is not None):
                    if len(Siever.all_statistics[key]) <= 4:
                        node.data[Siever.all_statistics[key][1]] = Accumulator(
                            0, repr="sum")
                    else:
                        node.data[Siever.all_statistics[key][1]] = Accumulator(
                            0, repr=Siever.all_statistics[key][4])
                    node.data[Siever.all_statistics[key][1]] += node.data.get(
                        Siever.all_statistics[key][1], None)

            try:
                i, length, v = (instance.best_lifts())[0]
                if i == 0:
                    node.data["|v|"] = length
                else:
                    self.instance.update_gso(0, self.instance.full_n)
                    node.data["|v|"] = self.instance.M.get_r(0, 0)
            except (IndexError, AttributeError):
                node.data["|v|"] = None

        data = basis_quality(self.instance.M)
        for k, v in data.items():
            if k == "/":
                node.data[k] = Accumulator(v, repr="max")
            else:
                node.data[k] = Accumulator(v, repr="min")

        if kwds.get("dump_gso", node.level <= 1):
            node.data["r"] = self.instance.M.r()

        verbose_labels = ["tour", "prog_tour"]

        if self.verbosity and node.label[0] in verbose_labels:
            report = OrderedDict()
            report["i"] = node.label[1]
            report["cputime"] = node["cputime"]
            report["walltime"] = node["walltime"]
            try:
                report["preproc"] = node.find("preprocessing", True)["cputime"]
            except KeyError:
                pass
            try:
                report["svp"] = node.find("sieve", True)["cputime"]
                # TODO: re-implement
                # report["sieve sat"] = node.find("sieve", True)["saturation"]
            except KeyError:
                pass

            report["r_0"] = node["r_0"]
            report["/"] = node["/"]

            print(pretty_dict(report))

        self.current = self.current.parent
        return self.trace
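``Accumulator`` here is G6K's statistics helper; the code above relies on two behaviours: the ``repr`` argument selects how successive values combine, and adding ``None`` is a no-op (so ``node.data.get(key, None)`` seeds the first occurrence). A hypothetical mini version illustrating those semantics (not the real class):

class MiniAccumulator:
    """Hypothetical stand-in for G6K's Accumulator, for illustration only."""

    def __init__(self, value, repr="sum"):
        self.repr, self.value = repr, value

    def __add__(self, other):
        if other is None:  # ``+ node.data.get(key, None)`` on first use
            return self
        combine = {"sum": lambda a, b: a + b, "max": max, "min": min}[self.repr]
        return MiniAccumulator(combine(self.value, other.value), self.repr)

acc = MiniAccumulator(10, repr="max") + None   # first sieve call
acc = MiniAccumulator(12, repr="max") + acc    # later call, keeps the max
print(acc.value)  # 12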
Example #6
def simulate_prob(r, param, prng_seed=0xdeadbeef):
    """
    BKZ simulation algorithm as proposed by Bai, Stehlé, and Wen in "Measuring, simulating and
    exploiting the head concavity phenomenon in BKZ".  Returns the reduced squared norms of the
    GSO vectors of the basis and the number of BKZ tours simulated.  This version terminates when
    no substantial progress is made anymore or at most ``max_loops`` tours were simulated.
    If no ``max_loops`` is given, at most ``d`` tours are performed, where ``d`` is the dimension
    of the lattice.

    :param r: squared norms of the GSO vectors of the basis.
    :param param: BKZ parameters

    EXAMPLE:

        >>> from fpylll import IntegerMatrix, GSO, LLL, FPLLL, BKZ
        >>> FPLLL.set_random_seed(1337)
        >>> A = LLL.reduction(IntegerMatrix.random(100, "qary", bits=30, k=50))
        >>> M = GSO.Mat(A)
        >>> from fpylll.tools.bkz_simulator import simulate_prob
        >>> _ = simulate_prob(M, BKZ.Param(block_size=40, max_loops=4, flags=BKZ.VERBOSE))
        {"i":        0,  "r_0":   2^33.1,  "r_0/gh": 5.193166,  "rhf": 1.017512,  "/": -0.07022,  "hv/hv": 2.428125}
        {"i":        1,  "r_0":   2^32.7,  "r_0/gh": 3.997766,  "rhf": 1.016182,  "/": -0.06214,  "hv/hv": 2.168460}
        {"i":        2,  "r_0":   2^32.3,  "r_0/gh": 3.020156,  "rhf": 1.014759,  "/": -0.05808,  "hv/hv": 2.059562}
        {"i":        3,  "r_0":   2^32.2,  "r_0/gh": 2.783102,  "rhf": 1.014344,  "/": -0.05603,  "hv/hv": 2.013191}
    """

    if param.block_size <= 2:
        raise ValueError("The BSW18 simulator requires block size >= 3.")

    # fix PRNG seed
    random.seed(prng_seed if prng_seed else FPLLL.randint(0, 2**32 - 1))

    r = _extract_log_norms(r)

    d = len(r)

    r1 = copy(r)
    r2 = copy(r)
    # ``rk`` is the same module-level fpylll.tools.bkz_simulator constant used
    # in ``simulate`` above.
    c = [rk[-j] - sum(rk[-j:]) / j for j in range(1, 46)]
    c += [(lgamma(beta / 2.0 + 1) * (1.0 / beta) - log(sqrt(pi))) / log(2.0)
          for beta in range(46, param.block_size + 1)]

    if param.max_loops:
        N = param.max_loops
    else:
        N = d

    t0 = [True for _ in range(d)]
    for i in range(N):
        t1 = [False for _ in range(d)]
        for k in range(d - min(45, param.block_size)):
            beta = min(param.block_size, d - k)
            f = k + beta
            phi = False
            for kp in range(k, f):
                phi |= t0[kp]
            logV = sum(r1[:f]) - sum(r2[:k])
            if phi:
                X = random.expovariate(.5)
                lma = (log(X, 2) + logV) / beta + c[beta - 1]
                if lma < r1[k]:
                    r2[k] = lma
                    r2[k + 1] = r1[k] + log(sqrt(1 - 1. / beta), 2)
                    dec = (r1[k] - lma) + (r1[k + 1] - r2[k + 1])
                    for j in range(k + 2, f):
                        r2[j] = r1[j] + dec / (beta - 2.)
                        t1[j] = True
                    phi = False

            for j in range(k, f):
                r1[j] = r2[j]

        # early termination
        if True not in t1:
            break

        # last block
        beta = min(45, param.block_size)
        logV = sum(r1) - sum(r2[:-beta])
        if param.block_size < 45:
            rk1 = normalize_GSO_unitary(rk[-beta:])
        else:
            rk1 = rk
        K = range(d - beta, d)
        for k, r in zip(K, rk1):
            r2[k] = logV / beta + r
            t1[k] = True

        # early termination
        if r1 == r2:
            break
        r1 = copy(r2)
        t0 = copy(t1)

        if param.flags & BKZ.VERBOSE:
            r = OrderedDict()
            r["i"] = i
            for k, v in basis_quality(list(map(lambda x: 2.0**(2 * x),
                                               r1))).items():
                r[k] = v
            print(pretty_dict(r))

    r1 = list(map(lambda x: 2.0**(2 * x), r1))
    return r1, i + 1
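The only probabilistic step is the draw ``X = random.expovariate(.5)`` (exponential with mean 2): where ``simulate`` pins each block's shortest norm to the Gaussian heuristic, ``simulate_prob`` multiplies that prediction by ``X ** (1. / beta)``. A standalone look at this multiplier (sample size is arbitrary):

import random

random.seed(0xdeadbeef)
beta = 40
mult = [random.expovariate(.5) ** (1. / beta) for _ in range(100000)]
# the mean multiplier is close to 1; the head concavity modelled by BSW18
# comes from the fluctuation, not from a systematic shift
print(sum(mult) / len(mult))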
Example #7
    def __call__(self, jobs, current=None):
        """
        Call ``jobs`` in parallel.

        The parameter ``jobs`` is a list in the following format.  Each entry is one of the following:

            - a tuple ``((major, minor), (BKZ, A, block_size, tours, progressive_step_size))``,
              where ``major`` and ``minor`` are arbitrary hashable tags and the rest are valid
              inputs to ``play``.

            - A list with elements of the same format as above.

        Entries at the same level are considered to be a group.  All jobs in the same group go into
        the same execution pool.  At the end of the execution of a group the average across all
        ``minor`` tags of a ``major`` tag are shown.

        .. note:: Recursive jobs, i.e. those in a sub-list, are run first; this is an
           implementation artefact.  Typically, we don't expect jobs and lists of jobs to be
           mixed at the same level, though this is supported.

        """
        inputs = OrderedDict()
        if current is None:
            current = self.outputs

        # filter out sub-jobs that should be grouped and call recursively
        for tag, job in jobs:
            if isinstance(job[0], (list, tuple)):
                self.logger.info("")
                self.logger.info("# %s (size: %d) #" % (tag, len(job)))
                current[tag] = OrderedDict()
                self(job, current=current[tag])
            else:
                major, minor = tag
                self._update_strlens(major, minor)
                if major not in current:
                    current[major] = list()
                inputs[tag] = job

        self.logger.debug("")

        # base case
        if self.threads > 1:
            todo = set()
            for tag in inputs:
                todo.add((tag, self.pool.apply_async(play, inputs[tag])))

            current = self.wait_on(current, todo)

        else:
            fmtstr = self._majorminor_format_str()
            for tag in inputs:
                major, minor = tag
                try:
                    res = play(*inputs[tag])
                    current[major].append((minor, res))
                    self.logger.debug(fmtstr %
                                      (major, minor, pretty_dict(res.data)))

                    if self.pickle_jar is not None:
                        Conductor.dump(self.outputs, self.pickle_jar)

                except ReductionError:
                    self.logger.debug("ReductionError for %s(%s)." %
                                      (major, minor))

        self.logger.debug("")

        # print averages per major tag
        self.log_averages(inputs.keys(), current)

        if self.pickle_jar is not None:
            Conductor.dump(self.outputs, self.pickle_jar)

        return self.outputs
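The docstring above pins down the shape of ``jobs``. A hypothetical construction of such a list (``BKZ2``, ``A0`` and ``A1`` are placeholders for a reduction class and two ``IntegerMatrix`` instances; the tuple layout is the documented one):

# placeholders so the sketch runs standalone; in real use these would be a
# BKZ class and IntegerMatrix objects
BKZ2, A0, A1 = "BKZ2-class", "matrix-0", "matrix-1"

jobs = [
    # ((major, minor), (BKZ, A, block_size, tours, progressive_step_size))
    (("bs-40", 0), (BKZ2, A0, 40, 8, None)),
    (("bs-40", 1), (BKZ2, A1, 40, 8, None)),
    # a (tag, sub-list) entry forms a group; it is run first and logged
    # under its own heading
    ("progressive", [(("bs-40-prog", 0), (BKZ2, A0, 40, 8, 2)),
                     (("bs-40-prog", 1), (BKZ2, A1, 40, 8, 2))]),
]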
    def __call__(self, seed, threads=2, samples=2, tours=1):
        """

        :param seed: a random seed; each matrix is created with the seed increased by one
        :param threads: number of threads to use
        :param samples: number of reductions to perform
        :param tours: number of BKZ tours to run

        """

        logger = logging.getLogger("compare")

        results = OrderedDict()

        fmtstring = "  %%%ds" % max(
            [len(BKZ_.__name__) for BKZ_ in self.classes])

        for dimension in self.dimensions:
            results[dimension] = OrderedDict()

            for block_size in self.block_sizes:

                seed_ = seed

                if dimension < block_size:
                    continue

                L = OrderedDict([(BKZ_.__name__, OrderedDict())
                                 for BKZ_ in self.classes])

                logger.info("dimension: %3d, block_size: %2d" %
                            (dimension, block_size))

                tasks = []

                matrixf = self.matrixf(dimension=dimension,
                                       block_size=block_size)

                for i in range(samples):
                    set_random_seed(seed_)
                    A = IntegerMatrix.random(dimension, **matrixf)

                    for BKZ_ in self.classes:
                        args = (BKZ_, A, block_size, tours,
                                self.progressive_step_size)
                        tasks.append(((seed_, BKZ_), args))

                    seed_ += 1

                if threads > 1:
                    pool = Pool(processes=threads)
                    tasks = dict([(key, pool.apply_async(bkz_call, args_))
                                  for key, args_ in tasks])
                    pool.close()

                    while tasks:
                        ready = [key for key in tasks if tasks[key].ready()]
                        for key in ready:
                            seed_, BKZ_ = key
                            try:
                                trace_ = tasks[key].get()
                                L[BKZ_.__name__][seed_] = trace_
                                logger.debug(fmtstring % (BKZ_.__name__) +
                                             " 0x%08x %s" %
                                             (seed_, pretty_dict(trace_.data)))
                            except ReductionError:
                                logger.debug(
                                    "ReductionError in %s with seed 0x%08x" %
                                    (BKZ_.__name__, seed_))
                            del tasks[key]

                        time.sleep(1)
                else:
                    for key, args_ in tasks:
                        seed_, BKZ_ = key
                        try:
                            trace_ = bkz_call(*args_)
                            L[BKZ_.__name__][seed_] = trace_
                            logger.debug(fmtstring % (BKZ_.__name__) +
                                         " 0x%08x %s" %
                                         (seed_, pretty_dict(trace_.data)))
                        except ReductionError:
                            logger.debug(
                                "ReductionError in %s with seed 0x%08x" %
                                (BKZ_.__name__, seed_))

                logger.debug("")
                for name, vals in L.items():
                    if vals:
                        vals = OrderedDict(
                            zip(
                                next(iter(vals.values())).data,
                                zip(*[v.data.values()
                                      for v in vals.values()])))
                        vals = OrderedDict((k, float(sum(v)) / len(v))
                                           for k, v in vals.items())
                        logger.info(fmtstring % (name) + "    average %s" %
                                    (pretty_dict(vals)))

                logger.info("")
                results[dimension][block_size] = L

                self.write_log(results)

        return results