Example No. 1
def callback_roundtrip(alive, k, connections, data):
    """
    Send ``data`` on ``connections`` to the processes whose ids are in ``alive``, ``k`` at a time.

    :param alive: ids of worker processes that are still alive
    :param k: number of connections to service at a time
    :param connections: list of connections, indexed by process id
    :param data: object to send to each process
    """
    callback = [None] * len(connections)

    for chunk in chunk_iterator(alive, k):
        for i in chunk:
            connections[i].send(data)

        for i in chunk:
            try:
                callback[i] = connections[i].recv()
            except EOFError:
                # the worker hung up: record no answer and close our end
                callback[i] = None
                connections[i].close()

    return callback
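
All examples on this page call a module-level ``chunk_iterator`` helper that is not shown. A minimal sketch consistent with how it is used here (yield successive batches of at most ``k`` ids) could look like this; the implementation details are an assumption, not the original helper:

def chunk_iterator(iterable, k):
    # Yield successive chunks of at most ``k`` items from ``iterable``.
    items = list(iterable)
    for start in range(0, len(items), k):
        yield items[start:start + k]

For example, ``list(chunk_iterator(range(5), 2))`` yields ``[[0, 1], [2, 3], [4]]``.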
Example No. 2
from multiprocessing import Pipe, Process, Queue, active_children

# ``BKZ``, ``Param``, ``Strategy``, ``chunk_iterator`` and ``worker_process``
# are module-level names from the surrounding strategizer code.

def discover_strategy(block_size,
                      Strategizer,
                      strategies,
                      nthreads=1,
                      nsamples=50):
    """Discover a strategy using ``Strategizer``.

    :param block_size: block size to try
    :param Strategizer: strategizer to use
    :param strategies: strategies for smaller block sizes
    :param nthreads: number of worker processes to run in parallel
    :param nsamples: number of lattice bases to consider

    """
    connections = []
    processes = []
    k = nthreads
    m = nsamples

    strategizer = Strategizer(block_size)

    # everybody is alive in the beginning
    alive = range(m)

    return_queue = Queue()

    for i in range(m):
        manager, worker = Pipe()
        connections.append((manager, worker))
        strategies_ = list(strategies)
        strategies_.append(Strategizer.Strategy(block_size, worker))

        # note: success probability, rerandomisation density etc. can be adapted here
        param = Param(block_size=block_size,
                      strategies=strategies_,
                      flags=BKZ.GH_BND)
        process = Process(target=worker_process,
                          args=(2**16 * block_size + i, param, return_queue))
        processes.append(process)

    callback = [None] * m
    for chunk in chunk_iterator(alive, k):
        for i in chunk:
            process = processes[i]
            process.start()
            manager, worker = connections[i]
            worker.close()  # the parent keeps only the manager end of the pipe
            connections[i] = manager

        # wait for `k` responses
        for i in chunk:
            callback[i] = connections[i].recv()

    assert all(callback)  # everybody wants preprocessing parameters

    preproc_params = strategizer(callback)

    callback = callback_roundtrip(alive, k, connections, preproc_params)
    assert all(callback)  # everybody wants pruning parameters

    pruning_params = strategizer(callback)

    callback = callback_roundtrip(alive, k, connections, pruning_params)
    assert not any(callback)  # no more questions

    strategy = Strategy(block_size=block_size,
                        preprocessing_block_sizes=preproc_params,
                        pruning_parameters=pruning_params)

    active_children()  # side effect: joins any processes that have already finished

    stats = []
    for i in range(m):
        stats.append(return_queue.get())

    return strategy, tuple(stats), tuple(strategizer.queries)
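
The ``worker_process`` target handed to ``Process`` above is defined elsewhere in the module: it answers the strategizer's preprocessing and pruning queries over the ``Pipe`` embedded in its ``Strategy``. The rough sketch below only shows the outer shape (seed the RNG, build a basis, run one SVP reduction, report to the queue) and is an assumption, not the original worker:

import time
from fpylll import FPLLL, IntegerMatrix, LLL
from fpylll.algorithms.bkz2 import BKZReduction

def worker_process(seed, params, return_queue=None):
    # Hypothetical stand-in: the real worker also services the Strategy's
    # pipe callbacks, which fire inside svp_reduction().
    FPLLL.set_random_seed(seed)
    d = params.block_size
    A = LLL.reduction(IntegerMatrix.random(d, "qary", k=d // 2, bits=30))
    bkz = BKZReduction(A)
    t0 = time.process_time()
    bkz.svp_reduction(0, d, params)
    if return_queue is not None:
        return_queue.put({"cputime": time.process_time() - t0, "seed": seed})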
Example No. 3
from collections import OrderedDict
from multiprocessing import Process, Queue, active_children

# ``BKZ``, ``chunk_iterator``, ``svp_time`` and ``logger`` are module-level
# names from the surrounding code.

def compare_strategies(strategies_list,
                       jobs=1,
                       nsamples=50,
                       min_block_size=3,
                       max_block_size=None,
                       threads=1):
    """Run ``nsamples`` experiments, ``jobs`` processes at a time, timing one
    SVP reduction for each strategy in ``strategies_list``.

    :param strategies_list: a list of lists of strategies
    :param jobs: number of processes to run in parallel
    :param nsamples: number of experiments to run; as the block size increases,
              this is automatically reduced, staying ≥ ``max(32, jobs)``
    :param min_block_size: ignore block sizes smaller than this
    :param max_block_size: ignore block sizes bigger than this
    :param threads: number of threads to use per job

    """

    if max_block_size is None:
        max_block_size = min_block_size
        for strategies in strategies_list:
            for strategy in strategies:
                if strategy.block_size > max_block_size:
                    max_block_size = strategy.block_size

    S = dict([(bs, []) for bs in range(min_block_size, max_block_size + 1)])
    for strategies in strategies_list:
        for strategy in strategies:
            if strategy.block_size not in S:
                # logger.warning("ignoring block_size: %3d of %s", strategy.block_size, strategy)
                continue
            S[strategy.block_size].append(strategies)

    results = [[] for bs in range(max_block_size + 1)]

    for block_size in range(min_block_size, max_block_size + 1):
        logger.info("= block size: %3d, m: %3d =", block_size, nsamples)
        for strategies in S[block_size]:

            return_queue = Queue()
            result = OrderedDict([("strategy", strategies[block_size]),
                                  ("total time", None)])

            stats = []
            # 2. run `k` processes in parallel until first callback
            for chunk in chunk_iterator(range(nsamples), jobs):
                processes = []
                for i in chunk:
                    seed = 2**16 * block_size + i
                    param = BKZ.Param(block_size=block_size,
                                      strategies=list(strategies),
                                      flags=BKZ.VERBOSE | BKZ.GH_BND)
                    param["threads"] = threads
                    if jobs > 1:
                        process = Process(target=svp_time,
                                          args=(seed, param, return_queue))
                        processes.append(process)
                        process.start()
                    else:
                        stats.append(svp_time(seed, param, None))

                active_children()

                if jobs > 1:
                    for process in processes:
                        # process.join()  # NOTE this can block, but return_queue.get() blocks anyway
                        stats.append(return_queue.get())

            total_time = sum([float(stat.data["cputime"])
                              for stat in stats]) / nsamples
            total_walltime = sum(
                [float(stat.data["walltime"]) for stat in stats]) / nsamples
            length = sum([stat.data["delta"] for stat in stats]) / nsamples
            logger.info(
                "t: %10.4fs, w: %10.4fs, %s, %.5f" %
                (total_time, total_walltime, strategies[block_size], length))

            result["total time"] = total_time
            result["total walltime"] = total_walltime
            result["length"] = length
            result["stats"] = stats

            results[block_size].append(result)

        logger.info("")
        if results[block_size][0]["total time"] > 1.0 and nsamples > 2 * max(
                32, jobs):
            nsamples //= 2

    return results
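
As a usage sketch (hypothetical except for ``load_strategies_json`` and ``BKZ.DEFAULT_STRATEGY``, which fpylll provides), the function above could be driven like this:

import logging
from fpylll import BKZ, load_strategies_json

logging.basicConfig(level=logging.INFO)

strategies = load_strategies_json(BKZ.DEFAULT_STRATEGY)  # list indexed by block size
results = compare_strategies([strategies], jobs=4, nsamples=16,
                             min_block_size=10, max_block_size=40, threads=1)

for per_block_size in results:
    for result in per_block_size:
        print(result["strategy"], result["total time"], result["length"])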
Example No. 4
from collections import OrderedDict
from multiprocessing import Process, Queue, active_children

# ``BKZ``, ``chunk_iterator``, ``svp_time`` and ``logger`` are module-level
# names from the surrounding code.

def compare_strategies(strategies_list, nthreads=1, nsamples=50,
                       min_block_size=3, max_block_size=None):
    """Run ``nsamples`` experiments, ``nthreads`` processes at a time, timing
    one SVP reduction for each strategy in ``strategies_list``.

    :param strategies_list: a list of lists of strategies
    :param nthreads: number of processes to run in parallel
    :param nsamples: number of experiments to run; as the block size
              increases, this is automatically reduced, staying
              ≥ ``max(32, nthreads)``
    :param min_block_size: ignore block sizes smaller than this
    :param max_block_size: ignore block sizes bigger than this
    """

    if max_block_size is None:
        max_block_size = min_block_size
        for strategies in strategies_list:
            for strategy in strategies:
                if strategy.block_size > max_block_size:
                    max_block_size = strategy.block_size

    S = dict([(bs, []) for bs in range(min_block_size, max_block_size+1)])
    for strategies in strategies_list:
        for strategy in strategies:
            if strategy.block_size not in S:
                logger.warning("ignoring block_size: %3d of %s", strategy.block_size, strategy)
                continue
            S[strategy.block_size].append(strategies)

    results = [[] for bs in range(max_block_size+1)]

    for block_size in range(min_block_size, max_block_size+1):
        logger.info("= block size: %3d, m: %3d =", block_size, nsamples)
        for strategies in S[block_size]:

            return_queue = Queue()
            result = OrderedDict([("strategy", strategies[block_size]),
                                  ("total time", None)])

            stats = []
            # 2. run `k` processes in parallel until first callback
            for chunk in chunk_iterator(range(nsamples), nthreads):
                processes = []
                for i in chunk:
                    seed = 2**16 * block_size + i
                    param = BKZ.Param(block_size=block_size,
                                      strategies=list(strategies),
                                      flags=BKZ.VERBOSE|BKZ.GH_BND)
                    if nthreads > 1:
                        process = Process(target=svp_time, args=(seed, param, return_queue))
                        processes.append(process)
                        process.start()
                    else:
                        stats.append(svp_time(seed, param, None))

                active_children()

                if nthreads > 1:
                    for process in processes:
                        # NOTE: join() can block here, but return_queue.get() blocks anyway
                        process.join()
                        stats.append(return_queue.get())

            total_time = sum([float(stat.data["cputime"]) for stat in stats]) / nsamples
            length = sum([stat.data["|A_0|"] for stat in stats]) / nsamples
            logger.info("%10.6fs, %s, %.1f" % (total_time, strategies[block_size], length))

            result["total time"] = total_time
            result["length"] = length
            result["stats"] = stats

            results[block_size].append(result)

        if results[block_size][0]["total time"] > 1.0 and nsamples > 2*max(32, nthreads):
            nsamples //= 2

    return results
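
Both versions of ``compare_strategies`` time candidates through an ``svp_time`` worker defined elsewhere in the module. A minimal, hypothetical stand-in that produces the fields read above (``cputime``, ``walltime``, ``|A_0|``; the ``delta`` field used in Example No. 3 is omitted) might look like this; the real worker returns a richer statistics object:

import time
from fpylll import FPLLL, IntegerMatrix, LLL
from fpylll.algorithms.bkz2 import BKZReduction

class Stats(object):
    # Tiny top-level (hence picklable) container mimicking the ``.data``
    # interface read by compare_strategies.
    def __init__(self, data):
        self.data = data

def svp_time(seed, params, return_queue=None):
    # Hypothetical sketch: build a random q-ary basis, run one SVP call,
    # and report timings plus the norm of the first basis vector.
    FPLLL.set_random_seed(seed)
    d = params.block_size
    A = LLL.reduction(IntegerMatrix.random(d, "qary", k=d // 2, bits=30))
    bkz = BKZReduction(A)
    t_cpu, t_wall = time.process_time(), time.perf_counter()
    bkz.svp_reduction(0, d, params)
    stats = Stats({"cputime": time.process_time() - t_cpu,
                   "walltime": time.perf_counter() - t_wall,
                   "|A_0|": A[0].norm()})
    if return_queue is not None:
        return_queue.put(stats)
    return stats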