Example #1
def chunked_or_distributed(items: Iterable[Any], max_groups: int,
                           optimal_group_size: int) -> Iterable[Iterable[Any]]:
    """Divide *items* into at most *max_groups*. If possible, produces fewer
    than *max_groups*, but with at most *optimal_group_size* items in each
    group."""
    if len(items) / optimal_group_size <= max_groups:  # requires a sized *items*
        return chunked(items, optimal_group_size)
    else:
        return distribute(max_groups, items)
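For intuition, a minimal sketch (assuming only that `more_itertools` is installed) contrasting the two strategies the function chooses between: `chunked` fills contiguous groups up to a size limit, while `distribute` deals items round-robin into a fixed number of groups.

from more_itertools import chunked, distribute

items = list(range(7))

# chunked: contiguous groups of at most 3 items
print([list(g) for g in chunked(items, 3)])     # [[0, 1, 2], [3, 4, 5], [6]]

# distribute: exactly 3 groups, filled round-robin
print([list(g) for g in distribute(3, items)])  # [[0, 3, 6], [1, 4], [2, 5]]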
Example #2
 def divide_grid_in_blocs(self, grid):
     """
     Permet de diviser une grille en
     neuf blocs différents
     :returns: list
     """
     return list(
         flatten(
             map(lambda row: map(list, distribute(3, chunked(row, 3))),
                 chunked(flatten(grid), 27))))
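A standalone sketch of the same pipeline on a 9x9 grid (hypothetical data, not from the original project): `flatten(grid)` yields 81 cells, `chunked(..., 27)` cuts them into three-row bands, `chunked(band, 3)` splits each band into row triples, and `distribute(3, ...)` deals those triples round-robin so each group collects the three triples of one 3x3 block.

from more_itertools import chunked, distribute, flatten

# 9x9 grid whose cell (r, c) holds r * 9 + c
grid = [[r * 9 + c for c in range(9)] for r in range(9)]

blocks = list(
    flatten(
        map(lambda band: map(list, distribute(3, chunked(band, 3))),
            chunked(flatten(grid), 27))))

# First block: rows 0-2, columns 0-2
print(blocks[0])  # [[0, 1, 2], [9, 10, 11], [18, 19, 20]]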
Example #3
def download_genomes_parallel(accessions, out_dir=None, threads=1):
    logger.debug(f"Start downloading genomes using {threads} threads.")
    list_of_accessions = distribute(
        threads, accessions)  # divide accession list into num of threads
    futures = []
    with ThreadPoolExecutor(max_workers=threads,
                            thread_name_prefix="thread") as executor:
        for _accessions in list_of_accessions:
            f = executor.submit(download_genomes_from_assembly, _accessions,
                                out_dir)
            futures.append(f)
    for f in as_completed(futures):  # wait until all the jobs finish
        f.result()
Example #4
def reduce_p(function,
             sequence,
             n_jobs=multiprocessing.cpu_count(),
             initial=None):
    if n_jobs == 1:
        return reduce(function, sequence, initial)
    else:
        sliced_sequence = mit.distribute(n_jobs, sequence)
        with multiprocessing.Pool(n_jobs) as pool:
            new_sequence = pool.starmap(
                reduce, zip(repeat(function), sliced_sequence,
                            repeat(initial)))
        return reduce(function, new_sequence, initial)
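A hedged usage sketch for `reduce_p` (the imports are the ones the function itself relies on): each of the `n_jobs` round-robin slices is reduced independently and the partial results are reduced once more, so `function` should be associative and `initial` should act as its identity, otherwise the parallel result can differ from the serial one.

import multiprocessing
import operator
from functools import reduce
from itertools import repeat

import more_itertools as mit

if __name__ == '__main__':
    # operator.add is associative with identity 0, so both paths agree
    print(reduce_p(operator.add, range(100), n_jobs=1, initial=0))  # 4950
    print(reduce_p(operator.add, range(100), n_jobs=2, initial=0))  # 4950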
Example #5
def recombine_into(mating_group: t.List[t.Tuple[GraphIndividual, Record]],
                   brood_size: int) -> t.List[GraphIndividual]:
    """
    Take all genes and distribute them into progeny.
    For two individuals with N genes and `brood_size=1`, the single offspring will have 2N genes.
    :param mating_group: A group of individuals selected to give progeny.
    :param brood_size: The number of offspring.
    :return: List of offspring.
    """
    pool = random_permutation(
        chain.from_iterable(ind.genes() for ind, _ in mating_group))
    chunks = distribute(brood_size, pool)
    return _init_chunks(mating_group, chunks)
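The 2N-genes claim follows directly from `distribute`: with `brood_size=1`, the whole pooled gene set lands in a single chunk. A minimal sketch with integers standing in for genes (hypothetical; `GraphIndividual` is not needed to see the mechanism):

from itertools import chain
from more_itertools import distribute

# two "parents" with N = 3 genes each
parent1, parent2 = [1, 2, 3], [4, 5, 6]
pool = chain(parent1, parent2)

chunks = [list(c) for c in distribute(1, pool)]
print(chunks)  # [[1, 2, 3, 4, 5, 6]] -- one offspring with 2N genes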
Example #6
def recombine_genes_uniformly(mating_group: t.List[t.Tuple[Individual,
                                                           Record]],
                              brood_size: int) -> t.List[Individual]:
    """
    Combines genes of individuals in the `mating_group` in a single pool,
    and uniformly divides these genes into `brood_size` number of individuals.
    :param mating_group: A group of individuals selected to give progeny.
    :param brood_size: A number of offsprings.
    :return: List of offsprings.
    """
    pool = random_permutation(
        chain.from_iterable(ind.genes() for ind, _ in mating_group))
    chunks = take(brood_size, distribute(len(mating_group), pool))
    return _init_chunks(mating_group, chunks)
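A small sketch of the `take`/`distribute` combination above (integers standing in for genes): the pool is dealt into as many groups as there are parents, and only the first `brood_size` groups become offspring.

from more_itertools import distribute, take

pool = list(range(10))                 # pooled genes from 4 parents
chunks = take(2, distribute(4, pool))  # brood_size = 2
print([list(c) for c in chunks])       # [[0, 4, 8], [1, 5, 9]]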
Example #7
    def print(self, testcases, dryrun, workers, testcases_per_file):
        """ Multiprocess print testcases to files.

        Args:
            testcases (iterable): An iterator of testcases.
            dryrun (bool): Whether dryrun mode is enabled.
            workers (int): The number of processes to create.
            testcases_per_file (int, optional): The maximum number of testcases
                that can be printed to a single file.
        """
        chunks = distribute(workers, testcases)
        jobs = []
        for i in range(workers):
            p = Process(target=self._print,
                        args=(chunks[i], i, dryrun, testcases_per_file))
            jobs.append(p)
            p.start()
        for p in jobs:
            p.join()
Example #8
def test_exchange_fraction(random_genes):
    g1, g2, g3, g4 = (EdgeGene(1, 2, 'A', 'B', 1,
                               1), EdgeGene(1, 2, 'A', 'C', 1,
                                            1), EdgeGene(3, 4, 'A', 'B', 1, 1),
                      EdgeGene(3, 4, 'A', 'C', 1, 1))
    ind1 = GraphIndividual([g1])
    res = exchange_fraction([(ind1, Record(0, 0))], 1, 1.0)
    assert len(res) == 1
    ind1_ = res[0]
    assert set(ind1_.genes()) == {g1}
    ind2 = GraphIndividual([g1])
    ind1_, ind2_ = exchange_fraction([(ind1, Record(0, 0)),
                                      (ind2, Record(0, 0))], 2, 1.0)
    assert len(ind1_) == len(ind2_) == 1
    assert set(ind1_.genes()) == set(ind2_.genes()) == {g1}
    ind2 = GraphIndividual([g2])
    ind1_, ind2_ = exchange_fraction([(ind1, Record(0, 0)),
                                      (ind2, Record(0, 0))], 2, 0.9)
    assert set(ind1_.genes()) == {g1} and set(ind2_.genes()) == {g2}
    ind1 = GraphIndividual([g1, g2])
    ind2 = GraphIndividual([g3, g4])
    res = exchange_fraction([(ind1, Record(0, 0)), (ind2, Record(0, 0))], 2,
                            0.51)
    assert len(res) == 2
    ind1_, ind2_ = res
    assert len(ind1_) == len(ind2_) == 2

    # materialize both halves so they can be consumed more than once below
    genes1, genes2 = map(list, distribute(2, random_genes))
    ind1, ind2 = GraphIndividual(genes1), GraphIndividual(genes2)
    res = exchange_fraction([(ind1, Record(0, 0)), (ind2, Record(0, 0))], 1,
                            0.5)
    assert len(res) == 1
    ind = res[0]
    old_genes, new_genes = map(list,
                               partition(lambda g: g in genes1, ind.genes()))
    assert len(old_genes) >= len(new_genes)
    assert set(new_genes).issubset(genes2)
Example #9
    def run(self, dryrun=False, workers=1, testcases_per_file=1000):
        """ Run the generator: generate all testcases and print them to files.

        Args:
            dryrun (bool, optional): No files are actually written when dryrun
                is enabled. Defaults to False.
            workers (int, optional): The number of processes to use.
                Defaults to 1.
            testcases_per_file (int, optional): The maximum number of testcases
                that can be printed to a single file. Defaults to 1000.
        """
        ok = isinstance(dryrun, bool)
        ok &= isinstance(workers, int)
        ok &= isinstance(testcases_per_file, int)
        if not ok:
            message = 'Bad input types.'
            self.logger.error(f'TypeError: {message}')
            raise TypeError(message)

        ok &= workers > 0
        ok &= testcases_per_file > 0
        if not ok:
            message = 'Bad input values.'
            self.logger.error(f'ValueError: {message}')
            raise ValueError(message)

        self.logger.info(f'Generating {self.testcases_length} testcases...')

        # Make partitions
        self.logger.debug(
            f'STEP 1. Finding all possible ways in which {len(self.nodes)} '
            f'nodes can be partitioned into {self.number_of_partitions} '
            'partitions...')
        partitions = self.make_partitions()
        self.logger.debug(
            f'{self.number_of_nodes} nodes can be partitioned into '
            f'{self.number_of_partitions} partitions in '
            f'{self.S(len(self.nodes), self.number_of_partitions)} ways.')

        # Combine partitions with leaders
        self.logger.debug(
            'STEP 2. Finding all possible ways in which '
            f'{self.S(len(self.nodes), self.number_of_partitions)} partitions '
            f'can be combined with {len(self.target_nodes)} leaders...')
        scenarios = self.combine_partitions_with_leaders(partitions)
        self.logger.debug(
            f'{self.S(len(self.nodes), self.number_of_partitions)} partitions '
            f'can be combined with {len(self.target_nodes)} leaders in '
            f'{int(self.testcases_length**(1/self.number_of_rounds))} '
            f'possible ways.')

        # Combine the partition-leader scenarios from above with rounds
        self.logger.debug(
            'STEP 3. Finding all possible ways in which '
            f'{int(self.testcases_length**(1/self.number_of_rounds))} '
            'partition-leader scenario combinations can be combined with '
            f'{self.number_of_rounds} rounds...')
        testcases = self.combine_scenarios_with_rounds(scenarios)
        self.logger.debug(
            f'{int(self.testcases_length**(1/self.number_of_rounds))} '
            'partition-leader scenarios can be combined with '
            f'{self.number_of_rounds} rounds in {self.testcases_length} '
            'possible ways.')

        # Print the resulting testcases to files
        self.logger.debug(
            f'Printing all testcases to file using {workers} processes...')
        testcases = distribute(self.number_of_machines, testcases)
        context_manager = TemporaryDirectory() if dryrun else nullcontext()
        with context_manager as directory:
            self.folder_path = self.folder_path if not dryrun else directory
            self.print(testcases[self.machine_index - 1], dryrun, workers,
                       testcases_per_file)

        self.logger.info('Finished.')
Example #10
def deal(deck, nPlayers):
	return [list(p) for p in mit.distribute(nPlayers, deck)]
Example #11
def update_pauli_string(circuit: Circuit,
                        pauli_string: {Circuit, dict[str, float]},
                        phase: float = 1,
                        parallel: {bool, int} = False,
                        return_info: bool = False,
                        use_mpi: bool = None,
                        compress: int = 4,
                        simplify: bool = True,
                        remove_id_gates: bool = True,
                        float_type: any = 'float32',
                        verbose: bool = False,
                        **kwargs) -> defaultdict:
    """
    Evolve the density matrix according to `circuit`, using `pauli_string` as
    the initial product state. The evolved density matrix will be represented
    as a set of different Pauli strings, each with a different phase, such that
    their sum corresponds to the evolved density matrix. The number of branches
    depends on the number of non-Clifford gates in `circuit`.

    Parameters
    ----------
    circuit: Circuit
        Circuit to use to evolve `pauli_string`.
    pauli_string: {Circuit, dict[str, float]}
        Pauli string to be evolved. `pauli_string` must be a `Circuit` composed
        of single qubit Pauli `Gate`s (that is, either `Gate('I')`, `Gate('X')`,
        `Gate('Y')` or `Gate('Z')`), each one acting on every qubit of
        `circuit`. If a dictionary is provided, every key of `pauli_string` must
        be a valid Pauli string. The size of each Pauli string must be equal to
        the number of qubits in `circuit`. Values in `pauli_string` will be
        used as the initial phase for the given string.
    phase: float, optional
        Initial phase for `pauli_string`.
    atol: float, optional
        Discard all Pauli strings that have an absolute amplitude smaller than
        `atol`.
    parallel: int, optional
        Parallelize the simulation (where possible). If `True`, the number of
        available cpus is used. Otherwise, `parallel` threads are used.
    return_info: bool
        Return extra information collected during the evolution.
    use_mpi: bool, optional
        Use `MPI` if available. Unless `use_mpi=False`, `MPI` will be used if
        detected (for instance, if `mpiexec` is used to call HybridQ). If
        `use_mpi=True`, the use of `MPI` is forced (in case `MPI` is not
        automatically detected).
    compress: int, optional
        Compress `Circuit` using `utils.compress` prior to the simulation.
    simplify: bool, optional
        Simplify `Circuit` using `utils.simplify` prior to the simulation.
    remove_id_gates: bool, optional
        Remove `ID` gates prior to the simulation.
    float_type: any, optional
        Float type to use for the simulation.
    verbose: bool, optional
        Verbose output.

    Returns
    -------
    dict[str, float] [, dict[any, any]]
        If `return_info=False`, `update_pauli_string` returns a `dict` of Pauli
        strings and the corresponding amplitudes. The full density matrix can
        be reconstructed by summing over all the Pauli strings, weighted by the
        corresponding amplitudes. If `return_info=True`, information gathered
        during the simulation is also returned.

    Other Parameters
    ----------------
    eps: float, optional (default: auto)
        Do not branch if the branch weight for the given non-Clifford operation
        is smaller than `eps`. Defaults to `eps=1e-7` if `float_type=float32`
        and `eps=1e-8` if `float_type=float64`.
    atol: float, optional (default: auto)
        Remove elements from the final state if the element has an absolute
        amplitude smaller than `atol`. Defaults to `atol=1e-8` if
        `float_type=float32` and `atol=1e-12` if `float_type=float64`.
    branch_atol: float, optional
        Stop branching if the branch absolute amplitude is smaller than
        `branch_atol`. If not specified, it will be equal to `atol`.
    max_breadth_first_branches: int (default: auto)
        Max number of branches to collect using breadth-first search. The
        branches collected during the breadth-first phase will be split among
        the different threads (or nodes if using `MPI`).
    n_chunks: int (default: auto)
        Number of chunks into which the branches obtained during the
        breadth-first phase are divided. The default value is twelve times the
        number of threads.
    max_virtual_memory: float (default: 80)
        Max virtual memory (%) that can be used during the simulation. If the
        used virtual memory is above `max_virtual_memory`, `update_pauli_string`
        will raise an error.
    sleep_time: float (default: 0.1)
        Completion of parallel processes is checked every `sleep_time`
        seconds.

    Example
    -------
    >>> from hybridq.circuit import utils
    >>> import numpy as np
    >>>
    >>> # Define circuit
    >>> circuit = Circuit(
    >>>     [Gate('X', qubits=[0])**1.2,
    >>>      Gate('ISWAP', qubits=[0, 1])**2.3])
    >>>
    >>> # Define Pauli string
    >>> pauli_string = Circuit([Gate('Z', qubits=[1])])
    >>>
    >>> # Get density matrix decomposed in Pauli strings
    >>> dm = clifford.update_pauli_string(circuit=circuit,
    >>>                                   pauli_string=pauli_string,
    >>>                                   float_type='float64')
    >>>
    >>> dm
    defaultdict(<function hybridq.circuit.simulation.clifford.update_pauli_string.<locals>._db_init.<locals>.<lambda>()>,
                {'IZ': 0.7938926261462365,
                 'YI': -0.12114687473997318,
                 'ZI': -0.166744368113685,
                 'ZX': 0.2377641290737882,
                 'YX': -0.3272542485937367,
                 'XY': -0.40450849718747345})
    >>> # Reconstruct density matrix
    >>> U = sum(phase * np.kron(Gate(g1).matrix(),
    >>>                         Gate(g2).matrix()) for (g1, g2), phase in dm.items())
    >>>
    >>> U
    array([[ 0.62714826+0.j        ,  0.23776413+0.j        ,
             0.        +0.12114687j,  0.        +0.73176275j],
           [ 0.23776413+0.j        , -0.96063699+0.j        ,
             0.        -0.07725425j,  0.        +0.12114687j],
           [ 0.        -0.12114687j,  0.        +0.07725425j,
             0.96063699+0.j        , -0.23776413+0.j        ],
           [ 0.        -0.73176275j,  0.        -0.12114687j,
            -0.23776413+0.j        , -0.62714826+0.j        ]])
    >>> np.allclose(utils.matrix(circuit + pauli_string + circuit.inv()),
    >>>             U,
    >>>             atol=1e-8)
    True
    >>> U[0b11, 0b11]
    (-0.6271482580325515+0j)
    """

    # ==== Set default parameters ====

    # If use_mpi is None and MPI is detected, default to using MPI
    if use_mpi is None and _detect_mpi:

        # Warn that MPI is used because detected
        warn("MPI has been detected. Using MPI.")

        # Set MPI to true
        use_mpi = True

    # If parallel==True, use number of cpus
    if type(parallel) is bool:
        parallel = cpu_count() if parallel else 1
    else:
        parallel = int(parallel)
        if parallel <= 0:
            warn("'parallel' must be a positive integer. Setting parallel=1")
            parallel = 1

    # utils.globalize may not work properly on MacOSX systems ... for now,
    # let's disable parallelization on MacOSX
    if parallel > 1:
        from platform import system
        from warnings import warn

        if system() == 'Darwin':
            warn(
                "'utils.globalize' may not work on MacOSX. Disabling parallelization."
            )
            parallel = 1

    # Fix atol
    if 'atol' in kwargs:
        atol = kwargs['atol']
        del (kwargs['atol'])
    else:
        float_type = np.dtype(float_type)
        if float_type == np.float64:
            atol = 1e-12
        elif float_type == np.float32:
            atol = 1e-8
        else:
            raise ValueError(f'Unsupported array dtype: {float_type}')

    # Fix branch_atol
    if 'branch_atol' in kwargs:
        branch_atol = kwargs['branch_atol']
        del (kwargs['branch_atol'])
    else:
        branch_atol = atol

    # Fix eps
    if 'eps' in kwargs:
        eps = kwargs['eps']
        del (kwargs['eps'])
    else:
        float_type = np.dtype(float_type)
        if float_type == np.float64:
            eps = 1e-8
        elif float_type == np.float32:
            eps = 1e-7
        else:
            raise ValueError(f'Unsupported array dtype: {float_type}')

    # Set default db initialization
    def _db_init():
        return defaultdict(int)

    # Set default transform
    def _transform(ps):

        # Join bitstring
        return ''.join({_X: 'X', _Y: 'Y', _Z: 'Z', _I: 'I'}[op] for op in ps)

    # Set default collect
    def _collect(db, ps, ph):

        # Update final paulis
        db[ps] += ph

        # Remove elements close to zero
        if abs(db[ps]) < atol:
            del (db[ps])

    # Set default merge
    def _merge(db, db_new, use_tuple=False):

        # Update final paulis
        for ps, ph in db_new if use_tuple else db_new.items():

            # Collect results
            kwargs['collect'](db, ps, ph)

    kwargs.setdefault('max_breadth_first_branches', min(4 * 12 * parallel,
                                                        2**14))
    kwargs.setdefault('n_chunks', 12 * parallel)
    kwargs.setdefault('max_virtual_memory', 80)
    kwargs.setdefault('sleep_time', 0.1)
    kwargs.setdefault('collect', _collect)
    kwargs.setdefault('transform', _transform)
    kwargs.setdefault('merge', _merge)
    kwargs.setdefault('db_init', _db_init)

    # Get MPI info
    if use_mpi:
        from mpi4py import MPI
        _mpi_comm = MPI.COMM_WORLD
        _mpi_size = _mpi_comm.Get_size()
        _mpi_rank = _mpi_comm.Get_rank()
        kwargs.setdefault('max_breadth_first_branches_mpi',
                          min(_mpi_size * 2**9, 2**14))
        kwargs.setdefault('mpi_chunk_max_size', 2**20)
        kwargs.setdefault('mpi_merge', True)

    # Get complex_type from float_type
    complex_type = (np.array([1], dtype=float_type) +
                    1j * np.array([1], dtype=float_type)).dtype

    # Local verbose
    _verbose = verbose and (not use_mpi or _mpi_rank == 0)

    # =========== CHECKS =============

    if type(pauli_string) == Circuit:
        from collections import Counter

        # Initialize error message
        _err_msg = "'pauli_string' must contain only I, X, Y and Z gates acting on different qubits."

        # Check qubits match with circuit
        if any(g.n_qubits != 1 or not g.qubits for g in pauli_string) or set(
                pauli_string.all_qubits()).difference(
                    circuit.all_qubits()) or set(
                        Counter(gate.qubits[0]
                                for gate in pauli_string).values()).difference(
                                    [1]):
            raise ValueError(_err_msg)

        # Get ideal paulis
        _ig = list(map(lambda n: Gate(n).matrix(), 'IXYZ'))

        # Get the correct pauli
        def _get_pauli(gate):
            # Get matrix
            U = gate.matrix()

            # Get right pauli
            p = next(
                (p for x, p in enumerate('IXYZ') if np.allclose(_ig[x], U)),
                None)

            # If not found, raise error
            if not p:
                raise ValueError(_err_msg)

            # Otherwise, return pauli
            return Gate(p, qubits=gate.qubits)

        # Reconstruct paulis
        pauli_string = Circuit(map(_get_pauli, pauli_string))

    else:

        # Check that all strings only have I,X,Y,Z tokens
        _n_qubits = len(circuit.all_qubits())
        if any(
                set(p).difference('IXYZ') or len(p) != _n_qubits
                for p in pauli_string):
            raise ValueError(
                "'pauli_string' must contain only I, X, Y and Z gates acting on different qubits."
            )

    # ================================

    # Start pre-processing time
    _prep_time = time()

    # Get qubits
    _qubits = circuit.all_qubits()

    # Remove ID gates
    if remove_id_gates:
        circuit = Circuit(gate for gate in circuit if gate.name != 'I')

    # Simplify circuit
    if simplify:
        # Get qubits to pin
        if type(pauli_string) == Circuit:
            # Pinned qubits
            _pinned_qubits = pauli_string.all_qubits()

        else:
            # Find qubits to pin
            _pinned_qubits = set.union(
                *({q
                   for q, g in zip(_qubits, p)
                   if g != 'I'}
                  for p in pauli_string))

        # Simplify
        circuit = utils.simplify(circuit,
                                 remove_id_gates=remove_id_gates,
                                 verbose=_verbose)
        circuit = utils.popright(utils.simplify(circuit),
                                 pinned_qubits=set(_pinned_qubits).intersection(
                                     circuit.all_qubits()),
                                 verbose=_verbose)

    # Compress circuit
    circuit = Circuit(
        utils.to_matrix_gate(c, complex_type=complex_type)
        for c in tqdm(utils.compress(circuit, max_n_qubits=compress),
                      disable=not _verbose,
                      desc=f"Compress ({int(compress)})"))

    # Pad missing qubits
    circuit += Circuit(
        Gate('MATRIX', [q], U=np.eye(2))
        for q in set(_qubits).difference(circuit.all_qubits()))

    # Get qubits map
    qubits_map = kwargs['qubits_map'] if 'qubits_map' in kwargs else {
        q: x for x, q in enumerate(circuit.all_qubits())
    }

    # Pre-process circuit
    _LS_cache = {}
    _P_cache = {}
    circuit = [
        g for gate in tqdm(reversed(circuit),
                           total=len(circuit),
                           disable=not _verbose,
                           desc='Pre-processing')
        for g in _process_gate(gate, LS_cache=_LS_cache, P_cache=_P_cache)
    ]
    _LS_cache.clear()
    _P_cache.clear()
    del (_LS_cache)
    del (_P_cache)

    # Get maximum number of qubits and parameters
    _max_n_qubits = max(max(len(gate[1]) for gate in circuit), 2)
    _max_n_params = max(len(gate[2]) for gate in circuit)

    # Get qubits
    qubits = np.array([
        np.pad([qubits_map[q]
                for q in gate[1]], (0, _max_n_qubits - len(gate[1])))
        for gate in circuit
    ],
                      dtype='int32')

    # Get parameters
    params = np.round(
        np.array([
            np.pad(gate[2], (0, _max_n_params - len(gate[2])))
            for gate in circuit
        ],
                 dtype=float_type),
        -int(np.floor(np.log10(atol))) if atol < 1 else 0)

    # Remove -0
    params[np.abs(params) == 0] = 0

    # Quick check
    assert (all('_' + gate[0] in globals() for gate in circuit))

    # Get gates
    gates = np.array([globals()['_' + gate[0]] for gate in circuit],
                     dtype='int')

    # Compute expected number of paths
    _log2_n_expected_branches = 0
    for _idx in np.where(np.isin(gates, _MATRIX_SET))[0]:
        _nq = (gates[_idx] // _gate_mul) + 1
        _p = params[_idx][:4**(2 * _nq)]
        _log2_n_expected_branches += np.sum(
            np.log2(
                np.sum(np.abs(np.reshape(_p, (4**_nq, 4**_nq))) > eps,
                       axis=1))) / 4**_nq

    # Check
    assert (len(gates) == len(qubits) and len(gates) == len(params))

    # Initialize branches
    if type(pauli_string) == Circuit:

        # Convert Pauli string
        _pauli_string = np.array([_I] * len(qubits_map), dtype='int')
        for gate in pauli_string:
            if gate.name != 'I':
                _pauli_string[qubits_map[gate.qubits[0]]] = {
                    'X': _X,
                    'Y': _Y,
                    'Z': _Z
                }[gate.name]

        # Initialize branches
        branches = [(_pauli_string, phase, 0)]

    else:

        # Initialize branches
        branches = [(np.array([{
            'I': _I,
            'X': _X,
            'Y': _Y,
            'Z': _Z
        }[g]
                               for g in p],
                              dtype='int'), phase, 0)
                    for p, phase in pauli_string.items()
                    if abs(phase) > atol]

    # Initialize final Pauli strings
    db = kwargs['db_init']()

    # Define update function
    _update = partial_func(_update_pauli_string,
                           gates,
                           qubits,
                           params,
                           eps=eps,
                           atol=branch_atol)

    # End pre-processing time
    _prep_time = time() - _prep_time

    # Initialize infos
    _info_init = lambda: {
        'n_explored_branches': 0,
        'largest_n_branches_in_memory': 0,
        'peak_virtual_memory (GB)': virtual_memory().used / 2**30,
        'average_virtual_memory (GB)': (virtual_memory().used / 2**30, 1),
        'n_threads': parallel,
        'n_cpus': cpu_count(),
        'eps': eps,
        'atol': atol,
        'branch_atol': branch_atol,
        'float_type': str(float_type),
        'log2_n_expected_branches': _log2_n_expected_branches
    }
    infos = _info_init()
    infos['memory_baseline (GB)'] = virtual_memory().used / 2**30
    if not use_mpi or _mpi_rank == 0:
        infos['n_explored_branches'] = 1
        infos['largest_n_branches_in_memory'] = 1

    # Start clock
    _init_time = time()

    # Scatter first batch of branches to different MPI nodes
    if use_mpi and _mpi_size > 1:

        if _mpi_rank == 0:

            # Explore branches (breadth-first search)
            branches = _breadth_first_search(
                _update,
                db,
                branches,
                max_n_branches=kwargs['max_breadth_first_branches_mpi'],
                infos=infos,
                verbose=verbose,
                mpi_rank=_mpi_rank,
                **kwargs)

        # Distribute branches among MPI ranks, materializing each tee-based
        # chunk as a list before scattering
        branches = _mpi_comm.scatter(
            [list(x) for x in distribute(_mpi_size, branches)], root=0)

    # Explore branches (breadth-first search)
    branches = _breadth_first_search(
        _update,
        db,
        branches,
        max_n_branches=kwargs['max_breadth_first_branches'],
        infos=infos,
        verbose=verbose if not use_mpi or _mpi_rank == 0 else False,
        **kwargs)

    # If there are remaining branches, use depth-first search
    if branches:

        _depth_first_search(_update,
                            db,
                            branches,
                            parallel=parallel,
                            infos=infos,
                            info_init=_info_init,
                            verbose=verbose,
                            mpi_rank=_mpi_rank if use_mpi else 0,
                            mpi_size=_mpi_size if use_mpi else 1,
                            **kwargs)

    # Update infos
    infos['average_virtual_memory (GB)'] = infos['average_virtual_memory (GB)'][
        0] / infos['average_virtual_memory (GB)'][1] - infos[
            'memory_baseline (GB)']
    infos['peak_virtual_memory (GB)'] -= infos['memory_baseline (GB)']

    # Update branching time
    infos['branching_time (s)'] = time() - _init_time

    # Collect results
    if use_mpi and _mpi_size > 1 and kwargs['mpi_merge']:

        for _k in infos:
            infos[_k] = [infos[_k]]

        # Initialize pbar
        if _mpi_rank == 0:
            pbar = tqdm(total=int(np.ceil(np.log2(_mpi_size))),
                        disable=not verbose,
                        desc='Collect results')

        # Initialize tag and size
        _tag = 0
        _size = _mpi_size
        while _size > 1:
            # Update progressbar
            if _mpi_rank == 0:
                pbar.set_description(
                    f'Collect results (Mem={virtual_memory().percent}%)')

            # Get shift
            _shift = (_size // 2) + (_size % 2)

            if _mpi_rank < (_size // 2):
                # Get infos
                _infos = _mpi_comm.recv(source=_mpi_rank + _shift, tag=_tag)

                # Update infos
                for _k in infos:
                    infos[_k].extend(_infos[_k])

                # Get number of chunks
                _n_chunks = _mpi_comm.recv(source=_mpi_rank + _shift,
                                           tag=_tag + 1)

                if _n_chunks > 1:
                    # Initialize _process
                    with tqdm(range(_n_chunks),
                              desc='Get db',
                              leave=False,
                              disable=_mpi_rank != 0) as pbar:
                        for _ in pbar:
                            # Receive db
                            _db = _mpi_comm.recv(source=_mpi_rank + _shift,
                                                 tag=_tag + 2)

                            # Merge datasets
                            kwargs['merge'](db, _db, use_tuple=True)

                            # Update description
                            pbar.set_description(
                                f'Get db (Mem={virtual_memory().percent}%)')

                            # Clear dataset
                            _db.clear()

                else:
                    # Receive db
                    _db = _mpi_comm.recv(source=_mpi_rank + _shift,
                                         tag=_tag + 2)

                    # Merge datasets
                    kwargs['merge'](db, _db)

                    # Clear dataset
                    _db.clear()

            elif _shift <= _mpi_rank < _size:
                # Remove default_factory because pickle is picky regarding local objects
                db.default_factory = None

                # Send infos
                _mpi_comm.send(infos, dest=_mpi_rank - _shift, tag=_tag)

                # Compute chunks
                _n_chunks = kwargs['mpi_chunk_max_size']
                _n_chunks = (len(db) // _n_chunks) + (
                    (len(db) % _n_chunks) != 0)

                # Send number of chunks
                _mpi_comm.send(_n_chunks, dest=_mpi_rank - _shift, tag=_tag + 1)

                if _n_chunks > 1:
                    # Split db in chunks
                    for _db in chunked(db.items(),
                                       kwargs['mpi_chunk_max_size']):
                        _mpi_comm.send(_db,
                                       dest=_mpi_rank - _shift,
                                       tag=_tag + 2)
                else:
                    # Send db
                    _mpi_comm.send(db, dest=_mpi_rank - _shift, tag=_tag + 2)

                # Reset db and infos
                db.clear()
                infos.clear()

            # update size
            _tag += 3
            _size = _shift
            _mpi_comm.barrier()

            # Update progressbar
            if _mpi_rank == 0:
                pbar.set_description(
                    f'Collect results (Mem={virtual_memory().percent}%)')
                pbar.update()

    # Update runtime
    if not use_mpi or _mpi_rank == 0 or not kwargs['mpi_merge']:
        infos['runtime (s)'] = time() - _init_time
        infos['pre-processing (s)'] = _prep_time

    # Check that all other dbs/infos (excluding rank==0) have been cleared
    if use_mpi and _mpi_rank > 0 and kwargs['mpi_merge']:
        assert (not len(db) and not len(infos))

    if return_info:
        return db, infos
    else:
        return db
Example #12
def _depth_first_search(_update, db, branches, parallel, infos, info_init,
                        verbose, mpi_rank, mpi_size, **kwargs):

    # Define parallel core
    def _parallel_core(branches, db=None):

        # Initialize db
        if db is None:
            db = kwargs['db_init']()

        # Convert to list
        branches = list(branches)

        # Initialize infos
        infos = info_init()

        # Explore all branches
        while branches:

            # Get new branches
            (_new_ps, _new_ph), _new_branches = _update(*branches.pop())

            # Collect results
            kwargs['collect'](db, kwargs['transform'](_new_ps), _new_ph)

            # Update branches
            branches.extend(_new_branches)

            # Update infos
            infos['largest_n_branches_in_memory'] = max(
                len(branches), infos['largest_n_branches_in_memory'])

            # Update infos
            infos['n_explored_branches'] += 1

        return db, infos

    # If no parallelization is required, explore branches one by one
    if parallel == 1:
        from more_itertools import ichunked

        # Get number of chunks
        chunk_size = max(1, len(branches) // 100)

        for _bs in tqdm(ichunked(branches, chunk_size),
                        total=len(branches) // chunk_size,
                        desc=f'Mem={virtual_memory().percent}%',
                        disable=not verbose):
            # Update database and infos
            db, _infos = _parallel_core(_bs, db)

            # Update infos
            infos['n_explored_branches'] += _infos['n_explored_branches']
            infos['largest_n_branches_in_memory'] = max(
                _infos['largest_n_branches_in_memory'],
                infos['largest_n_branches_in_memory'])

    # Otherwise, distribute workload among different cores
    else:
        with globalize(_parallel_core) as _parallel_core, Pool(
                parallel) as pool:

            # Apply async
            _fps = [
                pool.apply_async(_parallel_core, (_branches,))
                for _branches in distribute(kwargs['n_chunks'], branches)
            ]
            _status = [False] * len(_fps)

            with tqdm(total=len(_fps),
                      desc=f'Mem={virtual_memory().percent}%',
                      disable=not verbose) as pbar:

                _pending = len(_fps)
                while _pending:

                    # Wait
                    sleep(kwargs['sleep_time'])

                    # Activate/deactivate the progress bar depending on rank
                    if verbose:
                        pbar.disable = int(time()) % mpi_size != mpi_rank

                    # Get virtual memory
                    _vm = virtual_memory()

                    _pending = 0
                    for _x, (_p, _s) in enumerate(zip(_fps, _status)):
                        if not _p.ready():
                            _pending += 1
                        elif not _s:
                            # Collect data
                            _new_db, _infos = _p.get()

                            # Merge datasets
                            kwargs['merge'](db, _new_db)

                            # Clear dataset
                            _new_db.clear()

                            # Update infos
                            infos['n_explored_branches'] += _infos[
                                'n_explored_branches']
                            infos['largest_n_branches_in_memory'] = max(
                                _infos['largest_n_branches_in_memory'],
                                infos['largest_n_branches_in_memory'])

                            # Set status
                            _status[_x] = True

                    # Update pbar
                    if verbose:
                        pbar.set_description(
                            (f'[{mpi_rank}] ' if mpi_size > 1 else '') + \
                            f'Mem={_vm.percent}%, ' + \
                            f'NThreads={infos["n_threads"]}, ' + \
                            f'NCPUs={infos["n_cpus"]}, ' + \
                            f'LoadAvg={getloadavg()[0]/infos["n_cpus"]*100:1.2f}%, ' + \
                            f'NBranches={infos["n_explored_branches"]}'
                        )
                        pbar.n = len(_fps) - _pending
                        pbar.refresh()

                    # Update infos
                    infos['average_virtual_memory (GB)'] = (
                        infos['average_virtual_memory (GB)'][0] +
                        _vm.used / 2**30,
                        infos['average_virtual_memory (GB)'][1] + 1)
                    infos['peak_virtual_memory (GB)'] = max(
                        infos['peak_virtual_memory (GB)'], _vm.used / 2**30)

                    # If memory above threshold, raise error
                    if _vm.percent > kwargs['max_virtual_memory']:
                        raise MemoryError(
                            f'Memory above threshold: {_vm.percent}% > {kwargs["max_virtual_memory"]}%'
                        )

                # Last refresh
                if verbose:
                    pbar.refresh()

        # Check all chunks have been explored
        assert all(_status)
Example #13
import random
import more_itertools as mit
import operator

# Build a Deck
suits = "CDHS"
ranks = [str(r) for r in list(range(2, 11)) + list("JQKA")]
DeckCard = [j + i for j in suits for i in ranks]

# Shuffle and Distribute
players = 4
random.shuffle(DeckCard)
hands = [list(hand) for hand in mit.distribute(players, DeckCard)]
# show all 4 hands
print("player1:", hands[0])
print("player2:", hands[1])
print("player3:", hands[2])
print("AgentHand:", hands[3])

playcard1 = 'SA'
playcard2 = 'SK'
playcard3 = 'SQ'
playcard4 = 'S10'
trick = [playcard1, playcard2, playcard3, playcard4]
print(len(trick))
if len(trick) == 4:
    leadSuit = trick[0][0]  # suit is the first character of the card string
    print(leadSuit)
    cards_value = {'2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, '10': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 14 }
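The snippet ends after building `cards_value`; a hypothetical completion (not part of the original code) that determines the trick winner among the cards following the lead suit:

# cards are suit + rank strings (e.g. 'SA', 'S10'): card[0] is the suit,
# card[1:] is the rank
following = [card for card in trick if card[0] == leadSuit]
winner = max(following, key=lambda card: cards_value[card[1:]])
print("trick winner:", winner)  # SA -- the ace of spades wins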
Example #14
import itertools as it
import numpy

from AnalyticalReasoning import Variable
import more_itertools as m_it

n = 5
group = numpy.empty(n, dtype=object)
A = Variable("A")
B = Variable("B")
C = Variable("C")
D = Variable("D")
E = Variable("E")
F = Variable("F")
G = Variable("G")
H = Variable("H")
I = Variable("I")
group[0] = I
group[1] = H
group[2] = G
group[3] = F
group[4] = E
print(len(group))

# distribute deals round-robin: the groups are (1, 4, 7), (2, 5) and (3, 6)
children = m_it.distribute(3, [1, 2, 3, 4, 5, 6, 7])

distinct_lists = []
for perm in it.permutations([1, 2, 3, 4, 5, 6]):
    lists = [set(s) for s in m_it.split_into(perm, [1, 2, 3])]
    #print(lists)
    if (lists not in distinct_lists):
        distinct_lists.append(lists)
        print(lists)
#print(distinct_lists)
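# Each distinct partition appears 1! * 2! * 3! = 12 times among the 720
# permutations, so the loop above prints 6! / 12 = 60 distinct partitions.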