Example #1
def func(n_cores = 1, **kwargs):
    """
    Function for command line action

    **Arguments:**
        :*n_cores*: Number of cores to use
    """
    from multiprocessing import Pool
    from MDclt import pool_director

    # block_generator_class, block_accumulator_class, and Block_Acceptor
    # are expected to be available from the enclosing scope
    block_generator   = block_generator_class(**kwargs)
    block_accumulator = block_accumulator_class(
      preexisting_slice = block_generator.preexisting_slice,
      incoming_slice    = block_generator.incoming_slice,
      outputs           = block_generator.outputs,
      **kwargs)

    if n_cores == 1:                # Serial
        for block in block_generator:
            block()
            block_accumulator.send(block)
    else:                           # Parallel (processes)
        pool = Pool(n_cores)
        for block in pool.imap_unordered(pool_director, block_generator):
            block_accumulator.send(block)
        pool.close()
        pool.join()

    block_accumulator.close()

    block_acceptor = Block_Acceptor(outputs = block_accumulator.outputs,
                       **kwargs)
    block_acceptor.send(block_accumulator)
    block_acceptor.close()
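In the parallel branch above, `pool_director` is mapped over the block generator in worker processes, and each value it returns is forwarded to the accumulator. MDclt's actual implementation is not shown on this page; a minimal sketch consistent with that contract would execute the block and return it:

def pool_director(block):
    """
    Sketch only: executes a block in a worker process and returns it
    (the real MDclt.pool_director may differ)
    """
    block()
    return block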
Example #2
def bridge(input, output, input2 = None, **kwargs):
    """
    Loads a dataset, optionally reduces it to its elementwise minimum
    with a second dataset, and forwards the result to output
    """
    import numpy as np

    def load_input(in_path, in_address, table = False, **kwargs):
        """
        Loads a dataset and its attributes from an HDF5 file
        """
        from MD_toolkit.HDF5_File import HDF5_File as h5

        with h5(in_path) as in_h5:
            if table: dataset = in_h5.load(in_address, type = "table")
            else:     dataset = in_h5.load(in_address, type = "array")
            attrs = in_h5.attrs("0000/{0}".format(in_address[2:]))
        print("Dataset loaded from {0}[{1}]".format(in_path, in_address))
        return dataset, attrs

    # Load data
    dataset, attrs = load_input(in_path=input[0], in_address=input[1],
      **kwargs)
    if input2 is not None:
        # Reduce the two datasets to their elementwise minimum
        dataset2, _ = load_input(in_path=input2[0], in_address=input2[1],
          **kwargs)
        stacked = np.column_stack((dataset, dataset2))
        minimum = np.min(stacked, axis = 1)
        dataset = minimum[:, np.newaxis]

    block = Bridge_Block(dataset=dataset, attrs=attrs, output=tuple(output))

    block_acceptor = Block_Acceptor(outputs=[tuple(output)])
    block_acceptor.next()                   # Prime coroutine before sending
    block_acceptor.send(block)
    block_acceptor.close()
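The `input2` branch reduces the two datasets to their elementwise minimum: with two length-n arrays, `np.column_stack` yields an (n, 2) array, `np.min(..., axis=1)` takes the per-row minimum, and `[:, np.newaxis]` restores a column shape. A self-contained illustration:

import numpy as np

a = np.array([1.0, 5.0, 3.0])
b = np.array([2.0, 4.0, 9.0])

stacked = np.column_stack((a, b))      # shape (3, 2)
minimum = np.min(stacked, axis=1)      # array([1., 4., 3.])
column  = minimum[:, np.newaxis]       # shape (3, 1)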
Example #3
def command_line(n_cores = 1, **kwargs):
    """
    Provides command line functionality for this analysis

    **Arguments:**
        :*n_cores*: Number of cores to use

.. todo::
        - Figure out syntax to get this into MDclt.primary
    """
    from multiprocessing import Pool
    from MDclt import pool_director

    block_generator = AmberLog_Block_Generator(**kwargs)
    block_acceptor  = Block_Acceptor(outputs = block_generator.outputs,
                        **kwargs)
    if n_cores == 1:                # Serial
        for block in block_generator:
            block()
            block_acceptor.send(block)
    else:                           # Parallel (processes)
        pool = Pool(n_cores)
        for block in pool.imap_unordered(pool_director, block_generator):
            block_acceptor.send(block)
        pool.close()
        pool.join()

    block_acceptor.close()
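Examples #2 and #4 prime their acceptors with `.next()` before sending, which suggests that Block_Acceptor wraps a Python 2 generator coroutine. A hypothetical minimal acceptor following that protocol (not MDclt's actual class) would look like:

class MinimalAcceptor(object):
    """
    Hypothetical coroutine-backed acceptor illustrating the
    next()/send()/close() protocol used above
    """
    def __init__(self):
        self._coroutine = self._receive()

    def next(self):
        next(self._coroutine)           # Prime: advance to first yield

    def send(self, block):
        self._coroutine.send(block)     # Deliver one block

    def close(self):
        self._coroutine.close()         # Raise GeneratorExit inside

    def _receive(self):
        while True:
            block = (yield)             # Wait for each sent block
            print("received {0}".format(block))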
Example #4
def command_line(n_cores = 1, **kwargs):
    """
    Provides command line functionality for this analysis

    **Arguments:**
        :*n_cores*: Number of cores to use

.. todo::
        - Figure out syntax to get this into MDclt.primary
    """
    from multiprocessing import Pool
    from MDclt import pool_director

    block_generator   = Association_Block_Generator(**kwargs)
    block_accumulator = Association_Block_Accumulator(
                          preexisting_slice = block_generator.preexisting_slice,
                          incoming_slice    = block_generator.incoming_slice,
                          **kwargs)
    block_accumulator.next()

    if n_cores == 1:                # Serial
        for block in block_generator:
            block()
            block_accumulator.send(block)
    else:                           # Parallel (processes)
        pool = Pool(n_cores)
        for block in pool.imap_unordered(pool_director, block_generator):
            block_accumulator.send(block)
        pool.close()
        pool.join()

    block_accumulator.close()
    block_acceptor = Block_Acceptor(out_path = kwargs["output"], **kwargs)
    block_acceptor.next()
    block_acceptor.send(block_accumulator)
    block_acceptor.close()
Example #5
def func(**kwargs):
    """
    Function for command line action
    """
    analyzer = analyzer_class(**kwargs)
    analyzer()
    acceptor = Block_Acceptor(outputs = analyzer.outputs, **kwargs)
    acceptor.send(analyzer)
    acceptor.close()
    print(analyzer)
Example #6
def assign_command_line(**kwargs):
    """
    Function for command line action

    **Arguments:**
        :*n_cores*: Number of cores to use
    """
    analyzer = Assign_Analyzer(**kwargs)
    analyzer()
    acceptor = Block_Acceptor(outputs = analyzer.outputs, **kwargs)
    acceptor.send(analyzer)
    acceptor.close()
Example #7
def coord_command_line(n_cores = 1, **kwargs):
    """
    Function for command line action

    **Arguments:**
        :*n_cores*: Number of cores to use
    """
    generator = Assign_Block_Generator(**kwargs)
    acceptor  = Block_Acceptor(outputs = generator.outputs, **kwargs)

    if n_cores == 1:                # Serial
        for block in generator:
            block()
            acceptor.send(block)
    else:                           # Parallel (processes)
        from multiprocessing import Pool
        from MDclt import pool_director
        pool = Pool(n_cores)
        for block in pool.imap_unordered(pool_director, generator):
            acceptor.send(block)
        pool.close()
        pool.join()

    acceptor.close()
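Examples #1, #3, #4, and #7 all repeat the same serial/parallel dispatch. That pattern could be factored into a shared helper along these lines (a sketch; `run_blocks` is not part of MDclt):

def run_blocks(generator, sink, n_cores=1):
    """
    Hypothetical helper: executes blocks serially or via a process
    pool and forwards each finished block to sink.send()
    """
    if n_cores == 1:                # Serial
        for block in generator:
            block()
            sink.send(block)
    else:                           # Parallel (processes)
        from multiprocessing import Pool
        from MDclt import pool_director
        pool = Pool(n_cores)
        for block in pool.imap_unordered(pool_director, generator):
            sink.send(block)
        pool.close()
        pool.join()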