# Example 1
def bridge(input, output, input2 = None, **kwargs):
    """
    Copies a dataset out of an HDF5 file and hands it to a Block_Acceptor
    for writing to *output*.

    If *input2* is given, a second dataset is loaded and the element-wise
    minimum of the two (taken row by row) is forwarded instead.

    **Arguments:**
        :*input*:  (path, address) of the primary dataset
        :*output*: destination address, converted to a tuple
        :*input2*: optional (path, address) of a second dataset
    """
    def _read(in_path, in_address, table = False, **kwargs):
        """
        Loads a dataset and its attributes from the HDF5 file at *in_path*.
        """
        from MD_toolkit.HDF5_File import HDF5_File as h5

        kind = "table" if table else "array"
        with h5(in_path) as h5_file:
            data = h5_file.load(in_address, type = kind)
            # NOTE(review): attributes are read from the "0000/..." group,
            # with the first two characters of the address stripped —
            # presumably a leading "0/" prefix; confirm against callers
            meta = h5_file.attrs("0000/{0}".format(in_address[2:]))
        print("Dataset loaded from {0}[{1}]".format(in_path, in_address))
        return data, meta

    # Primary dataset and its attributes
    dataset, attrs = _read(in_path = input[0], in_address = input[1],
      **kwargs)

    # Optional second dataset: keep the row-wise minimum of the pair,
    # reshaped back to a single column
    if input2 is not None:
        other, _ = _read(in_path = input2[0], in_address = input2[1],
          **kwargs)
        paired  = np.column_stack((dataset, other))
        dataset = np.min(paired, axis = 1)[:, np.newaxis]

    block = Bridge_Block(dataset = dataset, attrs = attrs,
      output = tuple(output))

    # Prime the acceptor coroutine, feed it the block, and shut it down
    acceptor = Block_Acceptor(outputs = [tuple(output)])
    acceptor.next()
    acceptor.send(block)
    acceptor.close()
# Example 2
def command_line(n_cores = 1, **kwargs):
    """
    Provides command line functionality for this analysis

    Builds the block generator/accumulator pipeline, runs each block either
    serially or across a process pool, then hands the accumulated result to
    a Block_Acceptor for output.

    **Arguments:**
        :*n_cores*: Number of cores to use

    .. todo:
        - Figure out syntax to get this into MDclt.primary
    """
    from multiprocessing import Pool

    generator   = Association_Block_Generator(**kwargs)
    accumulator = Association_Block_Accumulator(
                    preexisting_slice = generator.preexisting_slice,
                    incoming_slice    = generator.incoming_slice,
                    **kwargs)
    # Prime the accumulator coroutine before sending blocks into it
    accumulator.next()

    if n_cores != 1:                # Parallel (processes)
        pool = Pool(n_cores)
        # pool_director executes each block in a worker; completed blocks
        # arrive in arbitrary order
        for finished in pool.imap_unordered(pool_director, generator):
            accumulator.send(finished)
        pool.close()
        pool.join()
    else:                           # Serial
        for blk in generator:
            blk()
            accumulator.send(blk)

    accumulator.close()

    # Hand the accumulated result to the acceptor for writing
    acceptor = Block_Acceptor(out_path = kwargs["output"], **kwargs)
    acceptor.next()
    acceptor.send(accumulator)
    acceptor.close()