Example #1
def ipyparallel_mapper(X, y, w_list, *args):
    # relies on module-level `dview` (an ipyparallel DirectView) and `map_losses`
    num_cores = 6

    # too few weight columns to be worth distributing: evaluate locally
    if w_list.shape[1] < 16 * num_cores:
        return map_losses(X, y, w_list, np.arange(0, w_list.shape[1]))
    
    weights_index = np.arange(0, w_list.shape[1])
    weights_index_split = np.array_split(weights_index, num_cores)

    # push the weight matrix to every engine once; the References below let
    # each engine use its local copy instead of re-serialising the data per
    # task (X and y are assumed to have been pushed to the engines earlier)
    dview.push(dict(w_list=w_list))

    rX = [ipp.Reference('X') for i in range(num_cores)]
    ry = [ipp.Reference('y') for i in range(num_cores)]
    rweights = [ipp.Reference('w_list') for i in range(num_cores)]

    result = dview.map_sync(map_losses, rX, ry, rweights, weights_index_split)

    # flatten the per-engine results into a single loss array
    loss = []
    for l in result:
        loss.extend(l)
    loss = np.hstack(loss)
    return loss
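A minimal sketch of the push-then-Reference pattern used above, assuming a running ipyparallel cluster (`dview` and `map_losses` in Example #1 come from elsewhere in the original module):

import numpy as np
import ipyparallel as ipp

rc = ipp.Client()
dview = rc[:]

# copy the shared array to every engine once
dview.push(dict(shared=np.arange(10)))
# one Reference per task, so each engine reuses its local copy
rshared = [ipp.Reference('shared') for _ in range(len(dview))]

def chunk_sum(shared, idx):
    return shared[idx].sum()

chunks = np.array_split(np.arange(10), len(dview))
print(dview.map_sync(chunk_sum, rshared, chunks))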
Example #2
def update_dtc_pop(pop, td):
    '''
    Input: a population of genes/alleles plus `td`, a list mapping gene
    position to parameter name.
    Output: a population of DataTC objects, i.e. pickleable containers for
    gene attributes.
    Rationale: not every gene value yields a model for which a rheobase is
    found, in which case that gene is discarded; to compensate for the loss
    in population size, more gene samples must be tested for a successful
    return from a rheobase search. Successfully sampled individuals are
    appended to the population, and their attributes are mapped onto
    corresponding virtual model objects.
    '''
    import copy
    import numpy as np
    from deap import base
    toolbox = base.Toolbox()
    Individual = ipp.Reference('Individual')  # unused below
    pop = [toolbox.clone(i) for i in pop]

    def transform(ind):
        from neuronunit.optimization.data_transport_container import DataTC
        dtc = DataTC()
        print(dtc)
        param_dict = {}
        for i, j in enumerate(ind):
            param_dict[td[i]] = str(j)
        dtc.attrs = param_dict
        dtc.evaluated = False
        return dtc

    if len(pop) > 0:
        dtcpop = list(dview.map_sync(transform, pop))
    else:
        # Here `pop` is not really a population but a single individual;
        # the variable name is kept for parsimony. Wrap the single DataTC
        # in a list (a DataTC itself is not iterable).
        dtcpop = [transform(pop)]
    return dtcpop
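A hypothetical usage sketch (the names in `td` and the gene values are illustrative only; `dview` must exist as in the other examples):

td = ['a', 'b']                   # hypothetical: gene position -> parameter name
pop = [[0.1, 0.2], [0.3, 0.4]]    # hypothetical gene values
dtcpop = update_dtc_pop(pop, td)  # one DataTC per individual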
Example #3
 def test_can_list_kwarg(self):
     """kwargs in lists are canned"""
     view = self.client[-1]
     view['a'] = 128
     rA = pmod.Reference('a')
     ar = view.apply_async(lambda x=5: x, x=[rA])
     r = ar.get(5)
     self.assertEqual(r, [128])
Example #4
 def test_can_dict_kwarg(self):
     """kwargs in dicts are canned"""
     view = self.client[-1]
     view['a'] = 128
     rA = pmod.Reference('a')
     ar = view.apply_async(lambda x=5: x, dict(foo=rA))
     r = ar.get(5)
     self.assertEqual(r, dict(foo=128))
Example #5
 def test_run_newline(self):
     """test that run appends newline to files"""
     with NamedTemporaryFile('w', delete=False) as f:
         f.write("""def g():
             return 5
             """)
     v = self.client[-1]
     v.run(f.name, block=True)
     self.assertEqual(v.apply_sync(lambda f: f(), pmod.Reference('g')), 5)
Example #6
    def test_push_numpy_nocopy(self):
        import numpy
        view = self.client[:]
        a = numpy.arange(64)
        view['A'] = a

        @interactive
        def check_writeable(x):
            return x.flags.writeable

        for flag in view.apply_sync(check_writeable, pmod.Reference('A')):
            self.assertFalse(
                flag, "array is writeable, push shouldn't have pickled it")

        view.push(dict(B=a))
        for flag in view.apply_sync(check_writeable, pmod.Reference('B')):
            self.assertFalse(
                flag, "array is writeable, push shouldn't have pickled it")
Example #7
 def test_apply_reference(self):
     """view.apply(<Reference>, *args) should work"""
     v = self.client[:]
     v.scatter('n', self.client.ids, flatten=True)
     v.execute("f = lambda x: n*x")
     rf = pmod.Reference('f')
     result = v.apply_sync(rf, 5)
     expected = [5 * id for id in self.client.ids]
     self.assertEqual(result, expected)
Example #8
    def test_map_ref(self):
        """view.map works with references"""
        view = self.client[:]
        ranks = sorted(self.client.ids)
        view.scatter('rank', ranks, flatten=True)
        rrank = pmod.Reference('rank')

        amr = view.map_async(lambda x: x * 2, [rrank] * len(view))
        drank = amr.get(5)
        self.assertEqual(drank, [r * 2 for r in ranks])
Example #9
 def test_map_reference(self):
     """view.map(<Reference>, *seqs) should work"""
     v = self.client[:]
     v.scatter('n', self.client.ids, flatten=True)
     v.execute("f = lambda x,y: x*y")
     rf = pmod.Reference('f')
     nlist = list(range(10))
     mlist = nlist[::-1]
     expected = [m * n for m, n in zip(mlist, nlist)]
     result = v.map_sync(rf, mlist, nlist)
     self.assertEqual(result, expected)
Example #10
 def test_z_crash_mux(self):
     """test graceful handling of engine death (direct)"""
     self.add_engines(1)
     self.minimum_engines(2)
     eid = self.client.ids[-1]
     view = self.client[-2:]
     view.scatter('should_crash', [False, True], flatten=True)
     ar = view.apply_async(conditional_crash, ipp.Reference("should_crash"))
     self.assertRaisesRemote(error.EngineError, ar.get, 10)
     tic = time.perf_counter()
     while eid in self.client.ids and time.perf_counter() - tic < 5:
         time.sleep(0.05)
     assert eid not in self.client.ids
Example #11
 def __next__(self):
     it_name = '_%s_iter' % self.name
     self.view.execute('%s = iter(%s)' % (it_name, self.name), block=True)
     # use '.__next__' (the Python 2 '.next' method no longer exists)
     next_ref = ipyparallel.Reference(it_name + '.__next__')
     while True:
         try:
             yield self.view.apply_sync(next_ref)
         # The remote StopIteration arrives as a RemoteError; returning
         # (rather than re-raising StopIteration) ends this generator
         # cleanly under PEP 479.
         except ipyparallel.RemoteError as e:
             if e.ename == 'StopIteration':
                 return
             else:
                 raise
Example #12
def remote_iterator(view, name):
    """Return an iterator on an object living on a remote engine."""
    view.execute(f'it{name}=iter({name})', block=True)
    while True:
        try:
            result = view.apply_sync(next, ipp.Reference('it' + name))
        # The remote StopIteration arrives wrapped in a RemoteError;
        # returning ends this generator cleanly under PEP 479.
        except RemoteError as e:
            if e.ename == 'StopIteration':
                return
            else:
                raise
        else:
            yield result
Example #13
def remote_iterator(view, name):
    """Return an iterator on an object living on a remote engine."""
    it_name = '_%s_iter' % name
    view.execute('%s = iter(%s)' % (it_name,name), block=True)
    ref = parallel.Reference(it_name)
    while True:
        try:
            yield view.apply_sync(next, ref)
        # The remote StopIteration arrives wrapped in a RemoteError;
        # returning ends this generator cleanly under PEP 479.
        except parallel.RemoteError as e:
            if e.ename == 'StopIteration':
                return
            else:
                raise
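A minimal usage sketch for remote_iterator, assuming a running cluster and the `parallel` alias used above:

rc = parallel.Client()
view = rc[-1]
view['data'] = [1, 2, 3]  # place a list in the engine's namespace
for item in remote_iterator(view, 'data'):
    print(item)           # fetches 1, 2, 3 one element at a time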
Example #14
def import_list(ipp, subset, NDIM):
    Individual = ipp.Reference('Individual')  # unused below
    from deap import base, creator, tools
    import deap
    import random
    history = deap.tools.History()
    toolbox = base.Toolbox()
    import numpy as np
    ##
    # Range of the genes:
    ##
    BOUND_LOW = [np.min(i) for i in subset.values()]
    BOUND_UP = [np.max(i) for i in subset.values()]

    ##
    # NDIM: number of objectives/error functions
    ##
    def uniform(low, up, size=None):
        try:
            return [random.uniform(a, b) for a, b in zip(low, up)]
        except TypeError:
            return [
                random.uniform(a, b) for a, b in zip([low] * size, [up] * size)
            ]

    # the weights vector should complement a numpy matrix of eigenvalues and other values
    weights = tuple(-1.0 for i in range(0, NDIM))
    creator.create("FitnessMin", base.Fitness, weights=weights)
    creator.create("Individual", list, fitness=creator.FitnessMin)
    toolbox.register("attr_float", uniform, BOUND_LOW, BOUND_UP, len(BOUND_UP))
    toolbox.register("individual", tools.initIterate, creator.Individual,
                     toolbox.attr_float)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("select", tools.selNSGA2)
    toolbox.register("mate",
                     tools.cxSimulatedBinaryBounded,
                     low=BOUND_LOW,
                     up=BOUND_UP,
                     eta=30.0)
    toolbox.register("mutate",
                     tools.mutPolynomialBounded,
                     low=BOUND_LOW,
                     up=BOUND_UP,
                     eta=20.0,
                     indpb=1.0 / NDIM)
    return toolbox, tools, history, creator, base
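A hypothetical usage sketch for import_list (`subset` maps parameter names to value ranges; the ranges here are illustrative):

import ipyparallel as ipp
subset = {'a': [0.0, 1.0], 'b': [10.0, 20.0]}  # hypothetical gene ranges
toolbox, tools, history, creator, base = import_list(ipp, subset, 2)
pop = toolbox.population(n=8)  # 8 random two-gene individuals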
Example #15
def pwordfreq(view, fnames):
    """Parallel word frequency counter.

    view - An IPython DirectView
    fnames - The filenames containing the split data.
    """
    from itertools import repeat  # for the zero-initialised freqs dict below

    assert len(fnames) == len(view.targets)
    view.scatter('fname', fnames, flatten=True)
    ar = view.apply(wordfreq, ipp.Reference('fname'))
    freqs_list = ar.get()
    word_set = set()
    for f in freqs_list:
        word_set.update(f.keys())
    freqs = dict(zip(word_set, repeat(0)))
    for f in freqs_list:
        for word, count in f.items():
            freqs[word] += count
    return freqs
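Example #15 assumes a `wordfreq` helper already defined on the engines; a minimal sketch, for illustration only:

def wordfreq(fname):
    """Count word frequencies in a single file."""
    freqs = {}
    with open(fname) as f:
        for word in f.read().split():
            freqs[word] = freqs.get(word, 0) + 1
    return freqs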
Example #16
    def test_nested_getitem_setitem(self):
        """get and set with view['a.b']"""
        view = self.client[-1]
        view.execute('\n'.join([
            'class A(object): pass',
            'a = A()',
            'a.b = 128',
            ]), block=True)
        ra = pmod.Reference('a')

        r = view.apply_sync(lambda x: x.b, ra)
        self.assertEqual(r, 128)
        self.assertEqual(view['a.b'], 128)

        view['a.b'] = 0

        r = view.apply_sync(lambda x: x.b, ra)
        self.assertEqual(r, 0)
        self.assertEqual(view['a.b'], 0)
Example #17
    def test_return_exceptions_postmortem(self):
        self.minimum_engines(3)
        dv = self.client[:]
        bad_id = dv.targets[1]
        dv.scatter("rank", dv.targets, flatten=True)

        def fail_on_bad_id(rank, bad_id):
            if rank == bad_id:
                raise ValueError(f"{rank} is bad!")
            return rank

        ar = dv.apply_async(fail_on_bad_id, ipp.Reference('rank'), bad_id)
        with raises_remote(ValueError):
            ar.get()

        result = ar.get(return_exceptions=True)
        assert result[:1] == dv.targets[:1]
        assert result[2:] == dv.targets[2:]
        assert isinstance(result[1], ipp.RemoteError)
Example #18
    def test_wait_first_exception(self):
        dv = self.client[:]

        def fail(i):
            print(i)
            import time

            if i == 0:
                print(1 / i)
            time.sleep(1)
            return i

        dv.scatter('rank', range(len(dv)), flatten=True, block=True)
        amr = dv.apply_async(fail, ipp.Reference("rank"))
        tic = time.perf_counter()
        done, pending = amr.wait(timeout=5, return_when=ipp.FIRST_EXCEPTION)
        assert done
        assert len(done) == 1
        first_done = done.pop()
        assert first_done.msg_ids == amr.msg_ids[:1]
        assert amr.wait(timeout=10) == True
        done, pending = amr.wait(timeout=0, return_when=ipp.FIRST_EXCEPTION)
        assert pending == set()
        assert len(done) == len(amr)
Example #19
    view['u_hist'] = []

    # set vector/scalar implementation details
    impl = {}
    impl['ic'] = 'vectorized'
    impl['inner'] = 'scalar'
    impl['bc'] = 'vectorized'

    # execute some files so that the classes we need will be defined on the engines:
    view.run('RectPartitioner.py')
    view.run('wavesolver.py')

    # setup remote partitioner
    # note that Reference means that the argument passed to setup_partitioner will be the
    # object named 'my_id' in the engine's namespace
    view.apply_sync(setup_partitioner, ipp.Reference('my_id'), num_procs, grid,
                    partition)
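    # (For illustration, assuming 'my_id' was scattered to the engines
    #  earlier, e.g. view.scatter('my_id', range(len(view)), flatten=True):
    #  view.apply_sync(lambda x: x, ipp.Reference('my_id')) then returns
    #  each engine's own my_id.)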
    # wait for initial communication to complete
    view.execute('mpi.barrier()')
    # setup remote solvers
    view.apply_sync(
        setup_solver,
        I,
        f,
        c,
        bc,
        Lx,
        Ly,
        partitioner=ipp.Reference('partitioner'),
        dt=0,
        implementation=impl,
Example #20
#create dataset (assumes rc = ipp.Client(), dview = rc[:], and the usual
#numpy/pandas imports from earlier in the notebook)
data = np.random.uniform(-10, 10, size=(5000000, 3))
df = pd.DataFrame(data, columns=('x1', 'x2', 'x3'))

num_cores = 6
df_split = np.array_split(df, num_cores, axis=0)

#load one dataframe chunk onto each engine
for i in dview.targets:
    print('chunk:', i, 'df shape:', df_split[i].shape)
    rc[i]['df'] = df_split[i]

#create a reference to each engine's dataframe
#this allows you to pass the reference instead of the data
rdf = [ipp.Reference('df') for i in range(num_cores)]

def map_nunique(df):
    return df.nunique()

def reduce_nunique(x):
    return pd.concat(x).groupby(level=0).sum()
    
%%time
reduce_nunique(dview.map_sync(map_nunique,rdf))

%%time
df.nunique()

def map_nearest_neighbors(df,qry,k=10):
    euclid = np.sum((df.values-qry)**2,axis=1)
Example #21
pub_url = root.apply_sync(lambda: com.pub_url)  # noqa: F821

# gather the connection information into a dict
ar = view.apply_async(lambda: com.info)  # noqa: F821
peers = ar.get_dict()

# this is a dict, keyed by engine ID, of the connection info for the EngineCommunicators


# connect the engines to each other:
def connect(com, peers, tree, pub_url, root_id):
    """this function will be called on the engines"""
    com.connect(peers, tree, pub_url, root_id)


view.apply_sync(connect, ipp.Reference('com'), peers, btree, pub_url, root_id)


# functions that can be used for reductions
# max and min builtins can be used as well
def add(a, b):
    """cumulative sum reduction"""
    return a + b


def mul(a, b):
    """cumulative product reduction"""
    return a * b


view['add'] = add
Example #22
import time

import ipyparallel as parallel

# create client & view
rc = parallel.Client()
dv = rc[:]
v = rc.load_balanced_view()

# scatter 'id', so id=0,1,2 on engines 0,1,2
dv.scatter('id', rc.ids, flatten=True)
print("Engine IDs: ", dv['id'])

# create a Reference to `id`. This will be a different value on each engine
ref = parallel.Reference('id')
print("sleeping for `id` seconds on each engine")
tic = time.time()
ar = dv.apply(time.sleep, ref)
for i, r in enumerate(ar):
    print("%i: %.3f" % (i, time.time() - tic))


def sleep_here(t):
    import time
    time.sleep(t)
    return id, t


# one call per task
print("running with one call per task")
Example #23
 def test_reference_nameerror(self):
     v = self.client[self.client.ids[0]]
     r = pmod.Reference('elvis_has_left')
     echo = lambda x: x
     self.assertRaisesRemote(NameError, v.apply_sync, echo, r)
Example #24
 def test_eval_reference(self):
     v = self.client[self.client.ids[0]]
     v['g'] = list(range(5))
     rg = pmod.Reference('g[0]')
     echo = lambda x: x
     self.assertEqual(v.apply_sync(echo, rg), 0)
Example #25
    # create the engine connectors
    view.execute('com = EngineCommunicator()')

    # gather the connection information into a single dict
    ar = view.apply_async(lambda: com.info)  # noqa: F821
    peers = ar.get_dict()
    # print peers
    # this is a dict, keyed by engine ID, of the connection info for the EngineCommunicators

    # setup remote partitioner
    # note that Reference means that the argument passed to setup_partitioner will be the
    # object named 'com' in the engine's namespace
    view.apply_sync(
        setup_partitioner,
        ipp.Reference('com'),
        peers,
        ipp.Reference('my_id'),
        num_procs,
        grid,
        partition,
    )
    time.sleep(1)
    # convenience lambda to call solver.solve:
    _solve = lambda *args, **kwargs: solver.solve(*args, **kwargs)

    if ns.scalar:
        impl['inner'] = 'scalar'
        # setup remote solvers
        view.apply_sync(
            setup_solver,
Example #26
import time

import ipyparallel as ipp

# create client & view
rc = ipp.Client()
dv = rc[:]
v = rc.load_balanced_view()

# scatter 'id', so id=0,1,2 on engines 0,1,2
dv.scatter('id', rc.ids, flatten=True)
print("Engine IDs: ", dv['id'])

# create a Reference to `id`. This will be a different value on each engine
ref = ipp.Reference('id')
print("sleeping for `id` seconds on each engine")
tic = time.time()
ar = dv.apply(time.sleep, ref)
for i, r in enumerate(ar):
    print("%i: %.3f" % (i, time.time() - tic))


def sleep_here(t):
    import time
    time.sleep(t)
    return id, t


# one call per task
print("running with one call per task")
Example #27
 def test_remote_reference(self):
     v = self.client[-1]
     v['a'] = 123
     ra = pmod.Reference('a')
     b = v.apply_sync(lambda x: x, ra)
     self.assertEqual(b, 123)