def run_hnn_core(backend=None, n_jobs=1): """Test to check if hnn-core does not break.""" # small snippet of data on data branch for now. To be deleted # later. Data branch should have only commit so it does not # pollute the history. data_url = ('https://raw.githubusercontent.com/jonescompneurolab/' 'hnn-core/test_data/dpl.txt') if not op.exists('dpl.txt'): _fetch_file(data_url, 'dpl.txt') dpl_master = loadtxt('dpl.txt') hnn_core_root = op.dirname(hnn_core.__file__) # default params params_fname = op.join(hnn_core_root, 'param', 'default.json') params = read_params(params_fname) # run the simulation net = Network(params) if backend == 'mpi': with MPIBackend(n_procs=2, mpi_cmd='mpiexec'): dpl = simulate_dipole(net)[0] elif backend == 'joblib': with JoblibBackend(n_jobs=n_jobs): dpl = simulate_dipole(net)[0] else: dpl = simulate_dipole(net)[0] # write the dipole to a file and compare fname = './dpl2.txt' dpl.write(fname) dpl_pr = loadtxt(fname) assert_array_equal(dpl_pr[:, 2], dpl_master[:, 2]) # L2 assert_array_equal(dpl_pr[:, 3], dpl_master[:, 3]) # L5 # Test spike type counts spiketype_counts = {} for spikegid in net.spikes.gids[0]: if net.gid_to_type(spikegid) not in spiketype_counts: spiketype_counts[net.gid_to_type(spikegid)] = 0 else: spiketype_counts[net.gid_to_type(spikegid)] += 1 assert 'common' not in spiketype_counts assert 'exgauss' not in spiketype_counts assert 'extpois' not in spiketype_counts assert spiketype_counts == { 'evprox1': 269, 'L2_basket': 54, 'L2_pyramidal': 113, 'L5_pyramidal': 395, 'L5_basket': 85, 'evdist1': 234, 'evprox2': 269 }
def _run(self, banner=True, sim_length=None):
    """Run a simulation under MPI, halving the core count on startup failure.

    Loops until the simulation starts successfully, a kill is requested,
    or the core count cannot be reduced further. On success, emits the
    simulation data through ``self.result_signal``.

    Parameters
    ----------
    banner : bool
        Not used in this body — presumably kept for interface
        compatibility with callers; TODO confirm.
    sim_length : float | None
        If given, overrides 'tstop' in the simulation parameters
        (rounded to 8 decimal places).

    Raises
    ------
    RuntimeError
        If no cores are available, the run was killed from another
        thread, or the simulation fails to start even on a single core.
    """
    # clear any stale kill flag from a previous run
    with self.killed_lock:
        self.killed = False

    # make copy of params dict in Params object before
    # modifying tstop
    sim_params = hnn_core_compat_params(self.params)
    if sim_length is not None:
        sim_params['tstop'] = round(sim_length, 8)

    while True:
        if self.ncore == 0:
            raise RuntimeError("No cores available for simulation")

        try:
            # route simulator stdout into the GUI's simulation log window
            sim_log = self._log_sim_status(parent=self)
            with redirect_stdout(sim_log):
                # create the network from the parameter file
                # Note: NEURON objects haven't been created yet
                net = Network(sim_params, add_drives_from_params=True)
                with MPIBackend(
                        n_procs=self.ncore, mpi_cmd='mpiexec') as backend:
                    # expose the backend so another thread can terminate it
                    self.backend = backend
                    # last-chance kill check before committing to the run
                    with self.killed_lock:
                        if self.killed:
                            raise RuntimeError("Terminated")
                    sim_data = simulate(net)
                self.backend = None
            break
        except RuntimeError as e:
            if self.ncore == 1:
                # can't reduce ncore any more
                print(str(e))
                self._updatewaitsimwin(str(e))
                raise RuntimeError("Simulation failed to start")

            # check if proc was killed before retrying with fewer cores
            with self.killed_lock:
                if self.killed:
                    raise RuntimeError("Terminated")

            # retry with half the cores (rounded up)
            self.ncore = ceil(self.ncore / 2)
            txt = "INFO: Failed starting simulation, retrying with %d cores" \
                % self.ncore
            print(txt)
            self._updatewaitsimwin(txt)

    # put sim_data into the val attribute of a ResultObj
    self.result_signal.sig.emit(ResultObj(sim_data, self.params))
def _run_hnn_core_fixture(backend=None, n_procs=None, n_jobs=1, reduced=False,
                          record_vsoma=False, record_isoma=False,
                          postproc=True):
    """Run a dipole simulation with the default params and return results.

    Parameters
    ----------
    backend : str | None
        'mpi' for MPIBackend, 'joblib' for JoblibBackend, anything else
        (including None) uses the default backend.
    n_procs : int | None
        Number of MPI processes; only used when ``backend == 'mpi'``.
    n_jobs : int
        Number of joblib jobs; only used when ``backend == 'joblib'``.
    reduced : bool
        If True, shrink the network and shorten the simulation for speed.
    record_vsoma : bool
        Whether to record somatic voltages.
    record_isoma : bool
        Whether to record somatic currents.
    postproc : bool
        Whether to apply dipole post-processing.

    Returns
    -------
    dpls : list
        The simulated dipole(s), one per trial.
    net : Network
        The network object after simulation.
    """
    hnn_core_root = op.dirname(hnn_core.__file__)

    # default params
    params_fname = op.join(hnn_core_root, 'param', 'default.json')
    params = read_params(params_fname)

    if reduced:
        params.update({'N_pyr_x': 3,
                       'N_pyr_y': 3,
                       'tstop': 25,
                       't_evprox_1': 5,
                       't_evdist_1': 10,
                       't_evprox_2': 20,
                       'N_trials': 2})
    net = Network(params)

    # number of trials simulated
    assert all(len(src_feed_times) == params['N_trials']
               for src_type, src_feed_times in net.feed_times.items()
               if src_type != 'tonic')

    # BUG FIX: record_vsoma/record_isoma were previously swapped when
    # forwarded to simulate_dipole; each flag now maps to its namesake.
    sim_kwargs = dict(record_vsoma=record_vsoma, record_isoma=record_isoma,
                      postproc=postproc)
    if backend == 'mpi':
        with MPIBackend(n_procs=n_procs, mpi_cmd='mpiexec'):
            dpls = simulate_dipole(net, **sim_kwargs)
    elif backend == 'joblib':
        with JoblibBackend(n_jobs=n_jobs):
            dpls = simulate_dipole(net, **sim_kwargs)
    else:
        dpls = simulate_dipole(net, **sim_kwargs)

    return dpls, net
def _run_hnn_core_fixture(backend=None, n_procs=None, n_jobs=1, reduced=False,
                          record_vsoma=False, record_isoma=False,
                          postproc=True):
    """Run a dipole simulation with the default params and return results.

    Parameters
    ----------
    backend : str | None
        'mpi' for MPIBackend, 'joblib' for JoblibBackend, anything else
        (including None) uses the default backend.
    n_procs : int | None
        Number of MPI processes; only used when ``backend == 'mpi'``.
    n_jobs : int
        Number of joblib jobs; only used when ``backend == 'joblib'``.
    reduced : bool
        If True, shrink the network and shorten the simulation for speed.
    record_vsoma : bool
        Whether to record somatic voltages.
    record_isoma : bool
        Whether to record somatic currents.
    postproc : bool
        Whether to apply dipole post-processing.

    Returns
    -------
    dpls : list
        The simulated dipole(s), one per trial.
    net : Network
        The network object after simulation.
    """
    hnn_core_root = op.dirname(hnn_core.__file__)

    # default params
    params_fname = op.join(hnn_core_root, 'param', 'default.json')
    params = read_params(params_fname)

    if reduced:
        params.update({'N_pyr_x': 3,
                       'N_pyr_y': 3,
                       'tstop': 40,
                       't_evprox_1': 5,
                       't_evdist_1': 10,
                       't_evprox_2': 20,
                       'N_trials': 2})
    net = Network(params, add_drives_from_params=True)

    # number of trials simulated
    for drive in net.external_drives.values():
        assert len(drive['events']) == params['N_trials']

    # BUG FIX: record_vsoma/record_isoma were previously swapped when
    # forwarded to simulate_dipole; each flag now maps to its namesake.
    sim_kwargs = dict(record_vsoma=record_vsoma, record_isoma=record_isoma,
                      postproc=postproc)
    if backend == 'mpi':
        with MPIBackend(n_procs=n_procs, mpi_cmd='mpiexec'):
            dpls = simulate_dipole(net, **sim_kwargs)
    elif backend == 'joblib':
        with JoblibBackend(n_jobs=n_jobs):
            dpls = simulate_dipole(net, **sim_kwargs)
    else:
        dpls = simulate_dipole(net, **sim_kwargs)

    return dpls, net
def _run_hnn_core_fixture(backend=None, n_procs=None, n_jobs=1, reduced=False,
                          record_vsoma=False, record_isoma=False,
                          postproc=False, electrode_array=None):
    """Run a dipole simulation with the default params and return results.

    Parameters
    ----------
    backend : str | None
        'mpi' for MPIBackend, 'joblib' for JoblibBackend, anything else
        (including None) uses the default backend.
    n_procs : int | None
        Number of MPI processes; only used when ``backend == 'mpi'``.
    n_jobs : int
        Number of joblib jobs; only used when ``backend == 'joblib'``.
    reduced : bool
        If True, shrink the network and shorten the simulation (40 ms
        instead of 170 ms) for speed.
    record_vsoma : bool
        Whether to record somatic voltages.
    record_isoma : bool
        Whether to record somatic currents.
    postproc : bool
        Whether to apply dipole post-processing.
    electrode_array : dict | None
        Mapping of array name -> electrode positions to add to the net.

    Returns
    -------
    dpls : list
        The simulated dipole(s), one per trial.
    net : Network
        The network object after simulation.
    """
    hnn_core_root = op.dirname(hnn_core.__file__)

    # default params
    params_fname = op.join(hnn_core_root, 'param', 'default.json')
    params = read_params(params_fname)

    tstop = 170.
    if reduced:
        params.update({'N_pyr_x': 3,
                       'N_pyr_y': 3,
                       't_evprox_1': 5,
                       't_evdist_1': 10,
                       't_evprox_2': 20,
                       'N_trials': 2})
        tstop = 40.
    net = jones_2009_model(params, add_drives_from_params=True)
    if electrode_array is not None:
        for name, positions in electrode_array.items():
            net.add_electrode_array(name, positions)

    # BUG FIX: record_vsoma/record_isoma were previously swapped when
    # forwarded to simulate_dipole; each flag now maps to its namesake.
    sim_kwargs = dict(record_vsoma=record_vsoma, record_isoma=record_isoma,
                      postproc=postproc, tstop=tstop)
    if backend == 'mpi':
        with MPIBackend(n_procs=n_procs, mpi_cmd='mpiexec'):
            dpls = simulate_dipole(net, **sim_kwargs)
    elif backend == 'joblib':
        with JoblibBackend(n_jobs=n_jobs):
            dpls = simulate_dipole(net, **sim_kwargs)
    else:
        dpls = simulate_dipole(net, **sim_kwargs)

    # check that the network object is picklable after the simulation
    pickle.dumps(net)

    # number of trials simulated
    for drive in net.external_drives.values():
        assert len(drive['events']) == params['N_trials']

    return dpls, net
def test_run_mpibackend_oversubscribed(self, run_hnn_core_fixture):
    """MPIBackend should start even when n_procs exceeds the core count."""
    # load the default parameter file shipped with hnn-core
    param_path = op.join(op.dirname(hnn_core.__file__), 'param',
                         'default.json')
    params = read_params(param_path)

    # shrink the network and shorten the drives to keep the run fast
    reduced = {'N_pyr_x': 3, 'N_pyr_y': 3, 't_evprox_1': 5,
               't_evdist_1': 10, 't_evprox_2': 20, 'N_trials': 2}
    params.update(reduced)
    net = jones_2009_model(params, add_drives_from_params=True)

    # deliberately request 50% more processes than available cores
    n_over = round(cpu_count() * 1.5)
    with MPIBackend(n_procs=n_over) as backend:
        assert backend.n_procs == n_over
        simulate_dipole(net, tstop=40)
def test_terminate_mpibackend(self, run_hnn_core_fixture):
    """Test terminating MPIBackend from thread.

    Starts a background thread that repeatedly kills MPIBackend child
    processes until signalled, then checks that the simulation fails with
    the expected RuntimeError and emits the expected warning.
    """
    hnn_core_root = op.dirname(hnn_core.__file__)
    params_fname = op.join(hnn_core_root, 'param', 'default.json')
    params = read_params(params_fname)
    # reduced network / shortened drives so the run is quick to start
    params.update({'N_pyr_x': 3,
                   'N_pyr_y': 3,
                   'tstop': 40,
                   't_evprox_1': 5,
                   't_evdist_1': 10,
                   't_evprox_2': 20,
                   'N_trials': 2})
    net = Network(params, add_drives_from_params=True)

    with MPIBackend() as backend:
        event = Event()
        # start background thread that will kill all MPIBackends
        # until event.set()
        kill_t = Thread(target=_terminate_mpibackend,
                        args=(event, backend))
        # make thread a daemon in case we throw an exception
        # and don't run event.set() so that py.test will
        # not hang before exiting
        kill_t.daemon = True
        kill_t.start()

        # the killed simulation must surface as a RuntimeError with the
        # MPI return code, plus a UserWarning about the child process
        with pytest.warns(UserWarning) as record:
            with pytest.raises(
                    RuntimeError,
                    match="MPI simulation failed. Return code: 1"):
                simulate_dipole(net)

        # stop the killer thread before leaving the backend context
        event.set()
    expected_string = "Child process failed unexpectedly"
    assert expected_string in record[0].message.args[0]
# The occurrence of each burst is jittered by a random, normally distributed # amount (20 ms standard deviation). We repeat the burst train 10 times, each # time with unique randomization. net = jones_2009_model() weights_ampa = {'L2_pyramidal': 5.4e-5, 'L5_pyramidal': 5.4e-5} net.add_bursty_drive('bursty', tstart=50., burst_rate=10, burst_std=20., numspikes=2, spike_isi=10, n_drive_cells=10, location='distal', weights_ampa=weights_ampa, event_seed=8) ############################################################################### # Finally, to simulate we use the # :class:`~hnn_core.parallel_backends.MPIBackend` class. This will # start the simulation across the number of processors (cores) specified by # ``n_procs`` using MPI. The ``'mpiexec'`` launcher is used from # ``openmpi``, which must be installed on the system from hnn_core import MPIBackend with MPIBackend(n_procs=2, mpi_cmd='mpiexec'): dpls = simulate_dipole(net, tstop=310., n_trials=1) trial_idx = 0 dpls[trial_idx].plot()
def test_mpi_nprocs(self):
    """MPIBackend should detect and use more than one processor."""
    # The MPIBackend tests are only meaningful on parallel hardware;
    # fail here if just a single processor was detected.
    with MPIBackend() as parallel_backend:
        detected_procs = parallel_backend.n_procs
        assert detected_procs > 1
params_fname = op.join(hnn_core_root, 'param', 'default.json') params = read_params(params_fname) ############################################################################### # Let's first simulate the dipole with some initial parameters. The parameter # definitions also contain the drives. Even though we could add drives # explicitly through our API # (see :ref:`sphx_glr_auto_examples_workflows_plot_simulate_evoked.py`), # for conciseness, # we add them automatically from the parameter files scale_factor = 3000. smooth_window_len = 30. tstop = exp_dpl.times[-1] net = jones_2009_model(params=params, add_drives_from_params=True) with MPIBackend(n_procs=n_procs): print("Running simulation with initial parameters") initial_dpl = simulate_dipole(net, tstop=tstop, n_trials=1)[0] initial_dpl = initial_dpl.scale(scale_factor).smooth(smooth_window_len) ############################################################################### # Now we start the optimization! from hnn_core.optimization import optimize_evoked with MPIBackend(n_procs=n_procs): net_opt = optimize_evoked(net, tstop=tstop, n_trials=1, target_dpl=exp_dpl, initial_dpl=initial_dpl,
'L5_basket': 0.5, 'L5_pyramidal': 0.5 } net.add_bursty_drive('beta_dist', tstart=0., burst_rate=10, burst_std=burst_std, numspikes=2, spike_isi=10, repeats=10, location=location, weights_ampa=weights_ampa_d, synaptic_delays=syn_delays_d, seedcore=3) with MPIBackend(n_procs=4): dpls_beta = simulate_dipole(net, n_trials=1) trial_idx = 0 decim = [10, 10] # decimate by a factor of 100 fig, axes = plt.subplots(4, 1, sharex=True, figsize=(6, 8), gridspec_kw={'height_ratios': [1, 1, 2, 4]}) net.cell_response.plot_spikes_hist(ax=axes[0], spike_types=['beta_dist'], show=False) net.cell_response.plot_spikes_hist(ax=axes[1], spike_types=['beta_prox'], show=False)