def finalize(self, sim):
    """Convert the NEST-produced gdf spike files into the SONATA spikes/ outputs.

    TODO: Create a gdf_adaptor in bmtk/utils/reports/spike_trains to improve
    conversion speed.
    """
    if MPI_RANK == 0:
        # Only rank 0 folds the raw gdf files into the spike writer, mapping
        # NEST ids through the network's gid map.
        for gdf_path in glob.glob(self._spike_labels + '*.gdf'):
            self.__parse_gdf(gdf_path, sim.net.gid_map)
    io.barrier()

    # Emit each requested output format in turn, keeping the configured sort order.
    if self._csv_fname is not None:
        self._spike_writer.to_csv(self._csv_fname, sort_order=self._sort_order)
    if self._h5_fname is not None:
        # TODO: reimplement with pandas
        self._spike_writer.to_sonata(self._h5_fname, sort_order=self._sort_order)
    if self._nwb_fname is not None:
        self._spike_writer.to_nwb(self._nwb_fname, sort_order=self._sort_order)

    self._spike_writer.close()
    self.__clean_gdf_files()
def finalize(self, sim):
    """Convert the NEST gdf spike output into the SONATA spikes/ outputs.

    TODO: Create a gdf_adaptor in bmtk/utils/reports/spike_trains to improve
    conversion speed.
    """
    if MPI_RANK == 0:
        # Rank 0 alone loads the spike files into the writer, translating NEST
        # ids through the network's gid map.
        read_spikes_file(spike_trains_writer=self._spike_writer,
                         gid_map=sim.net.gid_map,
                         label=self._spike_labels)
    io.barrier()

    # Write out every requested format with the configured sort order.
    if self._csv_fname is not None:
        self._spike_writer.to_csv(self._csv_fname, sort_order=self._sort_order)
    if self._h5_fname is not None:
        # TODO: reimplement with pandas
        self._spike_writer.to_sonata(self._h5_fname, sort_order=self._sort_order)
    if self._nwb_fname is not None:
        self._spike_writer.to_nwb(self._nwb_fname, sort_order=self._sort_order)

    self._spike_writer.close()
    self._clean_files()
def finalize(self, sim):
    """Gather the per-rank gdf spike files and write the requested report formats.

    A barrier follows each output stage so all ranks stay in step.
    """
    if MPI_RANK == 0:
        for spikes_path in glob.glob(self._spike_labels + '*.gdf'):
            self._spike_writer.add_spikes_file(spikes_path)
    io.barrier()

    nestid2gid = sim._graph._nestid2gid
    # Each (filename, writer) pair is emitted only when configured, but the
    # barrier always runs so every rank performs the same number of barriers.
    for out_fname, write_fn in ((self._csv_fname, self._spike_writer.to_csv),
                                (self._h5_fname, self._spike_writer.to_hdf5),
                                (self._nwb_fname, self._spike_writer.to_nwb)):
        if out_fname is not None:
            write_fn(out_fname, sort_order=self._sort_order, gid_map=nestid2gid)
        io.barrier()

    self._spike_writer.close()
def run(self, tstop=None):
    """Run the NEST simulation for ``tstop`` ms (defaults to the configured stop time).

    Initializes all registered modules, advances NEST, then finalizes the
    modules, with MPI barriers between each phase.
    """
    tstop = self._tstop if tstop is None else tstop

    for mod in self._mods:
        mod.initialize(self)
    io.barrier()
    io.log_info('Starting Simulation')

    # _get_block_trial splits the run: n > 0 means simulate n blocks of
    # data_res each (plus a remainder of res time-steps); n < 0 means a
    # single Simulate call over the whole duration.
    n_blocks, remainder, block_len = self._get_block_trial(tstop)
    if n_blocks > 0:
        for _ in moves.range(n_blocks):
            nest.Simulate(block_len)
    if remainder > 0:
        nest.Simulate(remainder * self.dt)
    if n_blocks < 0:
        nest.Simulate(tstop)
    io.barrier()

    io.log_info('Simulation finished, finalizing results.')
    for mod in self._mods:
        mod.finalize(self)
    io.barrier()
    io.log_info('Done.')
def finalize(self, sim): io.barrier() # Makes sure all nodes finish, but not sure if actually required by nest # min_delay needs to be fetched after simulation otherwise the value will be off. There also seems to be some # MPI barrier inside GetKernelStatus self._min_delay = nest.GetKernelStatus('min_delay') # print self._min_delay if self._to_h5 and MPI_RANK == 0: for gid in self._gids: self._var_recorder.add_cell(gid, sec_list=[0], seg_list=[0.0]) # Initialize hdf5 file including preallocated data block of recorded variables # Unfortantely with NEST the final time-step recorded can't be calculated in advanced, and even with the # same min/max_delay can be different. We need to read the output-file to get n_steps def get_var_recorder(node_recording_df): if not self._var_recorder.is_initialized: self._var_recorder.tstart = node_recording_df['time'].min() self._var_recorder.tstop = node_recording_df['time'].max() self._var_recorder.dt = self._interval self._var_recorder.initialize(len(node_recording_df)) return self._var_recorder gid_map = sim.net._nestid2gid for nest_file in glob.glob('{}*'.format(self.__output_label)): report_df = pd.read_csv(nest_file, index_col=False, names=['nest_id', 'time']+self._variable_name, sep='\t') for grp_id, grp_df in report_df.groupby(by='nest_id'): gid = gid_map[grp_id] vr = get_var_recorder(grp_df) for var_name in self._variable_name: vr.record_cell_block(gid, var_name, grp_df[var_name]) if self._delete_dat: # remove csv file created by nest os.remove(nest_file) self._var_recorder.close() io.barrier()
def run(self, duration=None):
    """Run the NEST simulation for ``duration`` ms (defaults to ``self.duration``).

    Initializes all registered modules, advances NEST (in blocks when
    ``_get_block_trial`` requests them), then finalizes the modules, with
    MPI barriers between each phase.
    """
    if duration is None:
        duration = self.duration

    for mod in self._mods:
        mod.initialize(self)
    io.barrier()
    io.log_info('Starting Simulation')

    # n > 0: simulate n blocks of data_res each plus a remainder of res
    # time-steps; n < 0: a single Simulate call over the whole duration.
    n, res, data_res = self._get_block_trial(duration)
    if n > 0:
        # Fix: `xrange` is Python-2 only and raises NameError on Python 3;
        # the builtin `range` is equivalent here.
        for _ in range(n):
            nest.Simulate(data_res)
    if res > 0:
        nest.Simulate(res * self.dt)
    if n < 0:
        nest.Simulate(duration)
    io.barrier()

    io.log_info('Simulation finished, finalizing results.')
    for mod in self._mods:
        mod.finalize(self)
    io.barrier()
    io.log_info('Done.')
def finalize(self, sim):
    """Collect per-rank NEST multimeter .dat output and write it into a SONATA compartment report.

    Runs after the simulation completes. Rank 0 reads every
    '<output_label>*' file produced by NEST, groups rows by NEST id, maps
    them to (population, node_id) via the network gid map, and records each
    variable block into a lazily-created CompartmentReport.
    """
    io.barrier()  # Makes sure all nodes finish, but not sure if actually required by nest
    # min_delay needs to be fetched after simulation otherwise the value will be off. There also
    # seems to be some MPI barrier inside GetKernelStatus.
    self._min_delay = nest.GetKernelStatus('min_delay')

    if self._to_h5 and MPI_RANK == 0:
        gid_map = sim.net.gid_map

        # With NEST the final recorded time-step can't be calculated in advance (it can differ
        # even with the same min/max_delay), so the report is initialized lazily from the first
        # per-cell dataframe: its 'time' column supplies tstart/tstop and its length n_steps.
        def get_var_recorder(node_recording_df):
            if self._var_recorder is None:
                self._var_recorder = CompartmentReport(
                    self._file_name, mode='w', variable=self._variable_name[0],
                    default_population=self._population,
                    tstart=node_recording_df['time'].min(),
                    tstop=node_recording_df['time'].max(),
                    dt=self._interval, n_steps=len(node_recording_df),
                    mpi_size=1)
                # Register every recorded cell up front. (Fix: the original re-checked
                # `self._to_h5 and MPI_RANK == 0` here, which the enclosing guard already
                # guarantees, so the redundant test has been removed.)
                for gid in self._gids:
                    cell_id = gid_map.get_pool_id(gid)
                    self._var_recorder.add_cell(cell_id.node_id, element_ids=[0],
                                                element_pos=[0.0],
                                                population=cell_id.population)
                self._var_recorder.initialize()
            return self._var_recorder

        for nest_file in glob.glob('{}*'.format(self.__output_label)):
            report_df = read_dat(nest_file, self._variable_name)
            for grp_id, grp_df in report_df.groupby(by='nest_id'):
                pop_id = gid_map.get_pool_id(grp_id)
                vr = get_var_recorder(grp_df)
                for var_name in self._variable_name:
                    vr.record_cell_block(
                        node_id=pop_id.node_id, vals=grp_df[var_name], beg_step=0,
                        end_step=vr[pop_id.population].n_steps(),
                        population=pop_id.population)

            if self._delete_dat:
                # remove csv file created by nest
                os.remove(nest_file)

        # Fix: guard close() — if no output files were found the recorder was never created and
        # calling close() on None would raise AttributeError.
        if self._var_recorder is not None:
            self._var_recorder.close()
    io.barrier()