Exemplo n.º 1
0
 def _read_data(self, gather, compatible_output):
     """
     Return file-recorded data.

     `gather` -- if True, gather data from all MPI nodes.
     `compatible_output` -- if True, transform the data into the PyNN
                            standard format.

     Gathered data is cached, so the MPI communication need only be done
     once, even if the method is called multiple times.
     """
     # what if the method is called with different values of
     # `compatible_output`? Need to cache these separately.
     if not (gather and simulator.state.num_processes > 1):
         # single process, or no gather requested: local data suffices
         return self._read_local_data(compatible_output)
     if self._gathered:
         logger.debug("Loading previously gathered data from cache")
         self._gathered_file.seek(0)
         return numpy.load(self._gathered_file)
     local_data = self._read_local_data(compatible_output)
     celltype = self.population.celltype if self.population else None
     if self.population and getattr(celltype, 'always_local', False):
         data = local_data  # for always_local cells, no need to gather
     else:
         data = recording.gather(local_data)
     logger.debug("Caching gathered data")
     # keep the gathered result in a temp file so repeat calls skip MPI
     self._gathered_file = tempfile.TemporaryFile()
     numpy.save(self._gathered_file, data)
     self._gathered = True
     return data
Exemplo n.º 2
0
 def _get(self, gather=False, compatible_output=True, filter=None):
     """
     Return the recorded data as a Numpy array.

     `gather` -- if True, gather data from all MPI nodes.
     `compatible_output` -- not used; accepted for signature compatibility
                            with the nest backend.
     `filter` -- selection passed to `filter_recorded` to restrict which
                 cells' data are returned.
     """
     # compatible_output is not used, but is needed for compatibility with the nest module.
     # Does nest really need it?
     if self.variable == 'spikes':
         # columns: (cell id, spike time)
         data = numpy.empty((0, 2))
         for id in self.filter_recorded(filter):
             spikes = numpy.array(id._cell.spike_times)
             # 1e-9 tolerance guards against float round-off at time t
             spikes = spikes[spikes <= simulator.state.t + 1e-9]
             if len(spikes) > 0:
                 new_data = numpy.array(
                     [numpy.ones(spikes.shape) * id, spikes]).T
                 data = numpy.concatenate((data, new_data))
     elif self.variable == 'v':
         # columns: (cell id, time, membrane potential)
         data = numpy.empty((0, 3))
         for id in self.filter_recorded(filter):
             v = numpy.array(id._cell.vtrace)
             t = numpy.array(id._cell.record_times)
             new_data = numpy.array([numpy.ones(v.shape) * id, t, v]).T
             data = numpy.concatenate((data, new_data))
     elif self.variable == 'gsyn':
         # columns: (cell id, time, excitatory g, inhibitory g)
         data = numpy.empty((0, 4))
         for id in self.filter_recorded(filter):
             ge = numpy.array(id._cell.gsyn_trace['excitatory'])
             gi = numpy.array(id._cell.gsyn_trace['inhibitory'])
             if 'excitatory_TM' in id._cell.gsyn_trace:
                 # '_TM' traces (presumably Tsodyks-Markram synapses --
                 # TODO confirm) are recorded separately; sum them in
                 ge_TM = numpy.array(id._cell.gsyn_trace['excitatory_TM'])
                 gi_TM = numpy.array(id._cell.gsyn_trace['inhibitory_TM'])
                 if ge.size == 0:
                     ge = ge_TM
                 elif ge.size == ge_TM.size:
                     ge = ge + ge_TM
                 else:
                     # fix: the message was previously passed unformatted,
                     # with the sizes as a stray second argument
                     raise Exception(
                         "Inconsistent conductance array sizes: ge.size=%d, ge_TM.size=%d"
                         % (ge.size, ge_TM.size))
                 if gi.size == 0:
                     gi = gi_TM
                 elif gi.size == gi_TM.size:
                     gi = gi + gi_TM
                 else:
                     # fix: previously raised with no message at all
                     raise Exception(
                         "Inconsistent conductance array sizes: gi.size=%d, gi_TM.size=%d"
                         % (gi.size, gi_TM.size))
             t = numpy.array(id._cell.record_times)
             new_data = numpy.array([numpy.ones(ge.shape) * id, t, ge,
                                     gi]).T
             data = numpy.concatenate((data, new_data))
     else:
         # generic trace recorded under the name self.variable
         data = numpy.empty((0, 3))
         for id in self.filter_recorded(filter):
             var = numpy.array(id._cell.traces[self.variable])
             t = numpy.array(id._cell.record_times)
             new_data = numpy.array([numpy.ones(var.shape) * id, t, var]).T
             data = numpy.concatenate((data, new_data))
         #raise Exception("Recording of %s not implemented." % self.variable)
     if gather and simulator.state.num_processes > 1:
         data = recording.gather(data)
     return data
Exemplo n.º 3
0
 def _get(self, gather=False, compatible_output=True, filter=None):
     """
     Return the recorded data as a Numpy array.

     `gather` -- if True, gather data from all MPI nodes.
     `compatible_output` -- not used; accepted for signature compatibility
                            with the nest backend.
     `filter` -- selection passed to `filter_recorded` to restrict which
                 cells' data are returned.
     """
     # compatible_output is not used, but is needed for compatibility with the nest module.
     # Does nest really need it?
     if self.variable == "spikes":
         # columns: (cell id, spike time)
         data = numpy.empty((0, 2))
         for id in self.filter_recorded(filter):
             spikes = numpy.array(id._cell.spike_times)
             # 1e-9 tolerance guards against float round-off at time t
             spikes = spikes[spikes <= simulator.state.t + 1e-9]
             if len(spikes) > 0:
                 new_data = numpy.array([numpy.ones(spikes.shape) * id, spikes]).T
                 data = numpy.concatenate((data, new_data))
     elif self.variable == "v":
         # columns: (cell id, time, membrane potential)
         data = numpy.empty((0, 3))
         for id in self.filter_recorded(filter):
             v = numpy.array(id._cell.vtrace)
             t = numpy.array(id._cell.record_times)
             new_data = numpy.array([numpy.ones(v.shape) * id, t, v]).T
             data = numpy.concatenate((data, new_data))
     elif self.variable == "gsyn":
         # columns: (cell id, time, excitatory g, inhibitory g)
         data = numpy.empty((0, 4))
         for id in self.filter_recorded(filter):
             ge = numpy.array(id._cell.gsyn_trace["excitatory"])
             gi = numpy.array(id._cell.gsyn_trace["inhibitory"])
             if "excitatory_TM" in id._cell.gsyn_trace:
                 # '_TM' traces (presumably Tsodyks-Markram synapses --
                 # TODO confirm) are recorded separately; sum them in
                 ge_TM = numpy.array(id._cell.gsyn_trace["excitatory_TM"])
                 gi_TM = numpy.array(id._cell.gsyn_trace["inhibitory_TM"])
                 if ge.size == 0:
                     ge = ge_TM
                 elif ge.size == ge_TM.size:
                     ge = ge + ge_TM
                 else:
                     # fix: the message was previously passed unformatted,
                     # with the sizes as a stray second argument
                     raise Exception(
                         "Inconsistent conductance array sizes: ge.size=%d, ge_TM.size=%d"
                         % (ge.size, ge_TM.size)
                     )
                 if gi.size == 0:
                     gi = gi_TM
                 elif gi.size == gi_TM.size:
                     gi = gi + gi_TM
                 else:
                     # fix: previously raised with no message at all
                     raise Exception(
                         "Inconsistent conductance array sizes: gi.size=%d, gi_TM.size=%d"
                         % (gi.size, gi_TM.size)
                     )
             t = numpy.array(id._cell.record_times)
             new_data = numpy.array([numpy.ones(ge.shape) * id, t, ge, gi]).T
             data = numpy.concatenate((data, new_data))
     else:
         # generic trace recorded under the name self.variable
         data = numpy.empty((0, 3))
         for id in self.filter_recorded(filter):
             var = numpy.array(id._cell.traces[self.variable])
             t = numpy.array(id._cell.record_times)
             new_data = numpy.array([numpy.ones(var.shape) * id, t, var]).T
             data = numpy.concatenate((data, new_data))
         # raise Exception("Recording of %s not implemented." % self.variable)
     if gather and simulator.state.num_processes > 1:
         data = recording.gather(data)
     return data
Exemplo n.º 4
0
 def _read_data_from_memory(self, gather, compatible_output):
     """
     Return memory-recorded data.

     `gather` -- if True, gather data from all MPI nodes.
     `compatible_output` -- if True, transform the data into the PyNN
                            standard format.
     """
     # GetStatus returns one status entry per device; we read the first
     events = nest.GetStatus(self._device, 'events')[0]  # only for spikes?
     if compatible_output:
         events = self._add_initial_and_scale(self._events_to_array(events))
     if gather and simulator.state.num_processes > 1:
         events = recording.gather(events)
     return events
Exemplo n.º 5
0
 def read_data_from_memory(self, gather, compatible_output):
     """
     Return memory-recorded data.

     `gather` -- if True, gather data from all MPI nodes.
     `compatible_output` -- if True, transform the data into the PyNN
                            standard format.
     """
     data = nest.GetStatus(self.device, 'events')[0]
     if compatible_output:
         # convert the events dict to array form, then rescale
         data = self.scale_data(self.events_to_array(data))
     if gather and simulator.state.num_processes > 1:
         data = recording.gather(data)
         # cache the gathered result in a temp file so later reads can
         # avoid repeating the MPI communication
         self._gathered_file = tempfile.TemporaryFile()
         numpy.save(self._gathered_file, data)
         self._gathered = True
     return data
Exemplo n.º 6
0
 def read_data_from_memory(self, gather, compatible_output):
     """
     Return memory-recorded data.

     `gather` -- if True, gather data from all MPI nodes.
     `compatible_output` -- if True, transform the data into the PyNN
                            standard format.
     """
     events = nest.GetStatus(self.device, 'events')[0]
     if compatible_output:
         events = self.events_to_array(events)
         events = self.scale_data(events)
     must_gather = gather and simulator.state.num_processes > 1
     if must_gather:
         events = recording.gather(events)
         # cache the gathered array in a temp file for later reads
         self._gathered_file = tempfile.TemporaryFile()
         numpy.save(self._gathered_file, events)
         self._gathered = True
     return events
Exemplo n.º 7
0
 def _get(self, gather=False, compatible_output=True, filter=None):
     """Return the recorded data as a Numpy array."""
     # compatible_output is not used, but is needed for compatibility with the nest module.
     # Does nest really need it?
     net = simulator.net
     if self.variable == 'spikes':
         data = numpy.empty((0, 2))
         for id in self.filter_recorded(filter):
             rec = self.recorders[id]
             if isinstance(net.object(id), pypcsim.SpikingInputNeuron):
                 # is this special case really necessary?
                 spikes = 1000.0 * numpy.array(net.object(id).getSpikeTimes())
                 spikes = spikes[spikes <= simulator.state.t]
             else:
                 spikes = 1000.0 * numpy.array(net.object(rec).getSpikeTimes())
             spikes = spikes.flatten()
             spikes = spikes[spikes <= simulator.state.t + 1e-9]
             if len(spikes) == 0:
                 continue
             id_column = numpy.ones(spikes.shape, dtype=int) * id
             data = numpy.concatenate((data, numpy.array([id_column, spikes]).T))
     elif self.variable == 'v':
         data = numpy.empty((0, 3))
         for id in self.filter_recorded(filter):
             rec = self.recorders[id]
             v = 1000.0 * numpy.array(net.object(rec).getRecordedValues())
             v = v.flatten()
             # append the current Vm as the final sample of the trace
             v = numpy.append(v, 1000.0 * net.object(id).getVm())
             t = simulator.state.dt * numpy.arange(v.size)
             id_column = numpy.ones(v.shape, dtype=int) * id
             data = numpy.concatenate((data, numpy.array([id_column, t, v]).T))
     elif self.variable == 'gsyn':
         raise NotImplementedError
     else:
         raise Exception("Recording of %s not implemented." % self.variable)
     if gather and simulator.state.num_processes > 1:
         data = recording.gather(data)
     return data
Exemplo n.º 8
0
 def _get(self, gather=False, compatible_output=True, filter=None):
     """Return the recorded data as a Numpy array."""
     # compatible_output is not used, but is needed for compatibility with the nest module.
     # Does nest really need it?
     net = simulator.net
     if self.variable == "spikes":
         chunks = [numpy.empty((0, 2))]
         for id in self.filter_recorded(filter):
             rec = self.recorders[id]
             if isinstance(net.object(id), pypcsim.SpikingInputNeuron):
                 # is this special case really necessary?
                 spikes = 1000.0 * numpy.array(net.object(id).getSpikeTimes())
                 spikes = spikes[spikes <= simulator.state.t]
             else:
                 spikes = 1000.0 * numpy.array(net.object(rec).getSpikeTimes())
             spikes = spikes.flatten()
             spikes = spikes[spikes <= simulator.state.t + 1e-9]
             if len(spikes) > 0:
                 chunks.append(numpy.array([numpy.ones(spikes.shape, dtype=int) * id, spikes]).T)
         data = numpy.concatenate(chunks)
     elif self.variable == "v":
         chunks = [numpy.empty((0, 3))]
         for id in self.filter_recorded(filter):
             rec = self.recorders[id]
             trace = 1000.0 * numpy.array(net.object(rec).getRecordedValues())
             trace = trace.flatten()
             # append the current Vm as the final sample of the trace
             trace = numpy.append(trace, 1000.0 * net.object(id).getVm())
             times = simulator.state.dt * numpy.arange(trace.size)
             chunks.append(numpy.array([numpy.ones(trace.shape, dtype=int) * id, times, trace]).T)
         data = numpy.concatenate(chunks)
     elif self.variable == "gsyn":
         raise NotImplementedError
     else:
         raise Exception("Recording of %s not implemented." % self.variable)
     if gather and simulator.state.num_processes > 1:
         data = recording.gather(data)
     return data
Exemplo n.º 9
0
 def read_data(self, gather, compatible_output, always_local=False):
     """
     Return file-recorded data.

     `gather` -- if True, gather data from all MPI nodes.
     `compatible_output` -- if True, transform the data into the PyNN
                            standard format.
     `always_local` -- if True, skip the MPI gather (the caller asserts
                       every node already has the full data).

     Gathered data is cached, so the MPI communication need only be done
     once, even if the method is called multiple times.
     """
     # what if the method is called with different values of
     # `compatible_output`? Need to cache these separately.
     if not self.to_memory:
         if gather and simulator.state.num_processes > 1:
             if self._gathered:
                 logger.debug("Loading previously gathered data from cache")
                 self._gathered_file.seek(0)
                 data = numpy.load(self._gathered_file)
             else:
                 local_data = self.read_local_data(compatible_output)
                 if always_local:
                     data = local_data # for always_local cells, no need to gather
                 else:
                     logger.debug("Gathering data")
                     data = recording.gather(local_data)
                 logger.debug("Caching gathered data")
                 # cache in a temp file so repeat calls skip the MPI gather
                 self._gathered_file = tempfile.TemporaryFile()
                 numpy.save(self._gathered_file, data)
                 self._gathered = True
         else:
             data = self.read_local_data(compatible_output)
         if len(data.shape) == 1:
             # normalise a 1-D result to a single-row 2-D array
             data = data.reshape((1, data.size))
         return data
     else:
         return self.read_data_from_memory(gather, compatible_output)
Exemplo n.º 10
0
 def read_data(self, gather, compatible_output, always_local=False):
     """
     Return file-recorded data.

     `gather` -- if True, gather data from all MPI nodes.
     `compatible_output` -- if True, transform the data into the PyNN
                            standard format.

     Gathered data is cached, so the MPI communication need only be done
     once, even if the method is called multiple times.
     """
     # what if the method is called with different values of
     # `compatible_output`? Need to cache these separately.
     if self.to_memory:
         return self.read_data_from_memory(gather, compatible_output)
     gathering = gather and simulator.state.num_processes > 1
     if not gathering:
         data = self.read_local_data(compatible_output)
     elif self._gathered:
         logger.debug("Loading previously gathered data from cache")
         self._gathered_file.seek(0)
         data = numpy.load(self._gathered_file)
     else:
         local_data = self.read_local_data(compatible_output)
         if always_local:
             data = local_data  # for always_local cells, no need to gather
         else:
             logger.debug("Gathering data")
             data = recording.gather(local_data)
         logger.debug("Caching gathered data")
         self._gathered_file = tempfile.TemporaryFile()
         numpy.save(self._gathered_file, data)
         self._gathered = True
     if len(data.shape) == 1:
         # normalise a 1-D result to a single-row 2-D array
         data = data.reshape((1, data.size))
     return data
Exemplo n.º 11
0
from pyNN.recording import gather
import numpy
from mpi4py import MPI
import time

comm = MPI.COMM_WORLD

# Benchmark pyNN's MPI gather for per-rank arrays of 1 to 10**6 rows.
for x in range(7):
    N = pow(10, x)
    # each rank contributes N rows of (rank, random value)
    local_data = numpy.empty((N, 2))
    local_data[:, 0] = numpy.ones(N, dtype=float) * comm.rank
    local_data[:, 1] = numpy.random.rand(N)

    start_time = time.time()
    all_data = gather(local_data)
    # print(comm.rank, "local", local_data)
    if comm.rank == 0:
        # print("all", all_data)
        # fix: Python 2 print statement was a SyntaxError under Python 3
        print(N, time.time() - start_time)
Exemplo n.º 12
0
from pyNN.recording import gather
import numpy
from mpi4py import MPI
import time

comm = MPI.COMM_WORLD

# Benchmark pyNN's MPI gather for per-rank arrays of 1 to 10**6 rows.
for x in range(7):
    N = pow(10, x)
    # each rank contributes N rows of (rank, random value)
    local_data = numpy.empty((N, 2))
    local_data[:, 0] = numpy.ones(N, dtype=float) * comm.rank
    local_data[:, 1] = numpy.random.rand(N)

    start_time = time.time()
    all_data = gather(local_data)
    # print(comm.rank, "local", local_data)
    if comm.rank == 0:
        #    print("all", all_data)
        # fix: Python 2 print statement was a SyntaxError under Python 3
        print(N, time.time() - start_time)