Code example #1
File: modelfitting.py Project: sivaven/brian
    def initialize(self, **kwds):
        self.use_gpu = self.unit_type=='GPU'
        # Gets the key,value pairs in shared_data
        for key, val in self.shared_data.iteritems():
            setattr(self, key, val)
        # Gets the key,value pairs in **kwds
        for key, val in kwds.iteritems():
            setattr(self, key, val)

#        log_info(self.model)
        self.model = cPickle.loads(self.model)
#        log_info(self.model)

        # if model is a string
        if type(self.model) is str:
            self.model = Equations(self.model)

        self.total_steps = int(self.duration / self.dt)

        self.neurons = self.nodesize
        self.groups = self.groups

        # Time slicing
        self.input = self.input[0:self.slices * (len(self.input) / self.slices)] # makes sure that len(input) is a multiple of slices
        self.duration = len(self.input) * self.dt # duration of the input
        self.sliced_steps = len(self.input) / self.slices # timesteps per slice
        self.overlap_steps = int(self.overlap / self.dt) # timesteps during the overlap
        self.total_steps = self.sliced_steps + self.overlap_steps # total number of timesteps
        self.sliced_duration = self.overlap + self.duration / self.slices # duration of the vectorized simulation
        self.N = self.neurons * self.slices # TOTAL number of neurons in this worker

        self.input = hstack((zeros(self.overlap_steps), self.input)) # add zeros at the beginning because there is no overlap from the previous slice

        # Prepares data (generates I_offset, spiketimes, spiketimes_offset)
        self.prepare_data()

        # Add the 'refractory' parameter on the CPU only
        if not self.use_gpu:
            if self.max_refractory is not None:
                refractory = 'refractory'
                self.model.add_param('refractory', second)
            else:
                refractory = self.refractory
        else:
            if self.max_refractory is not None:
                refractory = 0*ms
            else:
                refractory = self.refractory

        # Must recompile the Equations: the functions are not transferred after pickling/unpickling
        self.model.compile_functions()

        self.group = NeuronGroup(self.N,
                                 model=self.model,
                                 reset=self.reset,
                                 threshold=self.threshold,
                                 refractory=refractory,
                                 max_refractory = self.max_refractory,
                                 method = self.method,
                                 clock=Clock(dt=self.dt))
        
        if self.initial_values is not None:
            for param, value in self.initial_values.iteritems():
                self.group.state(param)[:] = value

        # Injects current into consecutive subgroups where I_offset has the same value
        # on successive intervals
        k = -1
        for i in hstack((nonzero(diff(self.I_offset))[0], len(self.I_offset) - 1)):
            I_offset_subgroup_value = self.I_offset[i]
            I_offset_subgroup_length = i - k
            sliced_subgroup = self.group.subgroup(I_offset_subgroup_length)
            input_sliced_values = self.input[I_offset_subgroup_value:I_offset_subgroup_value + self.total_steps]
            sliced_subgroup.set_var_by_array(self.input_var, TimedArray(input_sliced_values, clock=self.group.clock))
            k = i

        if self.use_gpu:
            # Select integration scheme according to method
            if self.method == 'Euler': scheme = euler_scheme
            elif self.method == 'RK': scheme = rk2_scheme
            elif self.method == 'exponential_Euler': scheme = exp_euler_scheme
            else: raise Exception("The numerical integration method is not valid")
            
            self.mf = GPUModelFitting(self.group, self.model, self.input, self.I_offset,
                                      self.spiketimes, self.spiketimes_offset, zeros(self.neurons), 0*ms, self.delta,
                                      precision=self.precision, scheme=scheme)
        else:
            self.cc = CoincidenceCounter(self.group, self.spiketimes, self.spiketimes_offset,
                                        onset=self.onset, delta=self.delta)
Code example #2
File: modelfitting.py Project: yzerlaut/brian
    def initialize(self, **kwds):
        self.use_gpu = self.unit_type == 'GPU'
        # Gets the key,value pairs in shared_data
        for key, val in self.shared_data.iteritems():
            setattr(self, key, val)
        # Gets the key,value pairs in **kwds
        for key, val in kwds.iteritems():
            setattr(self, key, val)


#        log_info(self.model)
        self.model = cPickle.loads(self.model)
        #        log_info(self.model)

        # if model is a string
        if type(self.model) is str:
            self.model = Equations(self.model)

        self.total_steps = int(self.duration / self.dt)

        self.neurons = self.nodesize
        self.groups = self.groups

        # Time slicing
        self.input = self.input[0:self.slices * (len(self.input) / self.slices)]  # makes sure that len(input) is a multiple of slices
        self.duration = len(self.input) * self.dt  # duration of the input
        self.sliced_steps = len(self.input) / self.slices  # timesteps per slice
        self.overlap_steps = int(self.overlap / self.dt)  # timesteps during the overlap
        self.total_steps = self.sliced_steps + self.overlap_steps  # total number of timesteps
        self.sliced_duration = self.overlap + self.duration / self.slices  # duration of the vectorized simulation
        self.N = self.neurons * self.slices  # TOTAL number of neurons in this worker

        self.input = hstack((zeros(self.overlap_steps), self.input))  # add zeros at the beginning because there is no overlap from the previous slice

        # Prepares data (generates I_offset, spiketimes, spiketimes_offset)
        self.prepare_data()

        # Add the 'refractory' parameter on the CPU only
        if not self.use_gpu:
            if self.max_refractory is not None:
                refractory = 'refractory'
                self.model.add_param('refractory', second)
            else:
                refractory = self.refractory
        else:
            if self.max_refractory is not None:
                refractory = 0 * ms
            else:
                refractory = self.refractory

        # Must recompile the Equations: the functions are not transferred after pickling/unpickling
        self.model.compile_functions()

        self.group = NeuronGroup(self.N,
                                 model=self.model,
                                 reset=self.reset,
                                 threshold=self.threshold,
                                 refractory=refractory,
                                 max_refractory=self.max_refractory,
                                 method=self.method,
                                 clock=Clock(dt=self.dt))

        if self.initial_values is not None:
            for param, value in self.initial_values.iteritems():
                self.group.state(param)[:] = value

        # Injects current into consecutive subgroups where I_offset has the same value
        # on successive intervals
        k = -1
        for i in hstack(
            (nonzero(diff(self.I_offset))[0], len(self.I_offset) - 1)):
            I_offset_subgroup_value = self.I_offset[i]
            I_offset_subgroup_length = i - k
            sliced_subgroup = self.group.subgroup(I_offset_subgroup_length)
            input_sliced_values = self.input[
                I_offset_subgroup_value:I_offset_subgroup_value +
                self.total_steps]
            sliced_subgroup.set_var_by_array(
                self.input_var,
                TimedArray(input_sliced_values, clock=self.group.clock))
            k = i

        if self.use_gpu:
            # Select integration scheme according to method
            if self.method == 'Euler': scheme = euler_scheme
            elif self.method == 'RK': scheme = rk2_scheme
            elif self.method == 'exponential_Euler': scheme = exp_euler_scheme
            else:
                raise Exception(
                    "The numerical integration method is not valid")

            self.mf = GPUModelFitting(self.group,
                                      self.model,
                                      self.input,
                                      self.I_offset,
                                      self.spiketimes,
                                      self.spiketimes_offset,
                                      zeros(self.neurons),
                                      0 * ms,
                                      self.delta,
                                      precision=self.precision,
                                      scheme=scheme)
        else:
            self.cc = CoincidenceCounter(self.group,
                                         self.spiketimes,
                                         self.spiketimes_offset,
                                         onset=self.onset,
                                         delta=self.delta)
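
The current-injection loop in both examples splits the neuron population into subgroups over which I_offset is constant, using a compact NumPy idiom: nonzero(diff(x))[0] returns the last index of every run of equal values except the final run, and appending len(x) - 1 closes that final run. A standalone sketch of just that idiom, with an invented I_offset:

import numpy as np

# A step array playing the role of self.I_offset: runs of equal offsets.
I_offset = np.array([0, 0, 0, 2500, 2500, 5000, 5000, 5000, 5000])

k = -1
for i in np.hstack((np.nonzero(np.diff(I_offset))[0], len(I_offset) - 1)):
    run_value = I_offset[i]   # offset shared by this run
    run_length = i - k        # number of consecutive equal entries
    print(f"offset {run_value}: {run_length} neurons, indices {k + 1}..{i}")
    k = i
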
Code example #3
File: modelfitting.py Project: sivaven/brian
class ModelFitting(Fitness):
    def initialize(self, **kwds):
        self.use_gpu = self.unit_type=='GPU'
        # Gets the key,value pairs in shared_data
        for key, val in self.shared_data.iteritems():
            setattr(self, key, val)
        # Gets the key,value pairs in **kwds
        for key, val in kwds.iteritems():
            setattr(self, key, val)

#        log_info(self.model)
        self.model = cPickle.loads(self.model)
#        log_info(self.model)

        # if model is a string
        if type(self.model) is str:
            self.model = Equations(self.model)

        self.total_steps = int(self.duration / self.dt)

        self.neurons = self.nodesize
        self.groups = self.groups

        # Time slicing
        self.input = self.input[0:self.slices * (len(self.input) / self.slices)] # makes sure that len(input) is a multiple of slices
        self.duration = len(self.input) * self.dt # duration of the input
        self.sliced_steps = len(self.input) / self.slices # timesteps per slice
        self.overlap_steps = int(self.overlap / self.dt) # timesteps during the overlap
        self.total_steps = self.sliced_steps + self.overlap_steps # total number of timesteps
        self.sliced_duration = self.overlap + self.duration / self.slices # duration of the vectorized simulation
        self.N = self.neurons * self.slices # TOTAL number of neurons in this worker

        self.input = hstack((zeros(self.overlap_steps), self.input)) # add zeros at the beginning because there is no overlap from the previous slice

        # Prepares data (generates I_offset, spiketimes, spiketimes_offset)
        self.prepare_data()

        # Add the 'refractory' parameter on the CPU only
        if not self.use_gpu:
            if self.max_refractory is not None:
                refractory = 'refractory'
                self.model.add_param('refractory', second)
            else:
                refractory = self.refractory
        else:
            if self.max_refractory is not None:
                refractory = 0*ms
            else:
                refractory = self.refractory

        # Must recompile the Equations: the functions are not transferred after pickling/unpickling
        self.model.compile_functions()

        self.group = NeuronGroup(self.N,
                                 model=self.model,
                                 reset=self.reset,
                                 threshold=self.threshold,
                                 refractory=refractory,
                                 max_refractory = self.max_refractory,
                                 method = self.method,
                                 clock=Clock(dt=self.dt))
        
        if self.initial_values is not None:
            for param, value in self.initial_values.iteritems():
                self.group.state(param)[:] = value

        # Injects current into consecutive subgroups where I_offset has the same value
        # on successive intervals
        k = -1
        for i in hstack((nonzero(diff(self.I_offset))[0], len(self.I_offset) - 1)):
            I_offset_subgroup_value = self.I_offset[i]
            I_offset_subgroup_length = i - k
            sliced_subgroup = self.group.subgroup(I_offset_subgroup_length)
            input_sliced_values = self.input[I_offset_subgroup_value:I_offset_subgroup_value + self.total_steps]
            sliced_subgroup.set_var_by_array(self.input_var, TimedArray(input_sliced_values, clock=self.group.clock))
            k = i

        if self.use_gpu:
            # Select integration scheme according to method
            if self.method == 'Euler': scheme = euler_scheme
            elif self.method == 'RK': scheme = rk2_scheme
            elif self.method == 'exponential_Euler': scheme = exp_euler_scheme
            else: raise Exception("The numerical integration method is not valid")
            
            self.mf = GPUModelFitting(self.group, self.model, self.input, self.I_offset,
                                      self.spiketimes, self.spiketimes_offset, zeros(self.neurons), 0*ms, self.delta,
                                      precision=self.precision, scheme=scheme)
        else:
            self.cc = CoincidenceCounter(self.group, self.spiketimes, self.spiketimes_offset,
                                        onset=self.onset, delta=self.delta)

    def prepare_data(self):
        """
        Generates I_offset, spiketimes, spiketimes_offset from the data,
        as well as target_length and target_rates.

        The neurons are first grouped by time slice: there are
        group_size*group_count neurons per group/time slice.
        Within each time slice, the neurons are grouped by target train:
        there are group_size neurons per group/target train.
        """
        # Generates I_offset
        self.I_offset = zeros(self.N, dtype=int)
        for slice in range(self.slices):
            self.I_offset[self.neurons * slice:self.neurons * (slice + 1)] = self.sliced_steps * slice

        # Generates spiketimes, spiketimes_offset, target_length, target_rates
        i, t = zip(*self.data)
        i = array(i)
        t = array(t)
        alls = []
        n = 0
        pointers = []
        dt = self.dt

        target_length = []
        target_rates = []
        model_target = []
        group_index = 0

        neurons_in_group = self.subpopsize
        for j in xrange(self.groups):
#            neurons_in_group = self.groups[j] # number of neurons in the current group and current worker
            s = sort(t[i == j])
            target_length.extend([len(s)] * neurons_in_group)
            target_rates.extend([firing_rate(s)] * neurons_in_group)

            for k in xrange(self.slices):
                # first sliced group : 0...0, second_train...second_train, ...
                # second sliced group : first_train_second_slice...first_train_second_slice, second_train_second_slice...
                spikeindices = (s >= k * self.sliced_steps * dt) & (s < (k + 1) * self.sliced_steps * dt) # spikes targeted by sliced neuron number k, for target j
                targeted_spikes = s[spikeindices] - k * self.sliced_steps * dt + self.overlap_steps * dt # targeted spikes in the "local clock" for sliced neuron k
                targeted_spikes = hstack((-1 * second, targeted_spikes, self.sliced_duration + 1 * second))
                model_target.extend([k + group_index * self.slices] * neurons_in_group)
                alls.append(targeted_spikes)
                pointers.append(n)
                n += len(targeted_spikes)
            group_index += 1
        pointers = array(pointers, dtype=int)
        model_target = array(hstack(model_target), dtype=int)

        self.spiketimes = hstack(alls)
        self.spiketimes_offset = pointers[model_target] # [pointers[i] for i in model_target]

        # Duplicates each target_length value 'group_size' times so that target_length[i]
        # is the length of the train targeted by neuron i
        self.target_length = array(target_length, dtype=int)
        self.target_rates = array(target_rates)

    def evaluate(self, **param_values):
        """
        Use fitparams['delays'] to take spike delays into account.
        Use fitparams['refractory'] to take the refractory period into account.
        """
        delays = param_values.pop('delays', zeros(self.neurons))
        refractory = param_values.pop('refractory', zeros(self.neurons))

        # kron spike delays
        delays = kron(delays, ones(self.slices))
        refractory = kron(refractory, ones(self.slices))

        # Sets the parameter values in the NeuronGroup object
        self.group.reinit()
        for param, value in param_values.iteritems():
            self.group.state(param)[:] = kron(value, ones(self.slices)) # kron param_values if slicing
            
        # Reinitializes the model variables
        if self.initial_values is not None:
            for param, value in self.initial_values.iteritems():
                self.group.state(param)[:] = value

        if self.use_gpu:
            # Reinitializes the simulation object
            self.mf.reinit_vars(self.input, self.I_offset, self.spiketimes, self.spiketimes_offset, delays, refractory)
            # LAUNCHES the simulation on the GPU
            self.mf.launch(self.duration, self.stepsize)
            coincidence_count = self.mf.coincidence_count
            spike_count = self.mf.spike_count
        else:
            # set the refractory period
            if self.max_refractory is not None:
                self.group.refractory = refractory
            
            self.cc = CoincidenceCounter(self.group, self.spiketimes, self.spiketimes_offset,
                                        onset=self.onset, delta=self.delta)
            # Sets the spike delay values
            self.cc.spikedelays = delays
            # Reinitializes the simulation objects
            self.group.clock.reinit()
#            self.cc.reinit()
            net = Network(self.group, self.cc)
            # LAUNCHES the simulation on the CPU
            net.run(self.duration)
            coincidence_count = self.cc.coincidences
            spike_count = self.cc.model_length

        coincidence_count = sum(reshape(coincidence_count, (self.slices, -1)), axis=0)
        spike_count = sum(reshape(spike_count, (self.slices, -1)), axis=0)

        gamma = get_gamma_factor(coincidence_count, spike_count, self.target_length, self.target_rates, self.delta)

        return gamma
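
Two NumPy idioms carry the per-slice bookkeeping in evaluate: kron(x, ones(slices)) repeats each per-neuron value slices times, and reshape(counts, (slices, -1)).sum(axis=0) folds the counts of the sliced population back into one total per neuron, assuming a slice-major layout (slice index varying slowest). A small standalone illustration with invented numbers:

import numpy as np

slices = 3

# Repeat each per-neuron value once per slice (contiguous blocks per neuron).
delays = np.array([0.001, 0.004])
delays_sliced = np.kron(delays, np.ones(slices))
# -> [0.001, 0.001, 0.001, 0.004, 0.004, 0.004]

# Fold per-sliced-neuron counts back to per-neuron totals.
# Layout assumed here: slice 0 for all neurons, then slice 1, then slice 2.
coincidence_count = np.array([1, 2,    # slice 0, neurons 0 and 1
                              0, 3,    # slice 1
                              2, 1])   # slice 2
per_neuron = np.reshape(coincidence_count, (slices, -1)).sum(axis=0)
# -> [3, 6]

print(delays_sliced, per_neuron)
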
Code example #4
File: modelfitting.py Project: yzerlaut/brian
class ModelFitting(Fitness):
    def initialize(self, **kwds):
        self.use_gpu = self.unit_type == 'GPU'
        # Gets the key,value pairs in shared_data
        for key, val in self.shared_data.iteritems():
            setattr(self, key, val)
        # Gets the key,value pairs in **kwds
        for key, val in kwds.iteritems():
            setattr(self, key, val)


#        log_info(self.model)
        self.model = cPickle.loads(self.model)
        #        log_info(self.model)

        # if model is a string
        if type(self.model) is str:
            self.model = Equations(self.model)

        self.total_steps = int(self.duration / self.dt)

        self.neurons = self.nodesize
        self.groups = self.groups

        # Time slicing
        self.input = self.input[0:self.slices * (len(self.input) / self.slices)]  # makes sure that len(input) is a multiple of slices
        self.duration = len(self.input) * self.dt  # duration of the input
        self.sliced_steps = len(self.input) / self.slices  # timesteps per slice
        self.overlap_steps = int(self.overlap / self.dt)  # timesteps during the overlap
        self.total_steps = self.sliced_steps + self.overlap_steps  # total number of timesteps
        self.sliced_duration = self.overlap + self.duration / self.slices  # duration of the vectorized simulation
        self.N = self.neurons * self.slices  # TOTAL number of neurons in this worker

        self.input = hstack((zeros(self.overlap_steps), self.input))  # add zeros at the beginning because there is no overlap from the previous slice

        # Prepares data (generates I_offset, spiketimes, spiketimes_offset)
        self.prepare_data()

        # Add the 'refractory' parameter on the CPU only
        if not self.use_gpu:
            if self.max_refractory is not None:
                refractory = 'refractory'
                self.model.add_param('refractory', second)
            else:
                refractory = self.refractory
        else:
            if self.max_refractory is not None:
                refractory = 0 * ms
            else:
                refractory = self.refractory

        # Must recompile the Equations: the functions are not transferred after pickling/unpickling
        self.model.compile_functions()

        self.group = NeuronGroup(self.N,
                                 model=self.model,
                                 reset=self.reset,
                                 threshold=self.threshold,
                                 refractory=refractory,
                                 max_refractory=self.max_refractory,
                                 method=self.method,
                                 clock=Clock(dt=self.dt))

        if self.initial_values is not None:
            for param, value in self.initial_values.iteritems():
                self.group.state(param)[:] = value

        # Injects current into consecutive subgroups where I_offset has the same value
        # on successive intervals
        k = -1
        for i in hstack(
            (nonzero(diff(self.I_offset))[0], len(self.I_offset) - 1)):
            I_offset_subgroup_value = self.I_offset[i]
            I_offset_subgroup_length = i - k
            sliced_subgroup = self.group.subgroup(I_offset_subgroup_length)
            input_sliced_values = self.input[
                I_offset_subgroup_value:I_offset_subgroup_value +
                self.total_steps]
            sliced_subgroup.set_var_by_array(
                self.input_var,
                TimedArray(input_sliced_values, clock=self.group.clock))
            k = i

        if self.use_gpu:
            # Select integration scheme according to method
            if self.method == 'Euler': scheme = euler_scheme
            elif self.method == 'RK': scheme = rk2_scheme
            elif self.method == 'exponential_Euler': scheme = exp_euler_scheme
            else:
                raise Exception(
                    "The numerical integration method is not valid")

            self.mf = GPUModelFitting(self.group,
                                      self.model,
                                      self.input,
                                      self.I_offset,
                                      self.spiketimes,
                                      self.spiketimes_offset,
                                      zeros(self.neurons),
                                      0 * ms,
                                      self.delta,
                                      precision=self.precision,
                                      scheme=scheme)
        else:
            self.cc = CoincidenceCounter(self.group,
                                         self.spiketimes,
                                         self.spiketimes_offset,
                                         onset=self.onset,
                                         delta=self.delta)

    def prepare_data(self):
        """
        Generates I_offset, spiketimes, spiketimes_offset from the data,
        as well as target_length and target_rates.

        The neurons are first grouped by time slice: there are
        group_size*group_count neurons per group/time slice.
        Within each time slice, the neurons are grouped by target train:
        there are group_size neurons per group/target train.
        """
        # Generates I_offset
        self.I_offset = zeros(self.N, dtype=int)
        for slice in range(self.slices):
            self.I_offset[self.neurons * slice:self.neurons *
                          (slice + 1)] = self.sliced_steps * slice

        # Generates spiketimes, spiketimes_offset, target_length, target_rates
        i, t = zip(*self.data)
        i = array(i)
        t = array(t)
        alls = []
        n = 0
        pointers = []
        dt = self.dt

        target_length = []
        target_rates = []
        model_target = []
        group_index = 0

        neurons_in_group = self.subpopsize
        for j in xrange(self.groups):
            #            neurons_in_group = self.groups[j] # number of neurons in the current group and current worker
            s = sort(t[i == j])
            target_length.extend([len(s)] * neurons_in_group)
            target_rates.extend([firing_rate(s)] * neurons_in_group)

            for k in xrange(self.slices):
                # first sliced group : 0...0, second_train...second_train, ...
                # second sliced group : first_train_second_slice...first_train_second_slice, second_train_second_slice...
                spikeindices = (s >= k * self.sliced_steps * dt) & (s < (k + 1) * self.sliced_steps * dt)  # spikes targeted by sliced neuron number k, for target j
                targeted_spikes = s[spikeindices] - k * self.sliced_steps * dt + self.overlap_steps * dt  # targeted spikes in the "local clock" for sliced neuron k
                targeted_spikes = hstack((-1 * second, targeted_spikes,
                                          self.sliced_duration + 1 * second))
                model_target.extend([k + group_index * self.slices] *
                                    neurons_in_group)
                alls.append(targeted_spikes)
                pointers.append(n)
                n += len(targeted_spikes)
            group_index += 1
        pointers = array(pointers, dtype=int)
        model_target = array(hstack(model_target), dtype=int)

        self.spiketimes = hstack(alls)
        self.spiketimes_offset = pointers[model_target]  # [pointers[i] for i in model_target]

        # Duplicates each target_length value 'group_size' times so that target_length[i]
        # is the length of the train targeted by neuron i
        self.target_length = array(target_length, dtype=int)
        self.target_rates = array(target_rates)

    def evaluate(self, **param_values):
        """
        Use fitparams['delays'] to take spike delays into account.
        Use fitparams['refractory'] to take the refractory period into account.
        """
        delays = param_values.pop('delays', zeros(self.neurons))
        refractory = param_values.pop('refractory', zeros(self.neurons))

        # kron spike delays
        delays = kron(delays, ones(self.slices))
        refractory = kron(refractory, ones(self.slices))

        # Sets the parameter values in the NeuronGroup object
        self.group.reinit()
        for param, value in param_values.iteritems():
            self.group.state(param)[:] = kron(value, ones(self.slices))  # kron param_values if slicing

        # Reinitializes the model variables
        if self.initial_values is not None:
            for param, value in self.initial_values.iteritems():
                self.group.state(param)[:] = value

        if self.use_gpu:
            # Reinitializes the simulation object
            self.mf.reinit_vars(self.input, self.I_offset, self.spiketimes,
                                self.spiketimes_offset, delays, refractory)
            # LAUNCHES the simulation on the GPU
            self.mf.launch(self.duration, self.stepsize)
            coincidence_count = self.mf.coincidence_count
            spike_count = self.mf.spike_count
        else:
            # set the refractory period
            if self.max_refractory is not None:
                self.group.refractory = refractory

            self.cc = CoincidenceCounter(self.group,
                                         self.spiketimes,
                                         self.spiketimes_offset,
                                         onset=self.onset,
                                         delta=self.delta)
            # Sets the spike delay values
            self.cc.spikedelays = delays
            # Reinitializes the simulation objects
            self.group.clock.reinit()
            #            self.cc.reinit()
            net = Network(self.group, self.cc)
            # LAUNCHES the simulation on the CPU
            net.run(self.duration)
            coincidence_count = self.cc.coincidences
            spike_count = self.cc.model_length

        coincidence_count = sum(reshape(coincidence_count, (self.slices, -1)),
                                axis=0)
        spike_count = sum(reshape(spike_count, (self.slices, -1)), axis=0)

        gamma = get_gamma_factor(coincidence_count, spike_count,
                                 self.target_length, self.target_rates,
                                 self.delta)

        return gamma
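
prepare_data packs all target spike trains into one flat spiketimes array, padding each train with sentinel spikes (one before t = 0 and one past the end of the sliced duration) and recording each train's start index in pointers; pointers[model_target] then uses fancy indexing to give every model neuron the offset of the train it is compared against. A minimal sketch of that layout with two invented trains, using plain floats where the examples use Brian's unit-carrying quantities:

import numpy as np

# Two invented target spike trains (times in seconds).
trains = [np.array([0.010, 0.032, 0.055]),
          np.array([0.020, 0.071])]
sliced_duration = 0.1

alls, pointers, n = [], [], 0
for s in trains:
    # Sentinels: one spike before t = 0 and one past the end of the slice.
    padded = np.hstack((-1.0, s, sliced_duration + 1.0))
    alls.append(padded)
    pointers.append(n)
    n += len(padded)

spiketimes = np.hstack(alls)
pointers = np.array(pointers, dtype=int)

# Each model neuron targets one train; fancy indexing expands this mapping.
model_target = np.array([0, 0, 1, 1])        # e.g. two neurons per train
spiketimes_offset = pointers[model_target]   # -> [0, 0, 5, 5]

print(spiketimes)
print(spiketimes_offset)
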