Example #1
    def add_cpp_allspikes(self):
        #start = dt.datetime.now().timestamp()

        torch.cuda.synchronize()

        # select all spikes from a previous iteration
        spike_times, spike_temps, spike_shifts, spike_heights = \
            self.sample_spikes_allspikes()

        torch.cuda.synchronize()

        # also fill in self-convolution traces with low energy so the
        #   spikes cannot be detected again (i.e. enforcing refractoriness)
        # Cat: TODO: investigate whether putting the refractoriness back in is viable
        if self.refractoriness:
            deconv.refrac_fill(
                energy=self.obj_gpu,
                spike_times=spike_times,
                spike_ids=spike_temps,
                # window of 2 * refractory + 1 samples around each spike
                fill_length=self.refractory * 2 + 1,
                fill_offset=self.subtraction_offset - 2 - self.refractory,
                fill_value=self.fill_value)

        torch.cuda.synchronize()

        # add the spikes back in (the negated scale turns subtraction into addition)
        deconv.subtract_splines(self.obj_gpu, spike_times, spike_shifts,
                                spike_temps, self.coefficients,
                                -self.tempScaling * spike_heights)

        torch.cuda.synchronize()

        return
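
Conceptually, refrac_fill overwrites a short window of the objective around each detected spike so those samples cannot win detection again. Below is a minimal NumPy sketch of that idea; the function name, the [n_units, n_times] layout of energy, and the clipping behavior are assumptions for illustration, not the deconv extension's actual implementation.

import numpy as np

def refrac_fill_sketch(energy, spike_times, spike_ids,
                       fill_length, fill_offset, fill_value):
    # energy: assumed [n_units, n_times] objective matrix
    n_times = energy.shape[1]
    for t, u in zip(spike_times, spike_ids):
        start = max(t + fill_offset, 0)
        end = min(start + fill_length, n_times)
        # flatten the window so unit u cannot be re-detected near time t
        energy[u, start:end] = fill_value
    return energy

energy = np.zeros((3, 100))
refrac_fill_sketch(energy, spike_times=[50], spike_ids=[1],
                   fill_length=5, fill_offset=-2, fill_value=-1e10)
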
Example #2
    def add_cpp_allspikes(self, idx_iter):
        #start = dt.datetime.now().timestamp()

        torch.cuda.synchronize()

        # alternative (disabled): select randomly 10% of spikes from previous deconv
        #spike_times, spike_temps, spike_shifts, flag = self.sample_spikes(idx_iter)

        # select all spikes from a previous iteration
        spike_times, spike_temps, spike_shifts, flag = self.sample_spikes_allspikes(
            idx_iter)

        torch.cuda.synchronize()

        if not flag:
            return

        # also fill in self-convolution traces with low energy so the
        #   spikes cannot be detected again (i.e. enforcing refractoriness)
        # Cat: TODO: investigate whether putting the refractoriness back in is viable
        if self.refractoriness:
            deconv.refrac_fill(
                energy=self.obj_gpu,
                spike_times=spike_times,
                spike_ids=spike_temps,
                # alternative (disabled): fill the full template window
                #fill_length=self.n_time,
                #fill_offset=self.n_time//2,
                # window of 2 * refractory + 1 samples around each spike
                fill_length=self.refractory * 2 + 1,
                # offset sets where the fill starts relative to the spike time
                fill_offset=self.n_time // 2 + self.refractory // 2,
                fill_value=self.fill_value)

            # alternative (disabled): subtract full templates instead of splines
            # deconv.subtract_spikes(data=self.obj_gpu,
            #                        spike_times=spike_times,
            #                        spike_temps=spike_temps,
            #                        templates=self.templates_cpp_refractory_add,
            #                        do_refrac_fill=False,
            #                        refrac_fill_val=-1e10)

        torch.cuda.synchronize()

        # add the spikes back in (the negated scale turns subtraction into addition)
        deconv.subtract_splines(self.obj_gpu, spike_times, spike_shifts,
                                spike_temps, self.coefficients,
                                -self.tempScaling)

        torch.cuda.synchronize()

        return
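
Both add_cpp_allspikes variants reuse subtract_splines to put spikes back: passing a negated scale turns the subtraction into an addition. A toy illustration of that sign convention follows; subtract_scaled is a made-up stand-in for the core update, not the real kernel.

import numpy as np

def subtract_scaled(obj, template, t, scale):
    # stand-in for the core update of a subtract-style kernel
    obj[t:t + len(template)] -= scale * template

obj = np.zeros(10)
tmpl = np.ones(3)
subtract_scaled(obj, tmpl, 4, 2.0)    # subtract the spike: obj[4:7] == -2
subtract_scaled(obj, tmpl, 4, -2.0)   # negated scale adds it back
assert np.allclose(obj, 0.0)
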
Example #3
    def subtract_cpp(self):

        start = dt.datetime.now().timestamp()

        torch.cuda.synchronize()

        spike_times = self.spike_times.squeeze() - self.lockout_window
        spike_temps = self.neuron_ids.squeeze()

        # zero out shifts if superres shift turned off
        # Cat: TODO: remove this computation altogether if not required;
        #           will save some time.
        if not self.superres_shift:
            self.xshifts = self.xshifts * 0

        # if there is a single spike, squeeze() drops the batch dimension;
        # restore it below (Cat: TODO make this faster/pythonic)
        if self.spike_times.size()[0] == 1:
            spike_times = spike_times[None]
            spike_temps = spike_temps[None]

        deconv.subtract_splines(self.obj_gpu, spike_times, self.xshifts,
                                spike_temps, self.coefficients,
                                self.tempScaling)

        torch.cuda.synchronize()

        # also fill in self-convolution traces with low energy so the
        #   spikes cannot be detected again (i.e. enforcing refractoriness)
        # Cat: TODO: read from CONFIG

        if self.refractoriness:
            #print ("filling in timesteps: ", self.n_time)
            deconv.refrac_fill(
                energy=self.obj_gpu,
                spike_times=spike_times,
                spike_ids=spike_temps,
                # alternative (disabled): fill the full template window
                #fill_length=self.n_time,
                #fill_offset=self.n_time//2,
                # window of 2 * refractory + 1 samples around each spike
                fill_length=self.refractory * 2 + 1,
                # offset sets where the fill starts relative to the spike time
                fill_offset=self.n_time // 2 + self.refractory // 2,
                fill_value=-self.fill_value)

        torch.cuda.synchronize()

        return (dt.datetime.now().timestamp() - start)
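
subtract_cpp returns a wall-clock duration, and the surrounding torch.cuda.synchronize() calls are what make that measurement meaningful: CUDA kernel launches return immediately, so the clock must only be read once the queued kernels have finished. The same pattern in isolation (timed_gpu is a hypothetical helper name; the synchronize calls are standard PyTorch):

import datetime as dt
import torch

def timed_gpu(fn, *args):
    torch.cuda.synchronize()   # drain kernels queued before the timed region
    start = dt.datetime.now().timestamp()
    out = fn(*args)
    torch.cuda.synchronize()   # wait for fn's kernels before stopping the clock
    return out, dt.datetime.now().timestamp() - start
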
Example #4
    def subtract_cpp(self):

        start = dt.datetime.now().timestamp()

        torch.cuda.synchronize()

        # debugging path (disabled): restrict processing to a single spike
        if False:
            self.spike_times = self.spike_times[:1]
            self.neuron_ids = self.neuron_ids[:1]
            self.xshifts = self.xshifts[:1]
            self.heights = self.heights[:1]
            self.obj_gpu *= 0.

        #spike_times = self.spike_times.squeeze()-self.lockout_window
        spike_times = self.spike_times.squeeze() - self.subtraction_offset
        spike_temps = self.neuron_ids.squeeze()

        # zero out shifts if superres shift turned off
        # Cat: TODO: remove this computation altogether if not required;
        #           will save some time.
        if not self.superres_shift:
            self.xshifts = self.xshifts * 0

        # if there is a single spike, squeeze() drops the batch dimension;
        # restore it below (Cat: TODO make this faster/pythonic)

        if self.spike_times.size()[0] == 1:
            spike_times = spike_times[None]
            spike_temps = spike_temps[None]

        #print ("spke_times: ", spike_times, spike_times)
        #print ("spke_times: ", spike_times[:20], spike_times[-20:])

        # save metadata (debugging path, disabled): dump intermediate arrays for inspection
        if False:
            if self.n_iter < 500:
                self.objectives_dir = os.path.join(self.out_dir, 'objectives')
                if not os.path.isdir(self.objectives_dir):
                    os.mkdir(self.objectives_dir)

                np.save(
                    self.out_dir + '/objectives/spike_times_inside_' +
                    str(self.chunk_id) + "_iter_" + str(self.n_iter) + '.npy',
                    spike_times.squeeze().cpu().data.numpy())
                np.save(
                    self.out_dir + '/objectives/spike_ids_inside_' +
                    str(self.chunk_id) + "_iter_" + str(self.n_iter) + '.npy',
                    spike_temps.squeeze().cpu().data.numpy())
                np.save(
                    self.out_dir + '/objectives/obj_gpu_' +
                    str(self.chunk_id) + "_iter_" + str(self.n_iter) + '.npy',
                    self.obj_gpu.cpu().data.numpy())
                np.save(
                    self.out_dir + '/objectives/shifts_' + str(self.chunk_id) +
                    "_iter_" + str(self.n_iter) + '.npy',
                    self.xshifts.cpu().data.numpy())
                np.save(
                    self.out_dir + '/objectives/tempScaling_' +
                    str(self.chunk_id) + "_iter_" + str(self.n_iter) + '.npy',
                    self.tempScaling)
                np.save(
                    self.out_dir + '/objectives/heights_' +
                    str(self.chunk_id) + "_iter_" + str(self.n_iter) + '.npy',
                    self.heights.cpu().data.numpy())

                if False:
                    for k in range(len(self.coefficients)):
                        np.save(
                            self.out_dir + '/objectives/coefficients_' +
                            str(k) + "_" + str(self.chunk_id) + "_iter_" +
                            str(self.n_iter) + '.npy',
                            self.coefficients[k].data.cpu().numpy())
                    print("spike_times: ", spike_times.shape)
                    print("spike_times: ", type(spike_times.data[0].item()))
                    print("spike_temps: ", spike_temps.shape)
                    print("spike_temps: ", type(spike_temps.data[0].item()))
                    print("self.obj_gpu: ", self.obj_gpu.shape)
                    print("self.obj_gpu: ",
                          type(self.obj_gpu.data[0][0].item()))
                    print("self.xshifts: ", self.xshifts.shape)
                    print("self.xshifts: ", type(self.xshifts.data[0].item()))
                    print("self.tempScaling: ", self.tempScaling)
                    print("self.heights: ", self.heights.shape)
                    print("self.heights: ", type(self.heights.data[0].item()))
                    print("self.coefficients[k]: ",
                          self.coefficients[k].data.shape)
                    print("self.coefficients[k]: ",
                          type(self.coefficients[k].data[0][0].item()))
            else:
                quit()

        #self.obj_gpu = self.obj_gpu*0.
        #spike_times = spike_times -99
        deconv.subtract_splines(self.obj_gpu, spike_times, self.xshifts,
                                spike_temps, self.coefficients,
                                self.tempScaling * self.heights)

        torch.cuda.synchronize()

        # also fill in self-convolution traces with low energy so the
        #   spikes cannot be detected again (i.e. enforcing refractoriness)
        # Cat: TODO: read from CONFIG

        if self.refractoriness:
            #print ("filling in timesteps: ", self.n_time)
            deconv.refrac_fill(
                energy=self.obj_gpu,
                spike_times=spike_times,
                spike_ids=spike_temps,
                # window of 2 * refractory + 1 samples around each spike
                fill_length=self.refractory * 2 + 1,
                fill_offset=self.subtraction_offset - 2 - self.refractory,
                fill_value=-self.fill_value)

        torch.cuda.synchronize()

        return (dt.datetime.now().timestamp() - start)
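
Finally, the single-spike special case in both subtract_cpp variants exists because squeeze() on a length-1 tensor drops the batch dimension entirely, producing a 0-d tensor; indexing with [None] restores it. The idiom in isolation:

import torch

times = torch.tensor([42])   # a single spike time
t = times.squeeze()          # 0-d tensor, shape []
t = t[None]                  # shape [1]; equivalent to t.unsqueeze(0)
assert t.shape == (1,)
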