Example #1
    def get_bspline_coeffs(self):

        n_data, n_times, n_channels = self.templates_aligned.shape

        channels = torch.arange(n_channels).cuda()
        # negate and reorder to (n_data, n_channels, n_times) before building CUDA templates
        temps_torch = torch.from_numpy(-(self.templates_aligned.transpose(0, 2, 1))).cuda()

        temp_cpp = deconv.BatchedTemplates([deconv.Template(temp, channels) for temp in temps_torch])
        self.coeffs = deconv.BatchedTemplates([transform_template(template) for template in temp_cpp])
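The deconv objects used above come from a compiled CUDA extension that is not shown in these snippets. Below is a hypothetical pure-Python stand-in covering only the behavior the examples rely on (a .data tensor, an .indices tensor, len() and indexing on the batch); the real compiled classes may differ.

class Template:
    def __init__(self, data, indices):
        self.data = data        # (n_visible_channels, n_times) waveform tensor
        self.indices = indices  # channel indices this template is visible on


class BatchedTemplates:
    def __init__(self, templates):
        self.templates = list(templates)

    def __len__(self):
        return len(self.templates)

    def __getitem__(self, i):
        return self.templates[i]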
Example #2
    def make_bsplines_parallel(self):
        
        #print (self.temps_gpu.shape, len(self.template_inds), self.template_inds[0].shape)
        self.temp_temp_cpp = deconv.BatchedTemplates([deconv.Template(nzData, nzInd) for nzData, nzInd in zip(self.temps_gpu, self.template_inds)])

        #print ("  making template bsplines")
        #fname = os.path.join(data_dir,'voltage_bsplines_'+
        #          str((self.chunk_id+1)*self.CONFIG.resources.n_sec_chunk_gpu) + '.npy')
        
        #if os.path.exists(fname)==False:
        if True:
            
            # Cat; TODO: don't need to pass tensor/cuda templates to parallel function
            #            - can just pass the raw cpu templates
            # multi-core bsplines
            if self.CONFIG.resources.multi_processing:
                templates_cpu = []
                for template in self.temp_temp_cpp:
                    templates_cpu.append(template.data.cpu().numpy())

                import parmap
                coefficients = parmap.map(transform_template_parallel, templates_cpu,
                                            processes=self.CONFIG.resources.n_processors,
                                            pm_pbar=False)
            # single core
            else:
                coefficients = []
                for template in self.temp_temp_cpp:
                    template_cpu = template.data.cpu().numpy()
                    coefficients.append(transform_template_parallel(template_cpu))
            
            #np.save(fname, coefficients)
        #else:
        #    print ("  ... loading coefficients from disk")
        #    coefficients = np.load(fname)
        
        #print ("  ... moving coefficients to cuda objects")
        coefficients_cuda = []
        for p in range(len(coefficients)):
            coefficients_cuda.append(deconv.Template(torch.from_numpy(coefficients[p]).cuda(), self.temp_temp_cpp[p].indices))
            # print ('self.temp_temp_cpp[p].indices: ', self.temp_temp_cpp[p].indices)
            # print ("self.vis_units: ", self.vis_units[p])
            # coefficients_cuda.append(deconv.Template(torch.from_numpy(coefficients[p]).cuda(), self.vis_units[p]))
        
        self.coefficients = deconv.BatchedTemplates(coefficients_cuda)

        del self.template_inds
        del self.temps_gpu
        del self.temp_temp_cpp
        del coefficients_cuda
        del coefficients
        torch.cuda.empty_cache()
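transform_template_parallel is not defined in this snippet; a minimal sketch, assuming it performs the same per-channel cubic B-spline fit as the nested transform_template in Example #6 but on a plain numpy array so parmap can ship it to worker processes:

import numpy as np
from scipy.interpolate import splrep


def transform_template_parallel(template_cpu, prepad=7, postpad=3, order=3):
    # template_cpu: (n_channels, n_times) waveform already moved to the CPU
    n_times = template_cpu.shape[1]
    knots = np.arange(n_times + prepad + postpad)
    coefficients = []
    for curve in template_cpu:
        # pad symmetrically so the spline behaves at the waveform edges
        tck = splrep(knots,
                     np.pad(curve, (prepad, postpad), mode='symmetric'),
                     k=order)
        # tck[1] holds the B-spline coefficients; drop the ones owed to the padding
        coefficients.append(tck[1][prepad - 1:-1 * (postpad + 1)])
    return np.array(coefficients, dtype='float32')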
Example #3
    def data_to_gpu(self):

        self.peak_pts = torch.arange(-1, +2).cuda()

        #norm
        norm = np.sum(np.square(self.temps), (0, 1))
        self.norms = torch.from_numpy(norm).float().cuda()

        # spatial and temporal component of svd
        self.spat_comp = torch.from_numpy(self.spat_comp).float().cuda()
        self.temp_comp = torch.from_numpy(self.temp_comp).float().cuda()

        print("  ... moving coefficients to cuda objects")

        # load vis units
        fname_vis_units = os.path.join(self.init_dir, 'vis_units.npy')
        vis_units = np.load(fname_vis_units, allow_pickle=True)

        #
        coefficients_cuda = []
        for p in range(len(self.coefficients)):
            coefficients_cuda.append(
                deconv.Template(
                    torch.from_numpy(self.coefficients[p]).float().cuda(),
                    torch.from_numpy(vis_units[p]).long().cuda()))
        self.coefficients = deconv.BatchedTemplates(coefficients_cuda)

        del coefficients_cuda
        torch.cuda.empty_cache()

        if self.fit_height:
            self.large_units = torch.from_numpy(self.large_units).cuda()
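data_to_gpu expects init_dir/vis_units.npy to hold one array of visible channel indices per template. A sketch of how such a file could be written; the helper name is illustrative and the actual pipeline may produce the file elsewhere.

import os
import numpy as np


def save_vis_units(init_dir, vis_units_list):
    # vis_units_list: one 1-D array of channel indices per template (possibly ragged)
    vis_units = np.empty(len(vis_units_list), dtype=object)
    for i, v in enumerate(vis_units_list):
        vis_units[i] = np.asarray(v, dtype=np.int64)
    # saved as an object array, hence allow_pickle=True when loading it back
    np.save(os.path.join(init_dir, 'vis_units.npy'), vis_units)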
Example #4
    def templates_to_bsplines(self):

        print ("  making template bsplines")
        fname = os.path.join(self.dir_bsplines,'bsplines_'+
                  str((self.chunk_id+1)*self.CONFIG.deconvolution.template_update_time) + '.npy')
        print ("Bspline file: ", fname)
        
        if os.path.exists(fname)==False:
        #if True:
            
            # Cat; TODO: don't need to pass tensor/cuda templates to parallel function
            #            - can just pass the raw cpu templates
            # multi-core bsplines
            if self.CONFIG.resources.multi_processing:
                templates_cpu = []
                for template in self.temp_temp_cpp:
                    templates_cpu.append(template.data.cpu().numpy())

                import parmap
                coefficients = parmap.map(transform_template_parallel, templates_cpu, 
                                            processes=self.CONFIG.resources.n_processors,
                                            pm_pbar=False)
            # single core
            else:
                coefficients = []
                for template in self.temp_temp_cpp:
                    template_cpu = template.data.cpu().numpy()
                    coefficients.append(transform_template_parallel(template_cpu))
            
            #np.save(fname, coefficients)
        else:
            print ("  ... loading coefficients from disk")
            coefficients = np.load(fname, allow_pickle=True)

        # debug: compare the freshly computed coefficients against any cached copy on disk
        print (" recomputed coefficients: ", coefficients[0].shape)
        print (" recomputed coefficients: ", coefficients[0])

        if os.path.exists(fname):
            coefficients_loaded = np.load(fname, allow_pickle=True)
            print (" loaded coefficients: ", coefficients_loaded[0].shape)
            print (" loaded coefficients: ", coefficients_loaded[0])
        

        print ("  ... moving coefficients to cuda objects")
        coefficients_cuda = []
        for p in range(len(coefficients)):
            coefficients_cuda.append(deconv.Template(torch.from_numpy(coefficients[p]).cuda(), self.temp_temp_cpp[p].indices))

        self.coefficients = deconv.BatchedTemplates(coefficients_cuda)

        del self.temp_temp
        del self.temp_temp_cpp
        del coefficients_cuda
        del coefficients
        torch.cuda.empty_cache()
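The save that this method leaves commented out (np.save(fname, coefficients)) would need the ragged coefficient list stored as an explicit object array for the np.load(fname, allow_pickle=True) branch to work; a sketch of re-enabling it, assuming nothing else about the file format:

import numpy as np


def save_bspline_cache(fname, coefficients):
    # coefficients: one (n_visible_channels, n_coeffs) float32 array per unit
    cache = np.empty(len(coefficients), dtype=object)
    for i, c in enumerate(coefficients):
        cache[i] = c
    np.save(fname, cache)  # read back with np.load(fname, allow_pickle=True)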
Example #5
    def initialize_cpp(self):

        # make a list of pairwise batched temp_temp and their vis_units
        # Cat: TODO: this isn't really required any longer;
        #            - the only thing the parallel bsplines function needs from this
        #              is self.temp_temp_cpp.indices, which is just self.vis_units
        #
        self.temp_temp_cpp = deconv.BatchedTemplates([
            deconv.Template(nzData, nzInd)
            for nzData, nzInd in zip(self.temp_temp, self.vis_units)
        ])
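The temp_temp tensors paired with vis_units here appear to be pairwise template interactions restricted to each unit's visible channels. A rough, illustrative sketch of how such pairwise cross-correlations could be assembled; this is not the pipeline's actual computation.

import numpy as np


def pairwise_temp_temp(templates, vis_units):
    # templates: (n_units, n_channels, n_times); vis_units[i]: channels unit i is visible on
    n_units, _, n_times = templates.shape
    temp_temp = []
    for i in range(n_units):
        out = np.zeros((n_units, 2 * n_times - 1), dtype='float32')
        for j in range(n_units):
            shared = np.intersect1d(vis_units[i], vis_units[j])
            for c in shared:
                # accumulate the full cross-correlation over shared channels
                out[j] += np.correlate(templates[i, c], templates[j, c], mode='full')
        temp_temp.append(out)
    return temp_temp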
Example #6
    def make_bsplines(self):
        #print ("  making bsplines...")
        # make template objects
        self.templates = deconv.BatchedTemplates([
            deconv.Template(vals, inds)
            for vals, inds in zip(self.template_vals, self.template_inds)
        ])

        # make bspline objects
        def fit_spline(curve, knots=None, prepad=0, postpad=0, order=3):
            if knots is None:
                knots = np.arange(len(curve) + prepad + postpad)
            return splrep(knots,
                          np.pad(curve, (prepad, postpad), mode='symmetric'),
                          k=order)

        def transform_template(template,
                               knots=None,
                               prepad=7,
                               postpad=3,
                               order=3):
            if knots is None:
                knots = np.arange(len(template.data[0]) + prepad + postpad)
            splines = [
                fit_spline(curve,
                           knots=knots,
                           prepad=prepad,
                           postpad=postpad,
                           order=order)
                for curve in template.data.cpu().numpy()
            ]
            # splrep returns a (knots, coefficients, order) tuple; keep the
            # spline coefficients and trim the ones introduced by the padding
            coefficients = np.array([
                spline[1][prepad - 1:-1 * (postpad + 1)] for spline in splines
            ],
                                    dtype='float32')
            return deconv.Template(
                torch.from_numpy(coefficients).cuda(), template.indices)

        # make bspline coefficients
        self.coefficients = deconv.BatchedTemplates(
            [transform_template(template) for template in self.templates])
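A quick sanity check of fit_spline on a synthetic waveform (purely illustrative): with symmetric padding and scipy's default interpolating fit, evaluating the spline at the original sample positions reproduces the curve almost exactly.

import numpy as np
from scipy.interpolate import splrep, splev

prepad, postpad = 7, 3
curve = np.sin(np.linspace(0, 2 * np.pi, 61)).astype('float32')
knots = np.arange(len(curve) + prepad + postpad)
tck = splrep(knots, np.pad(curve, (prepad, postpad), mode='symmetric'), k=3)
# evaluate at the positions of the original (unpadded) samples
reconstructed = splev(np.arange(prepad, prepad + len(curve)), tck)
print(np.max(np.abs(reconstructed - curve)))  # should be tiny (interpolating fit)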