Example #1
    def sense0(self, debug=False):
        image = self.cam.get_image()
        sb = self.search_boxes
        xr = np.zeros(self.search_boxes.x.shape)
        xr[:] = self.search_boxes.x[:]
        yr = np.zeros(self.search_boxes.y.shape)
        yr[:] = self.search_boxes.y[:]
        half_width = sb.half_width

        for iteration in range(self.centroiding_iterations):
            #QApplication.processEvents()
            msi = iteration == self.centroiding_iterations - 1
            if debug:
                plt.figure()
                plt.imshow(image)
                plt.title('iteration %d' % iteration)
            centroid.compute_centroids(
                spots_image=image,
                sb_x1_vec=sb.x1,
                sb_x2_vec=sb.x2,
                sb_y1_vec=sb.y1,
                sb_y2_vec=sb.y2,
                x_out=xr,
                y_out=yr,
                mean_intensity=self.box_means,
                maximum_intensity=self.box_maxes,
                minimum_intensity=self.box_mins,
                background_intensity=self.box_backgrounds,
                estimate_background=self.estimate_background,
                background_correction=self.background_correction,
                num_threads=1,
                modify_spots_image=msi)
            half_width -= self.iterative_centroiding_step
            sb = SearchBoxes(xr, yr, half_width)
        if debug:
            plt.figure()
            plt.imshow(image)
            plt.show()
            sys.exit()
        self.x_centroids[:] = xr[:]
        self.y_centroids[:] = yr[:]
        self.x_slopes = (self.x_centroids - self.search_boxes.x
                         ) * self.pixel_size_m / self.lenslet_focal_length_m
        self.y_slopes = (self.y_centroids - self.search_boxes.y
                         ) * self.pixel_size_m / self.lenslet_focal_length_m
        self.tilt = np.mean(self.x_slopes)
        self.tip = np.mean(self.y_slopes)
        if self.remove_tip_tilt:
            self.x_slopes -= self.tilt
            self.y_slopes -= self.tip
        self.image = image
        if self.reconstruct_wavefront:
            self.zernikes, self.wavefront, self.error = self.reconstructor.get_wavefront(
                self.x_slopes, self.y_slopes)
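
The slope computation at the end of each of these sense methods is the usual Shack-Hartmann conversion: the spot's displacement from its search-box reference, scaled from pixels to meters by the pixel pitch and divided by the lenslet focal length, gives the local wavefront slope. A minimal standalone sketch of just that step (the pixel size, focal length, and array values below are illustrative, not taken from the kungpao source):

import numpy as np

# Hypothetical sensor geometry, for illustration only.
pixel_size_m = 5.5e-6            # camera pixel pitch (m)
lenslet_focal_length_m = 30e-3   # lenslet focal length (m)

ref_x = np.array([100.0, 140.0, 180.0])   # search-box reference x positions (px)
cen_x = np.array([100.4, 139.8, 180.1])   # measured spot centroid x positions (px)

# Displacement in meters divided by focal length gives the (dimensionless) local
# wavefront slope, approximately the deflection angle in radians.
x_slopes = (cen_x - ref_x) * pixel_size_m / lenslet_focal_length_m
print(x_slopes)  # ~7.3e-05 for the 0.4 px shift in the first lenslet
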
Example #2
 def sense(self):
     image = cam.get_image()
     sensor_mutex.lock()
     sb = self.search_boxes
     xr = np.zeros(self.search_boxes.x.shape)
     xr[:] = self.search_boxes.x[:]
     yr = np.zeros(self.search_boxes.y.shape)
     yr[:] = self.search_boxes.y[:]
     half_width = sb.half_width
     for iteration in range(self.centroiding_iterations):
         #QApplication.processEvents()
         msi = iteration==self.centroiding_iterations-1
         centroid.compute_centroids(spots_image=image,
                                    sb_x1_vec=sb.x1,
                                    sb_x2_vec=sb.x2,
                                    sb_y1_vec=sb.y1,
                                    sb_y2_vec=sb.y2,
                                    x_out=xr,
                                    y_out=yr,
                                    mean_intensity = self.box_means,
                                    maximum_intensity = self.box_maxes,
                                    minimum_intensity = self.box_mins,
                                    background_intensity = self.box_backgrounds,
                                    estimate_background = self.estimate_background,
                                    background_correction = self.background_correction,
                                    num_threads = 1,
                                    modify_spots_image = msi)
         half_width-=self.iterative_centroiding_step
         sb = SearchBoxes(xr,yr,half_width)
         
     self.x_centroids[:] = xr[:]
     self.y_centroids[:] = yr[:]
     self.x_slopes = (self.x_centroids-self.search_boxes.x)*self.pixel_size_m/self.lenslet_focal_length_m
     self.y_slopes = (self.y_centroids-self.search_boxes.y)*self.pixel_size_m/self.lenslet_focal_length_m
     self.tilt = np.mean(self.x_slopes)
     self.tip = np.mean(self.y_slopes)
     if self.remove_tip_tilt:
         self.x_slopes-=self.tilt
         self.y_slopes-=self.tip
     self.image = image
     if self.reconstruct_wavefront:
         self.zernikes,self.wavefront,self.error = self.reconstructor.get_wavefront(self.x_slopes,self.y_slopes)
     sensor_mutex.unlock()
Example #3
    def sense(self, debug=False):
        self.image = self.cam.get_image()

        centroid.estimate_backgrounds(
            spots_image=self.image,
            sb_x_vec=self.search_boxes.x,
            sb_y_vec=self.search_boxes.y,
            sb_bg_vec=self.box_backgrounds,
            sb_half_width_p=self.search_boxes.half_width)
        centroid.compute_centroids(
            spots_image=self.image,
            sb_x_vec=self.search_boxes.x,
            sb_y_vec=self.search_boxes.y,
            sb_bg_vec=self.box_backgrounds,
            sb_half_width_p=self.search_boxes.half_width,
            iterations_p=self.centroiding_iterations,
            iteration_step_px_p=self.iterative_centroiding_step,
            x_out=self.x_centroids,
            y_out=self.y_centroids,
            mean_intensity=self.box_means,
            maximum_intensity=self.box_maxes,
            minimum_intensity=self.box_mins,
            num_threads_p=1)

        self.x_slopes = (self.x_centroids - self.search_boxes.x
                         ) * self.pixel_size_m / self.lenslet_focal_length_m
        self.y_slopes = (self.y_centroids - self.search_boxes.y
                         ) * self.pixel_size_m / self.lenslet_focal_length_m
        self.tilt = np.mean(self.x_slopes)
        self.tip = np.mean(self.y_slopes)
        if self.remove_tip_tilt:
            self.x_slopes -= self.tilt
            self.y_slopes -= self.tip
        if self.reconstruct_wavefront:
            self.zernikes, self.wavefront, self.error = self.reconstructor.get_wavefront(
                self.x_slopes, self.y_slopes)
Example #4
    def sense(self,debug=False):

        if self.profile_update_method:
            self.sense_timer.tick('start')
            
        self.image = self.cam.get_image()

        if self.profile_update_method:
            self.sense_timer.tick('cam.get_image')
        
        if self.dark_subtract:
            self.image = self.image - self.dark_image

        self.image_min = self.image.min()
        self.image_mean = self.image.mean()
        self.image_max = self.image.max()
        
        if self.profile_update_method:
            self.sense_timer.tick('image stats')
            
        
        t0 = time.time()
        if not self.fast_centroiding:
            if self.estimate_background:
                centroid.estimate_backgrounds(spots_image=self.image,
                                              sb_x_vec = self.search_boxes.x,
                                              sb_y_vec = self.search_boxes.y,
                                              sb_bg_vec = self.box_backgrounds,
                                              sb_half_width_p = self.search_boxes.half_width)
                self.box_backgrounds = self.box_backgrounds + self.background_correction
                
            if self.profile_update_method:
                self.sense_timer.tick('estimate background')
            centroid.compute_centroids(spots_image=self.image,
                                       sb_x_vec = self.search_boxes.x,
                                       sb_y_vec = self.search_boxes.y,
                                       sb_bg_vec = self.box_backgrounds,
                                       sb_half_width_p = self.search_boxes.half_width,
                                       iterations_p = self.centroiding_iterations,
                                       iteration_step_px_p = self.iterative_centroiding_step,
                                       x_out = self.x_centroids,
                                       y_out = self.y_centroids,
                                       mean_intensity = self.box_means,
                                       maximum_intensity = self.box_maxes,
                                       minimum_intensity = self.box_mins,
                                       num_threads_p = 1)
            if self.profile_update_method:
                self.sense_timer.tick('centroid')
        else:
            centroid.fast_centroids(spots_image=self.image,
                                    sb_x_vec = self.search_boxes.x,
                                    sb_y_vec = self.search_boxes.y,
                                    sb_half_width_p = self.search_boxes.half_width,
                                    centroiding_half_width_p = self.centroiding_half_width,
                                    x_out = self.x_centroids,
                                    y_out = self.y_centroids,
                                    sb_max_vec = self.box_maxes,
                                    valid_vec = self.valid_centroids,
                                    verbose_p = 0,
                                    num_threads_p = 1)
        self.centroiding_time = time.time()-t0
        self.x_slopes = (self.x_centroids-self.search_boxes.x)*self.pixel_size_m/self.lenslet_focal_length_m
        self.y_slopes = (self.y_centroids-self.search_boxes.y)*self.pixel_size_m/self.lenslet_focal_length_m
        
        self.tilt = np.mean(self.x_slopes)
        self.tip = np.mean(self.y_slopes)
        
        if self.remove_tip_tilt:
            self.x_slopes-=self.tilt
            self.y_slopes-=self.tip

            
        if self.reconstruct_wavefront:
            self.zernikes,self.wavefront,self.error = self.reconstructor.get_wavefront(self.x_slopes,self.y_slopes)
            if self.profile_update_method:
                self.sense_timer.tick('reconstruct wavefront')
            
            
            self.filter_slopes = self.n_zernike_orders_corrected<self.reconstructor.N_orders
            
            if self.filter_slopes:

                # Outline of approach: the basic idea is to filter the residual error
                # slopes by Zernike mode before multiplying by the mirror command
                # matrix.
                # 1. multiply the slopes by a wavefront reconstructor matrix
                #    to get Zernike coefficients; these are already output by
                #    the call to self.reconstructor.get_wavefront above
                # 2. zero the desired modes
                # 3. multiply the modes by the inverse of that matrix, which is stored
                #    in the Reconstructor object as reconstructor.slope_matrix
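                #    (a standalone sketch of these three steps, with made-up
                #    matrices, appears after this example)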

                # convert the order into a number of terms:
                n_terms = self.reconstructor.Z.nm2j(self.n_zernike_orders_corrected,self.n_zernike_orders_corrected)

                # get the slope matrix (inverse of zernike matrix, which maps slopes onto zernikes)
                slope_matrix = self.reconstructor.slope_matrix

                # create a filtered set of zernike terms
                # not sure if we should zero piston here
                z_filt = np.zeros(len(self.zernikes))
                z_filt[:n_terms+1] = self.zernikes[:n_terms+1]
                
                zero_piston = True
                if zero_piston:
                    z_filt[0] = 0.0
                
                # filter the slopes, and assign them to this sensor object:
                filtered_slopes = np.dot(slope_matrix,z_filt)
                self.x_slopes = filtered_slopes[:self.n_lenslets]
                self.y_slopes = filtered_slopes[self.n_lenslets:]
            
        if self.profile_update_method:
            self.sense_timer.tick('end sense')
            self.sense_timer.tock()
            
        try:
            self.beeper.beep(self.error)
        except Exception as e:
            print(e)
            print(self.error)
            sys.exit()
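
The comment block in Example #4 outlines the mode-filtering step: project the slope vector onto Zernike coefficients, zero the modes that should not pass through, and map the remaining coefficients back to slopes via reconstructor.slope_matrix. The sketch below shows those three steps in isolation; the matrix shapes and contents are made up here (a random forward matrix and its pseudo-inverse stand in for the Reconstructor's matrices), so treat it as an illustration of the projection/back-projection pattern rather than the kungpao implementation.

import numpy as np

# Illustrative sizes only; in the real code these come from the Reconstructor.
n_lenslets = 50
n_zernike_terms = 15
rng = np.random.default_rng(0)

# zernike_matrix maps the stacked slope vector [x_slopes; y_slopes] to Zernike
# coefficients; slope_matrix maps coefficients back to slopes (a pseudo-inverse
# stands in here for reconstructor.slope_matrix).
zernike_matrix = rng.standard_normal((n_zernike_terms, 2 * n_lenslets))
slope_matrix = np.linalg.pinv(zernike_matrix)

slopes = rng.standard_normal(2 * n_lenslets) * 1e-4
zernikes = zernike_matrix.dot(slopes)

# Keep only the low-order terms and zero piston, as in Example #4.
n_terms = 6
z_filt = np.zeros_like(zernikes)
z_filt[:n_terms + 1] = zernikes[:n_terms + 1]
z_filt[0] = 0.0

filtered_slopes = slope_matrix.dot(z_filt)
x_slopes = filtered_slopes[:n_lenslets]
y_slopes = filtered_slopes[n_lenslets:]
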
Example #5
    return x1_vec, x2_vec, y1_vec, y2_vec


spots_images = sorted(
    glob.glob('/home/rjonnal/code/kungpao/data/spots/spots*.npy'))
x1v, x2v, y1v, y2v = build_searchbox_edges()
xcentroids = np.zeros((len(x1v)), dtype=float)
ycentroids = np.zeros((len(x1v)), dtype=float)

times = []
pp_times = []
for fn in spots_images:
    print(fn)
    im = np.load(fn)
    t0 = time()
    xcentroids, ycentroids = centroid.compute_centroids(
        im, x1v, x2v, y1v, y2v, xcentroids, ycentroids, True)
    times.append(time() - t0)
    t0 = time()
    pp_xcentroids, pp_ycentroids = pp_compute_centroids(im, x1v, x2v, y1v, y2v)
    pp_times.append(time() - t0)
    print(pp_xcentroids[0], xcentroids[0])

    #assert (pp_xcentroids==xcentroids).all() and (pp_ycentroids==ycentroids).all()

cython_time = np.mean(times)
pp_time = np.mean(pp_times)

print('pure python: %0.4f' % pp_time)
print('cython: %0.4f' % cython_time)
print('speedup: %0.1f x' % (pp_time / cython_time))
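
The benchmark above compares the Cython centroider against a pure-Python reference, pp_compute_centroids, whose definition is not included in these excerpts. For orientation, a typical pure-Python center-of-mass centroider over the same search-box edge vectors might look like the sketch below; the exact edge and rounding conventions are assumptions, so it is a stand-in rather than the actual reference implementation.

import numpy as np

def pp_compute_centroids_sketch(im, x1v, x2v, y1v, y2v):
    # Center-of-mass centroid inside each search box, with boxes given by
    # inclusive edge vectors. Illustrative only; not the pp_compute_centroids
    # used in the benchmark above.
    n = len(x1v)
    xc = np.zeros(n)
    yc = np.zeros(n)
    for k in range(n):
        x1, x2 = int(x1v[k]), int(x2v[k])
        y1, y2 = int(y1v[k]), int(y2v[k])
        box = im[y1:y2 + 1, x1:x2 + 1].astype(float)
        yy, xx = np.mgrid[y1:y2 + 1, x1:x2 + 1]
        total = box.sum()
        xc[k] = (xx * box).sum() / total
        yc[k] = (yy * box).sum() / total
    return xc, yc
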
Example #6
        image[y + dy, x + dx] = np.int16(spot_intensity)

sb_x = np.array(sb_x, dtype=np.int16)
sb_y = np.array(sb_y, dtype=np.int16)

x_out = np.zeros(sb_x.shape)
y_out = np.zeros(sb_x.shape)

mean_intensity = np.zeros(sb_x.shape)
maximum_intensity = np.zeros(sb_x.shape)
minimum_intensity = np.zeros(sb_x.shape)
background0 = np.zeros(sb_x.shape)
background = np.zeros(sb_x.shape)

sb_half_width = (sb_width - 1) // 2

centroid.estimate_backgrounds(image, sb_x, sb_y, background, sb_half_width)

centroid.compute_centroids(image, sb_x, sb_y, background, sb_half_width,
                           iterations, iteration_step_px, x_out, y_out,
                           mean_intensity, maximum_intensity,
                           minimum_intensity, 1)

plt.imshow(image, cmap='gray')
plt.autoscale(False)
plt.plot(sb_x, sb_y, 'gs')
plt.plot(x_out, y_out, 'r+')

plt.show()
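
The positional compute_centroids call above appears to follow the same parameter order as the keyword call in Example #3. For readability, the equivalent keyword form would presumably be the following (the positional-to-keyword mapping is inferred from Example #3, not verified against the centroid module itself):

centroid.compute_centroids(spots_image=image,
                           sb_x_vec=sb_x,
                           sb_y_vec=sb_y,
                           sb_bg_vec=background,
                           sb_half_width_p=sb_half_width,
                           iterations_p=iterations,
                           iteration_step_px_p=iteration_step_px,
                           x_out=x_out,
                           y_out=y_out,
                           mean_intensity=mean_intensity,
                           maximum_intensity=maximum_intensity,
                           minimum_intensity=minimum_intensity,
                           num_threads_p=1)
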
Example #7
spots_images = sorted(glob.glob('/home/rjonnal/code/kungpao/data/spots/spots*.npy'))
x1v,x2v,y1v,y2v = build_searchbox_edges()
xcentroids = np.zeros((len(x1v)),dtype=float)
ycentroids = np.zeros((len(x1v)),dtype=float)
total_intensity = np.zeros((len(x1v)),dtype=float)
maximum_intensity = np.zeros((len(x1v)),dtype=float)
minimum_intensity = np.zeros((len(x1v)),dtype=float)
background_intensity = np.zeros((len(x1v)),dtype=float)

times = []
for fn in spots_images:
    print(fn)
    im = np.load(fn)
    t0 = time()
    xcentroids,ycentroids = centroid.compute_centroids(im,x1v,x2v,y1v,y2v,
                                                       xcentroids,
                                                       ycentroids,
                                                       total_intensity,
                                                       maximum_intensity,
                                                       minimum_intensity,
                                                       background_intensity)
    times.append(time()-t0)
    
    #assert (pp_xcentroids==xcentroids).all() and (pp_ycentroids==ycentroids).all()

cython_time = np.mean(times)

print('cython: %0.4f' % cython_time)

Example #8
    y1_vec = refy_vec - sb_width//2
    y2_vec = refy_vec + sb_width//2
    return x1_vec,x2_vec,y1_vec,y2_vec

spots_images = sorted(glob.glob('/home/rjonnal/code/kungpao/data/spots/spots*.npy'))
x1v,x2v,y1v,y2v = build_searchbox_edges()
xcentroids = np.zeros((len(x1v)),dtype=float)
ycentroids = np.zeros((len(x1v)),dtype=float)

times = []
pp_times = []
for fn in spots_images:
    print(fn)
    im = np.load(fn)
    t0 = time()
    xcentroids,ycentroids = centroid.compute_centroids(im,x1v,x2v,y1v,y2v,xcentroids,ycentroids,True)
    times.append(time()-t0)
    t0 = time()
    pp_xcentroids,pp_ycentroids = pp_compute_centroids(im,x1v,x2v,y1v,y2v)
    pp_times.append(time()-t0)
    print(pp_xcentroids[0], xcentroids[0])
    
    #assert (pp_xcentroids==xcentroids).all() and (pp_ycentroids==ycentroids).all()

cython_time = np.mean(times)
pp_time = np.mean(pp_times)

print('pure python: %0.4f' % pp_time)
print('cython: %0.4f' % cython_time)
print('speedup: %0.1f x' % (pp_time/cython_time))