Example #1
    def processFrame(self, frame):
        self.frames.append(frame)
        index = self.index

        # update index for next iteration
        self.index += 1

        # decide whether to move the stage
        finished = self.index >= self.positions.shape[0]
        if not finished:
            self.move = self.stage.moveTo(self.positions[self.index], 'slow')

        # calculate the offset (while the stage moves to the next location)
        if index == 0:
            offset = (0, 0)
        else:
            compareIndex = max(0, index - 10)
            offset, _ = imreg_dft.translation(
                frame.getImage(), self.frames[compareIndex].getImage())
            px = self.camera.getPixelSize()
            offset = self.offsets[compareIndex] + offset.astype(
                float) * [px.x(), px.y()]
        self.offsets[index] = offset

        # finish up if there are no more positions
        if finished:
            pg.disconnect(self.camera.sigNewFrame, self.newFrame)
            self.analyze()
Example #2
def tran():
    basedir = os.path.join('.', 'examples/banklogo')
    # the TEMPLATE
    im0 = sp.misc.imread(os.path.join(basedir, "moban.jpg"), True)  # float32
    # the image to be transformed
    im1 = sp.misc.imread(os.path.join(basedir, "bxz1.jpg"), True)

    result = ird.translation(im0, im1)

    tvec = result["tvec"].round(4)
    # the Transformed IMaGe.
    timg = ird.transform_img(im1, tvec=tvec)

    # Maybe we don't want to show plots all the time
    ird.imshow(im0, im1, timg)
    plt.show()

    print("Translation is {}, success rate {:.4g}".format(
        tuple(tvec), result["success"]))
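A quick way to sanity-check ird.translation without any image files is to shift a synthetic array and confirm the recovered vector undoes it. A minimal sketch, assuming only numpy and imreg_dft are installed:

import numpy as np
import imreg_dft as ird

im0 = np.zeros((64, 64))
im0[24:40, 24:40] = 1.0  # a bright square as the template
im1 = np.roll(np.roll(im0, 3, axis=0), -5, axis=1)  # shift rows by +3, cols by -5

result = ird.translation(im0, im1)
# tvec maps im1 back onto im0, so it should come out close to (-3, +5)
print(result["tvec"], result["success"])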
Example #3
    def align_videos(self, filenames, template_frame):
        """Return filenames of generated videos"""
        progress_global = QProgressDialog('Total progress aligning all files', 'Abort', 0, 100, self)
        progress_global.setAutoClose(True)
        progress_global.setMinimumDuration(0)
        def callback_global(x):
            progress_global.setValue(x * 100)
            QApplication.processEvents()
        callback_global(0)
        ret_filenames = []

        for i, filename in enumerate(filenames):
            callback_global(i / float(len(filenames)))
            progress_shifts = QProgressDialog('Finding best shifts for ' + filename, 'Abort', 0, 100, self)
            progress_shifts.setAutoClose(True)
            progress_shifts.setMinimumDuration(0)
            progress_apply = QProgressDialog('Applying shifts for ' + filename, 'Abort', 0, 100, self)
            progress_apply.setAutoClose(True)
            progress_apply.setMinimumDuration(0)
            def callback_apply(x):
                progress_apply.setValue(x * 100)
                QApplication.processEvents()
            progress_load = QProgressDialog('Loading ' + filename, 'Abort', 0, 100, self)
            progress_load.setAutoClose(True)
            progress_load.setMinimumDuration(0)
            def callback_load(x):
                progress_load.setValue(x * 100)
                QApplication.processEvents()
            frames = file_io.load_file(filename, callback_load)
            callback_load(1)

            reference_frame = frames[self.ref_no.value()]  # todo: this needs to be renamed... reference frame is already established as something else

            if not self.use_shift_checkbox.isChecked():
                if self.scaling_checkbox.isChecked() or self.rotation_checkbox.isChecked():
                    shift = ird.similarity(template_frame, reference_frame)
                    if not self.rotation_checkbox.isChecked():
                        shift['angle'] = 0.0
                    if not self.scaling_checkbox.isChecked():
                        shift['scale'] = 1.0
                else:
                    shift = ird.translation(template_frame, reference_frame)
                if progress_shifts.wasCanceled():
                    return
            else:
                shift = {'tvec': [self.tvec_y_sb.value(), self.tvec_x_sb.value()], 'angle': self.rotation_sb.value(),
                         'scale': self.scale_sb.value()}

            shifted_frames = self.apply_shifts(frames, shift, callback_apply)
            path = pfs.save_project(filename, self.project, shifted_frames, self.Defaults.manip, 'video')
            pfs.refresh_list(self.project, self.video_list, self.video_list_indices,
                             self.Defaults.list_display_type, self.toolbutton_values)
            ret_filenames.append(path)
        callback_global(1)
        return ret_filenames
Example #4
 def compute_shift(self, ref_frame, frame):
     if self.scaling_checkbox.isChecked() or self.rotation_checkbox.isChecked():
         shift = ird.similarity(ref_frame, frame)
         if not self.rotation_checkbox.isChecked():
             shift['angle'] = 0.0
         if not self.scaling_checkbox.isChecked():
             shift['scale'] = 1.0
     else:
         shift = ird.translation(ref_frame, frame)
         shift['scale'] = 1.0
     return shift
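The dict returned by compute_shift can be applied with imreg_dft's own transform_img, which takes scale, angle (in degrees) and tvec as separate keyword arguments. A hedged sketch of the applying side (apply_shift is an assumed helper name, not from the example above):

import imreg_dft as ird

def apply_shift(frame, shift):
    # 'scale', 'angle' and 'tvec' are the fields that ird.similarity and
    # ird.translation put in their result dicts; translation results may
    # lack the first two, hence the defaults.
    return ird.transform_img(frame,
                             scale=shift.get('scale', 1.0),
                             angle=shift.get('angle', 0.0),
                             tvec=shift['tvec'])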
Example #5
 def compute_shifts(self, template_frame, frames, progress_shifts):
     def callback_shifts(x):
         progress_shifts.setValue(x * 100)
         QApplication.processEvents()
     results = []
     for i, frame in enumerate(frames):
         if progress_shifts.wasCanceled():
             return
         callback_shifts(i / float(len(frames)))
         results = results + [ird.translation(template_frame, frame)]
     callback_shifts(1)
     return results
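Each entry of the returned list is a full result dict, so aligning the frames afterwards is one transform_img call per frame. A sketch under the assumption that frames is a sequence of 2D arrays:

results = self.compute_shifts(template_frame, frames, progress_shifts)
if results is not None:  # compute_shifts returns None when the user cancels
    aligned = [ird.transform_img(frame, tvec=result["tvec"])
               for frame, result in zip(frames, results)]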
Example #6
    def find_shift(self, master_stack, stack_to_move):
        """Finds the necessary shift to correct shift_to_move in order to match
        master stack.

        Parameters
        ----------
        master_stack : numpy.ndarray 2D
            Image to use as reference
        stack_to_move : numpy.ndarray 2D
            Image that is to be shifted to match the reference
        """
        result = ird.translation(master_stack, stack_to_move)
        self.tvec = [int(this) for this in result['tvec']]
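find_shift only stores the rounded vector; one plausible way to apply it afterwards is scipy.ndimage.shift (an assumption about the rest of this class, not code taken from it):

from scipy.ndimage import shift as nd_shift

# hypothetical companion step: move the stack by the stored integer tvec
moved = nd_shift(stack_to_move, self.tvec, mode='constant', cval=0.0)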
Example #7
    def RegisterStack(self):
        
        # self.imageData[0,:,:] *= 0.33
        # self.imageData[1,:,:] *= 0.66
        # self.imageData[-1,:,:] *= 0.33
        # self.imageData[-2,:,:] *= 0.66

        # flatten stacks
        m = self.imageData.max(axis=0)
        print('shape: ', m.shape)
        nreg = self.imageData.shape[0]
        print('num reg:', nreg)
        ireg = 10  # int(nreg/2)  # get one near the start of the sequence.
        print('ireg: ', ireg)
        # correct for lateral motion: shift of each frame relative to frame ireg
        off = [imreg_dft.translation(self.imageData[ireg], self.imageData[i])['tvec']
               for i in range(self.imageData.shape[0])]
        # tvec values are floats; round to integer pixels so they can be used as slice indices
        offt = np.rint(np.array(off)).astype(int).T

        # find boundaries of outer rectangle including all images as registered
        minx = np.min(offt[0])
        maxx = np.max(offt[0])
        miny = np.min(offt[1])
        maxy = np.max(offt[1])
        print('shape: ', m.shape)
        print('min/max x: ', minx, maxx)
        print('min/max y: ', miny, maxy)
        # build canvas
        canvas = np.zeros(shape=(self.imageData.shape[0], self.imageData.shape[1]-minx+maxx,
            self.imageData.shape[2]-miny+maxy), dtype=self.imageData.dtype)

        # set initial image (offsets were computed relative to this, so it has no offset)
        # canvas[0, -minx:-minx+m.shape[1], -miny:-miny+m.shape[2]] = m[0]
        for i in range(0, self.imageData.shape[0]):
            ox = offt[0][i] - minx
            oy = offt[1][i] - miny
            canvas[i, ox:(ox+self.imageData.shape[1]), oy:(oy+self.imageData.shape[2])] = self.imageData[i]
        self.imageData = canvas
        self.updateAvgStdImage()
        #pg.image(self.imageData,title='image after registration')
        return
Example #8
    def translation(self, img):
        if not isinstance(img.translation, np.ndarray):
            # img on top
            top_img = self.image(img.row-1, img.col)
            # img to the left
            left_img = self.image(img.row, img.col-1)

            if top_img:
                img1 = imread(top_img.path)
                img2 = imread(img.path)
                y_translation, _ = imreg.translation(img1, img2)
            else:
                y_translation = (0, 0)

            if left_img:
                img1 = imread(left_img.path)
                img2 = imread(img.path)
                x_translation, _ = imreg.translation(img1, img2)
            else:
                x_translation = (0, 0)

            img.translation = np.array((y_translation, x_translation))
        return img.translation
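The tuple unpacking above (y_translation, _ = ...) targets an older imreg_dft release; current releases return a dict instead, so the equivalent call would look roughly like this (a sketch, same images assumed):

res = imreg.translation(img1, img2)
y_translation = res["tvec"]  # (dy, dx) as a numpy array
success = res["success"]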
Example #9
    def cache(self, path):
        Bs = [os.path.join(path, p) for p in os.listdir(path) if p == 'Histograms.tif']
        LRs = [os.path.join(path, p) for p in os.listdir(path) if p == 'WF_TMR_calibrated.tif']
        ImgBs, PathBs, ImgLRs, PathLRs= [], [], [], []
        for p in Bs:
            img = np.array(Image.open(p))
            img = np.expand_dims(img, axis=2) if img.ndim == 2 else img
            ImgBs.append(img)
            PathBs.append(p)

        for p in LRs:
            try:
                imgStack = Image.open(p)
                indexes = [i for i in range(imgStack.n_frames)]
                random.shuffle(indexes)
                c = min(len(indexes), 20)
                for i in indexes[:c]:
                    imgStack.seek(i)
                    img = np.array(imgStack)
                    dtype = img.dtype
                    assert img.ndim == 2
                    if self.drift_correction:
                        import imreg_dft as ird
                        from skimage import exposure
                        b = ImgBs[0][:, :, 0]
                        b = exposure.equalize_hist(b)
                        b = scipy.ndimage.filters.gaussian_filter(b, sigma=(6, 6))
                        b = scipy.misc.imresize(b, img.shape[:2])
                        ts = ird.translation(b, img)
                        tvec = ts["tvec"]
                        # the Transformed IMaGe.
                        img = ird.transform_img(img, tvec=tvec)
                    if self.scale_LR is True:
                        img = scipy.misc.imresize(img, ImgBs[0].shape[:2])
                    elif isinstance(self.scale_LR, list):
                        img = scipy.misc.imresize(img, self.scale_LR)
                    img = np.expand_dims(img, axis=2)
                    img = img.astype(dtype)
                    ImgLRs.append(img)
                    PathLRs.append(p)
            except KeyboardInterrupt:
                raise
            except Exception as e:
                print('error when reading file ', p)
                import traceback, sys
                traceback.print_exc(file=sys.stdout)

        self.__cache[path] = { 'B': ImgBs, 'A':ImgLRs, 'path': path, 'pathB': PathBs, 'pathA': PathLRs}
        return True
Example #10
def compute_translation(path, src, target):
    '''
    Compute translation of src tile to target tile
    contained in /path/
    
    Inputs:
        path: folder containing tiles
        src: source tile in .png format
        target: target tile in .png format
    Outputs:
        tvec: translation vector measured in pixels
    '''
    im0 = sp.misc.imread(path + src, True)
    im1 = sp.misc.imread(path + target, True)

    result = ird.translation(im0, im1)
    tvec = result['tvec']

    return tvec
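A hedged usage sketch: compute the tile offset and then apply it with transform_img. The tile file names here are hypothetical:

tvec = compute_translation('./tiles/', 'tile_0_0.png', 'tile_0_1.png')
im1 = sp.misc.imread('./tiles/' + 'tile_0_1.png', True)
aligned = ird.transform_img(im1, tvec=tvec)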
Example #11
def xcorr_hybes(hybe_dict, reg_ref='hybe1', bead_thresh=10000):
    """
    Find the translation xcorr between hybes.
    """
    tvecs = {}
    ref_img = hybe_dict[reg_ref]
    #     img_bg = gaussian_filter(ref_img, (10, 10))
    #     ref_hpass = ref_img-img_bg
    #     np.place(ref_hpass, ref_hpass<bead_thresh, 0.01)
    for h, img in hybe_dict.items():
        if h == reg_ref:
            tvecs[h] = (0, 0)
        else:
            #             img_bg = gaussian_filter(img, (10, 10))
            #             img_hpass = img-img_bg
            #             np.place(img_hpass, img_hpass<bead_thresh, 0.01)
            xcorr_result = ird.translation(ref_img, img)
            tvecs[h] = xcorr_result['tvec']
    return tvecs
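Since xcorr_hybes keys the translation vectors by hybe name, registering the whole dict back onto the reference is one comprehension away. A sketch, with hybe_dict as in the function above:

tvecs = xcorr_hybes(hybe_dict, reg_ref='hybe1')
registered = {h: ird.transform_img(img, tvec=tvecs[h])
              for h, img in hybe_dict.items()}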
Example #12
def calc_offset_points(points_1, points_2, shape, plot=False):
    "Calculate the offset between a pair of ordered points -- e.g., an xy list of star positions, and and xy list of model postns."
    "Returned offset is integer pixels as tuple (dy, dx)."
    
    diam_kernel = 5 # If this is 11, that is too big, and we gt the wrong answer. Very sensitive.

    image_1 = hbt.image_from_list_points(points_1, shape, diam_kernel)
    image_2 = hbt.image_from_list_points(points_2, shape, diam_kernel)
 
    t0, t1 = ird.translation(image_1, image_2)  # shift, with t0 = (dy, dx); t1 is the success metric
    (dy, dx) = t0
    
    if (plot):

        xrange = (0, shape[0])
        yrange = (0, shape[1])

        figs = plt.figure()
        ax1 = figs.add_subplot(1,2,1) # nrows, ncols, plotnum. Returns an 'axis'
        ax1.set_aspect('equal') # Need to explicitly set aspect ratio here, otherwise in a multi-plot, it will be rectangular

#        fig1 = plt.imshow(np.log(image_1))
        plt.plot(points_1[:,0], points_1[:,1], marker='o', color='lightgreen', markersize=4, ls='None', label = 'Photometric')
        plt.plot(points_2[:,0], points_2[:,1], marker='o', color='red', markersize=4, ls='None', label = 'Cat')
        plt.legend()
       
        plt.xlim(xrange)    # Need to set this explicitly so that points out of image range are clipped
        plt.ylim(yrange)
        plt.title('Raw')
        
        ax2 = figs.add_subplot(1,2,2) # nrows, ncols, plotnum. Returns an 'axis'
        plt.plot(points_1[:,0], points_1[:,1], marker='o', color='lightgreen', markersize=9, ls='None')
        plt.plot(points_2[:,0] + t0[1], points_2[:,1] + t0[0], marker='o', color='red', markersize=4, ls='None')
        ax2.set_aspect('equal')

        plt.xlim(xrange)    # Need to set this explicitly so that points out of image range are clipped
        plt.ylim(yrange)
        plt.title('Shifted, dx=' + repr(dx) + ', dy = ' + repr(dy))
        
        plt.show()
        
    return t0
Example #13
def comp_trans(src, target):
    '''
    Compute translation of src array to target array
    
    Inputs:
        src: numpy array representing source tile
        target: numpy array representing target tile
    Outputs:
        tvec: translation vector measured in pixels
    '''
    tvec = [0, 0]

    range_target = target.max() - target.min()
    if range_target > 0:
        try:
            result = ird.translation(src, target)
            tvec = result['tvec']
        except OverflowError:
            tvec = float('nan')

    return tvec
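Because comp_trans signals failure with NaN rather than raising, callers need an explicit check before using the vector. A sketch, with src and target arrays as above:

tvec = comp_trans(src, target)
if np.any(np.isnan(tvec)):
    print('registration failed, keeping tile in place')
else:
    target_aligned = ird.transform_img(target, tvec=tvec)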
Example #14
def translation(template_array, sub_array, scale_exponent=0.4):
    """
    Aligns array 'sub_array' to the template 'template_array' using translational transformation.
    Args:
        template_array  : Template array which will be used as a reference for translational aligning
        sub_array       : Subject array which will be aligned with the template_array using translation
        scale_exponent  : Exponent to which the array will be scaled
    Returns:
        mod_array       : Modified subject array aligned using translational transformation
    """
    list_array = modify_array(np.array([template_array, sub_array]),
                              scale_exponent)
    template_arrayslice = list_array[0]
    sub_arrayslice = list_array[1]

    dict_shift = ird.translation(template_arrayslice, sub_arrayslice)
    list_shift = list(dict_shift['tvec'])
    mod_array = np.empty_like(sub_array)
    shift(sub_array, list_shift, output=mod_array, mode='nearest')

    print("Translation In X & Y = {0}".format(list_shift))

    return mod_array
Example #15
    def register(self, T, reg_ch):

        if T.ndim == 3:
            reg_ch = None

        def _reg(x):
            return x if reg_ch is None else x[reg_ch]

        R = [T[0]]

        print('Running drift correction...')

        for frame in tqdm(T[1:]):
            result = ird.translation(_reg(R[-1]), _reg(frame))
            if reg_ch is None:
                freg = ird.transform_img(frame, tvec=result["tvec"])
            else:
                freg = np.stack(
                    [ird.transform_img(c, tvec=result["tvec"]) for c in frame])
            R.append(freg)

        reg = np.stack(R)

        return reg
Example #16
def navigate_image_stellar(im, wcs_in, name_catalog='', do_plot=True, method='fft', title=''):

    """
    Navigate frame based on stellar images.
    The result returned is the pixel shift (dy, dx).
    WCS parameters are returned, *and* modified in place.
    """
    
    import imreg_dft as ird
    from   astropy.wcs import WCS
    
    # from   astropy.vo.client import conesearch # Virtual Observatory, ie star catalogs   # DEPRECATED!
    from   astroquery.vo_conesearch import conesearch                 # New home of conesearch
    
# Inputs are the image array, and the WCS structure.
# This routine does not do any file IO. The image array and header must be already loaded.
# The image is assumed to be stretched properly s.t. stars can be found using DAOphot. 

    NUM_STARS_PHOT = 100  # How many stars to use from DAOPhot. For noisy images, DAO will find a lot of
                          # fake stars, so we need to crank this up higher than the # of cat stars.
    NUM_STARS_CAT  = 50  # How many stars to use from star catalog

    DO_GSC1     = False
    DO_GSC12     = True
    DO_USNOA2   = False
    
#==============================================================================
# Calculate the image radius, in radians, based on the size and the pixel scale
#==============================================================================

    dx_pix = hbt.sizex(im)
    dy_pix = hbt.sizey(im)
    radec_corner = wcs_in.wcs_pix2world(0, dy_pix/2, 0)
    radec_center = wcs_in.wcs_pix2world(dx_pix/2, dy_pix/2, 0)
    (ra_corner, dec_corner) = radec_corner
    (ra_center, dec_center) = radec_center
    
    radius_image = math.sqrt((dec_corner-dec_center)**2 + 
                             ((ra_corner-ra_center) / np.cos(dec_corner*hbt.d2r))**2) * hbt.d2r

    radius_search_deg = radius_image * hbt.r2d
    
# Read the WCS coordinates
           
    center_deg  = wcs_in.wcs.crval  # degrees. # crval is a two-element array of [RA, Dec], in degrees

# Stretch the image. This is just for display -- no processing.

    stretch_percent = 90
    stretch = astropy.visualization.PercentileInterval(stretch_percent)  # PI(90) scales array to 5th .. 95th %ile. 

# Display it

    if (do_plot):
        plt.imshow(stretch(im))

#==============================================================================
# Get stars from star catalogs     
#==============================================================================
    
    if (DO_GSC1):
        name_cat = u'The HST Guide Star Catalog, Version 1.1 (Lasker+ 1992) 1' # works, but 1' errors; investigating
        stars = conesearch.conesearch(center_deg, radius_search_deg, cache=True, catalog_db = name_cat)
        ra_stars  = np.array(stars.array['RAJ2000'])*hbt.d2r # Convert to radians
        dec_stars = np.array(stars.array['DEJ2000'])*hbt.d2r # Convert to radians
    #            table_stars = Table(stars.array.data)
    
    if (DO_GSC12):
#        name_cat = u'The HST Guide Star Catalog, Version 1.2 (Lasker+ 1996) 1'
        name_cat = u'Guide Star Catalog v2 1'                                       # Works from gobi, not tomato
        url_cat = 'http://gsss.stsci.edu/webservices/vo/ConeSearch.aspx?CAT=GSC23&' # Works always
            
        with data.conf.set_temp('remote_timeout', 30): # This is the very strange syntax to set a timeout delay.
                                                       # The default is 3 seconds, and that times out often.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")                                                          
                # stars = conesearch.conesearch(wcs_in.wcs.crval, radius_search_deg, cache=True, catalog_db = url_cat)

                # The various functions of conesearch/ConeSearch/etc are quite confusing, and are in flux.                
                # This line below seems to work. It does not allow an explicit catalog suggestion, but it does the job.
                
                c = astropy.coordinates.SkyCoord(wcs_in.wcs.crval[0], wcs_in.wcs.crval[1], unit='deg')
                stars = ConeSearch.query_region(c, f'{radius_search_deg} deg')
    
        ra_stars  = np.array(stars.array['ra'])*hbt.d2r  # Convert to radians
        dec_stars = np.array(stars.array['dec'])*hbt.d2r # Convert to radians
    
        mag       = np.array(stars.array['Mag'])
        
        print("Stars downloaded: N = {}; mag = {:.2f} .. {:.2f}".format(np.size(mag), np.nanmin(mag), np.nanmax(mag)))
        print("RA = {:.2f} .. {:.2f}".format(np.nanmin(ra_stars)*hbt.r2d, np.nanmax(ra_stars)*hbt.r2d))
        
        # Now sort by magnitude, and keep the 100 brightest
        # This is because this GSC catalog is huge -- typically 2000 stars in LORRI FOV.
        # We need to reduce its size to fit in our fixed astropy table string length.
    
        order = np.argsort(mag)
        order = np.array(order)[0:NUM_STARS_CAT]
    
        ra_stars = ra_stars[order]   # Returned as radians
        dec_stars = dec_stars[order]
    
    if (DO_USNOA2):  
        name_cat = u'The USNO-A2.0 Catalogue (Monet+ 1998) 1' # Works but gives stars down to v=17; I want to v=13 
        stars = conesearch.conesearch(wcs_in.wcs.crval, 0.3, cache=False, catalog_db = name_cat)
        table_stars = Table(stars.array.data)
        mask = table_stars['Bmag'] < 13
        table_stars_m = table_stars[mask]            
    
        ra_stars  = table_stars_m['RAJ2000']*hbt.d2r # Convert to radians
        dec_stars = table_stars_m['DEJ2000']*hbt.d2r # Convert to radians
    
    ra_stars_cat  = ra_stars
    dec_stars_cat = dec_stars

    radec_stars_cat        = np.transpose(np.array((ra_stars_cat, dec_stars_cat)))
    
    (x_stars_cat, y_stars_cat) = wcs_in.wcs_world2pix(
                                                      radec_stars_cat[:,0]*hbt.r2d, 
                                                      radec_stars_cat[:,1]*hbt.r2d, 0)   
    
    points_stars_cat = np.transpose((y_stars_cat, x_stars_cat))  # Yes, order is supposed to be (y,x)

#==============================================================================
# Use DAOphot to search the image for stars.
#==============================================================================
  
    points_stars_phot = hbt.find_stars(im, num=NUM_STARS_PHOT) # Returns N x 2 aray. 0 = Row = y; 1 = Column = x.
    
    y_stars_phot =(points_stars_phot[:,0]) # xy is correct -- see above
    x_stars_phot =(points_stars_phot[:,1]) # 

#==============================================================================
# Make a plot showing the DAO stars on the image
#==============================================================================

    color_phot = 'red'            # Color for stars found photometrically
    color_cat  = 'lightgreen'     # Color for stars in catalog
    
    DO_PLOT_DAO = False   # Plot an intermediate result?
    
    if (DO_PLOT_DAO):

        plt.imshow(stretch(im))

        plt.plot(x_stars_phot, y_stars_phot, linestyle='none', 
                 marker='o', markersize=9, mec=color_cat, mew=1, color='none', 
                 label = 'DAO photometric stars') # plot() uses x, y

        plt.plot(x_stars_cat, y_stars_cat, linestyle='none', 
                 marker='o', markersize=5, color='lightgreen', 
                 label = 'Cat stars') # plot() uses x, y        

        plt.title(title)
        plt.ylim((hbt.sizey(im)),0)
        plt.xlim((0,hbt.sizex(im)))
        plt.legend(loc = 'upper left')
        plt.show()

# Up til here, x and y are correct
    
#==============================================================================
# Look up the shift between the photometry and the star catalog. 
# Do this by making a pair of fake images, and then looking up image registration on them.
#==============================================================================

# I call this pointing process 'opnav'. 
# It is returned in order (y,x) because that is what imreg_dft uses, even though it is a bit weird.
    
    diam_kernel = 11  # How many pixels across are our synthetic stellar images? Should be odd number. Not critical.
    do_binary = True  # For the stellar images, do a binary 1/0 (recommended), or a pixel distance?

    shape = np.shape(im)   # Set shape of output array
    
    image_cat  = hbt.image_from_list_points(points_stars_cat,  shape, diam_kernel, do_binary=do_binary)
    image_phot = hbt.image_from_list_points(points_stars_phot, shape, diam_kernel, do_binary=do_binary)

    if (method == 'fft'):         # Very fast method

        # Set up a constraint for the fit. It should be different for 1x1 and 4x4.
        # For 1x1, it works well to be 100 pixels.

        if (hbt.sizex(im) == 1024):    # For LORRI 1x1
            constraint_tx    = (0,100) # Mean and stdev. i.e., returned value will be within stdev of mean.
            constraint_ty    = (0,100) 
            
        if (hbt.sizex(im) == 256):   # For LORRI 4x4 
            constraint_tx    = (0,25) # Mean and stdev. i.e., returned value will be within stdev of mean.
            constraint_ty    = (0,25)  
            
        constraint_angle = 0    # With one value, it is a fixed constraint.
        
        constraints = {'tx' : constraint_tx, 'ty' : constraint_ty, 'angle' : constraint_angle}
        (dy, dx) = ird.translation(image_cat, image_phot, constraints=constraints)['tvec']
        dy_opnav = -dy
        dx_opnav = -dx

    if (method == 'bruteforce'):  # Very slow method

        ((dx, dy), mat)       = hbt.get_translation_images_bruteforce(image_cat, image_phot)
        dx_opnav = -dx
        dy_opnav = -dy
        
#==============================================================================
# Make a plot, showing DAO positions + catalog positions
#==============================================================================

    if (do_plot):
        
#        hbt.figsize((10,10))
        
        plt.imshow(stretch(im))
        
        # Plot the stars -- catalog, and DAO
        
        plt.plot(x_stars_cat + dx_opnav, y_stars_cat + dy_opnav, 
                 marker='o', ls='None', 
                 color=color_cat, alpha = 0.5, ms=12, mew=1, label = 'Cat Stars, adjusted')
        
        plt.plot(x_stars_cat, y_stars_cat, 
                 marker='o', ls='None', 
                 color=color_cat, alpha = 1, ms=4, mew=1, label = 'Cat Stars, raw')
                 
        plt.plot(x_stars_phot, y_stars_phot, 
                 marker='o', ls='None', 
                 color='none', markersize=10, mew=1, mec=color_phot, alpha = 1, label = 'DAOfind Stars')               
        
        plt.title('After navigation, with dx = {:.1f}, dy = {:.1f}, {}'.format(dx_opnav, dy_opnav, title))
        plt.legend()  # Draw legend. Might be irrel since remove() might keep it; not sure.
        
        plt.imshow(stretch(im))
        plt.show()

#==============================================================================
# Return results and exit
#==============================================================================

# Results are returned in terms of pixel offset and a revised WCS structure.
# I don't seem to be able to copy a WCS structure, so I modify the one in place!

# Get the pixel location of the center position

    crpix = wcs_in.wcs.crpix  # Center position, in pixels, old
    
# Get the new RA, Dec center of the array. It is just the old location, plus the offset
    
    ORIGIN_FORMAT = 1  # 0 for Numpy-style indexing, 1 for Fortran-style and FITS-style.
                       # So what do I use for FITS files in Python? Experimentally, 1 is right and 0 is not.
    
    (ra_new, dec_new) = wcs_in.wcs_pix2world(crpix[0] - dx_opnav, crpix[1] - dy_opnav, ORIGIN_FORMAT)

    # Set it
    
    wcs_in.wcs.crval = (ra_new, dec_new)
    
    return(wcs_in, (dy_opnav, dx_opnav))    
Example #17
    def execute_primary_function(self, input_paths=None):
        """Return filenames of generated videos"""
        [reference_frame,
         to_align_paths] = self.get_alignment_inputs(input_paths)
        assert ([
            os.path.splitext(os.path.basename(path))[0]
            for path in to_align_paths
        ] == self.shift_table_data[self.Labels.shift_table_col1])

        progress_global = QProgressDialog('Total progress aligning all files',
                                          'Abort', 0, 100, self)
        progress_global.setAutoClose(True)
        progress_global.setMinimumDuration(0)

        def callback_global(x):
            progress_global.setValue(x * 100)
            QApplication.processEvents()

        callback_global(0)
        progress_global.canceled.connect(
            functools.partial(self.cancel_progress_dialog, progress_global))

        ret_filenames = []
        shifts = {}
        for i, filename in enumerate(to_align_paths):
            callback_global(i / float(len(to_align_paths)))
            progress_shifts = QProgressDialog(
                'Finding best shifts for ' + filename, 'Abort', 0, 100, self)
            progress_shifts.setAutoClose(True)
            progress_shifts.setMinimumDuration(0)
            progress_shifts.canceled.connect(
                functools.partial(self.cancel_progress_dialog,
                                  progress_shifts))

            progress_load = QProgressDialog('Loading ' + filename, 'Abort', 0,
                                            100, self)
            progress_load.setAutoClose(True)
            progress_load.setMinimumDuration(0)

            def callback_load(x):
                progress_load.setValue(x * 100)
                QApplication.processEvents()

            frames = file_io.load_file(filename, callback_load)
            callback_load(1)

            to_align_frame = frames[self.ref_no.value()]
            frame_no, h, w = frames.shape
            to_align_frame = np.reshape(to_align_frame, (1, h, w))
            to_align_frame = self.crop_border(
                self.spatial_filter(to_align_frame))[0]
            # to_align_frame = to_align_frame[0]

            if self.scaling_checkbox.isChecked(
            ) or self.rotation_checkbox.isChecked():
                shift = ird.similarity(reference_frame, to_align_frame)
                if not self.rotation_checkbox.isChecked():
                    shift['angle'] = 0.0
                if not self.scaling_checkbox.isChecked():
                    shift['scale'] = 1.0
            else:
                shift = ird.translation(reference_frame, to_align_frame)
            shifts[filename] = shift

            # Apply the found shift (row i) to all stacks in row i
            for col_key in self.shift_table_data.keys():
                # i = row
                filename = os.path.normpath(
                    os.path.join(self.project.path,
                                 self.shift_table_data[col_key][i])) + '.npy'
                frames = file_io.load_file(filename, callback_load)
                callback_load(1)

                progress_apply = QProgressDialog(
                    'Applying shifts for ' + filename, 'Abort', 0, 100, self)
                progress_apply.setAutoClose(True)
                progress_apply.setMinimumDuration(0)

                def callback_apply(x):
                    progress_apply.setValue(x * 100)
                    QApplication.processEvents()

                progress_apply.canceled.connect(
                    functools.partial(self.cancel_progress_dialog,
                                      progress_apply))

                shifted_frames = self.apply_shifts(frames, shift,
                                                   callback_apply)
                path = pfs.save_project(filename, self.project, shifted_frames,
                                        self.Defaults.manip, 'video')
                pfs.refresh_list(self.project, self.video_list, [],
                                 self.Defaults.list_display_type,
                                 self.toolbutton_values)
                ret_filenames.append(path)
        callback_global(1)

        # save shifts to csv
        save_loc = QFileDialog.getSaveFileName(
            self, 'Save Shifts',
            QSettings().value('path_of_last_project'), '(*.csv)')[0]
        if save_loc:
            fields = list(shifts[list(shifts.keys())[0]].keys())
            for key in list(shifts.keys()):
                for field in fields:
                    if field not in ['angle', 'tvec', 'scale', 'success']:
                        del shifts[key][field]
            for key in list(shifts.keys()):
                x, y = shifts[key]['tvec']
                shifts[key]['tvec-x'] = x
                shifts[key]['tvec-y'] = y
                del shifts[key]['tvec']

            fields = ["File aligned"] + list(shifts[list(
                shifts.keys())[0]].keys())
            with open(save_loc, "w", newline='') as f:
                w = csv.DictWriter(f, fields)
                w.writeheader()
                for k in shifts:
                    w.writerow(
                        {field: shifts[k].get(field) or k
                         for field in fields})

        return ret_filenames
Example #18
    def execute_primary_function(self, input_paths=None):
        """Return filenames of generated videos"""
        [reference_frame, to_align_paths] = self.get_alignment_inputs(input_paths)
        assert([os.path.splitext(os.path.basename(path))[0] for path in to_align_paths] ==
               self.shift_table_data[self.Labels.shift_table_col1])

        progress_global = QProgressDialog('Total progress aligning all files', 'Abort', 0, 100, self)
        progress_global.setAutoClose(True)
        progress_global.setMinimumDuration(0)
        def callback_global(x):
            progress_global.setValue(x * 100)
            QApplication.processEvents()
        callback_global(0)
        progress_global.canceled.connect(functools.partial(self.cancel_progress_dialog, progress_global))

        ret_filenames = []
        shifts = {}
        for i, filename in enumerate(to_align_paths):
            callback_global(i / float(len(to_align_paths)))
            progress_shifts = QProgressDialog('Finding best shifts for ' + filename, 'Abort', 0, 100, self)
            progress_shifts.setAutoClose(True)
            progress_shifts.setMinimumDuration(0)
            progress_shifts.canceled.connect(functools.partial(self.cancel_progress_dialog, progress_shifts))

            progress_load = QProgressDialog('Loading ' + filename, 'Abort', 0, 100, self)
            progress_load.setAutoClose(True)
            progress_load.setMinimumDuration(0)
            def callback_load(x):
                progress_load.setValue(x * 100)
                QApplication.processEvents()
            frames = file_io.load_file(filename, callback_load)
            callback_load(1)

            to_align_frame = frames[self.ref_no.value()]
            frame_no, h, w = frames.shape
            to_align_frame = np.reshape(to_align_frame, (1, h, w))
            to_align_frame = self.crop_border(self.spatial_filter(to_align_frame))[0]
            # to_align_frame = to_align_frame[0]

            if self.scaling_checkbox.isChecked() or self.rotation_checkbox.isChecked():
                shift = ird.similarity(reference_frame, to_align_frame)
                if not self.rotation_checkbox.isChecked():
                    shift['angle'] = 0.0
                if not self.scaling_checkbox.isChecked():
                    shift['scale'] = 1.0
            else:
                shift = ird.translation(reference_frame, to_align_frame)
            shifts[filename] = shift

            # Apply the found shift (row i) to all stacks in row i
            for col_key in self.shift_table_data.keys():
                # i = row
                filename = os.path.normpath(os.path.join(self.project.path, self.shift_table_data[col_key][i])) + '.npy'
                frames = file_io.load_file(filename, callback_load)
                callback_load(1)

                progress_apply = QProgressDialog('Applying shifts for ' + filename, 'Abort', 0, 100, self)
                progress_apply.setAutoClose(True)
                progress_apply.setMinimumDuration(0)
                def callback_apply(x):
                    progress_apply.setValue(x * 100)
                    QApplication.processEvents()
                progress_apply.canceled.connect(functools.partial(self.cancel_progress_dialog, progress_apply))

                shifted_frames = self.apply_shifts(frames, shift, callback_apply)
                path = pfs.save_project(filename, self.project, shifted_frames, self.Defaults.manip, 'video')
                pfs.refresh_list(self.project, self.video_list, [],
                                 self.Defaults.list_display_type, self.toolbutton_values)
                ret_filenames.append(path)
        callback_global(1)

        # save shifts to csv
        save_loc = QFileDialog.getSaveFileName(self, 'Save Shifts', QSettings().value('path_of_last_project'),
                                               '(*.csv)')[0]
        if save_loc:
            fields = list(shifts[list(shifts.keys())[0]].keys())
            for key in list(shifts.keys()):
                for field in fields:
                    if field not in ['angle', 'tvec', 'scale', 'success']:
                        del shifts[key][field]
            for key in list(shifts.keys()):
                x, y = shifts[key]['tvec']
                shifts[key]['tvec-x'] = x
                shifts[key]['tvec-y'] = y
                del shifts[key]['tvec']

            fields = ["File aligned"] + list(shifts[list(shifts.keys())[0]].keys())
            with open(save_loc, "w", newline='') as f:
                w = csv.DictWriter(f, fields)
                w.writeheader()
                for k in shifts:
                    w.writerow({field: shifts[k].get(field) or k for field in fields})

        return ret_filenames
Example #19
def crossCorr_imreg_dft(img1, img2):
    shift = ird.translation(img1, img2, filter_pcorr=8, odds=1)
    # filter_pcorr (int): radius of the minimum spectrum filter for translation detection;
    # use the filter when detection fails. Values > 3 are likely not useful.
    shift = shift["tvec"].round(4)
    return np.asarray([shift[1], shift[0]])
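Note that this wrapper flips the result into (x, y) order, while imreg_dft itself works in (row, col) = (y, x). To feed the value back into transform_img, flip it again (sketch):

xy = crossCorr_imreg_dft(img1, img2)
img2_aligned = ird.transform_img(img2, tvec=(xy[1], xy[0]))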
Example #20
import os

import scipy as sp
import scipy.misc

import imreg_dft as ird

basedir = os.path.join('..', 'examples')
# the TEMPLATE
im0 = sp.misc.imread(os.path.join(basedir, "sample1.png"), True)
# the image to be transformed
im1 = sp.misc.imread(os.path.join(basedir, "sample2.png"), True)
result = ird.translation(im0, im1)
tvec = result["tvec"].round(4)
# the Transformed IMaGe.
timg = ird.transform_img(im1, tvec=tvec)

# Maybe we don't want to show plots all the time
if os.environ.get("IMSHOW", "yes") == "yes":
    import matplotlib.pyplot as plt
    ird.imshow(im0, im1, timg)
    plt.show()

print("Translation is {}, success rate {:.4g}".format(tuple(tvec),
                                                      result["success"]))
Example #21
import os

import scipy as sp
import scipy.misc

import imreg_dft as ird


basedir = os.path.join('..', 'examples')
# the TEMPLATE
im0 = sp.misc.imread(os.path.join(basedir, "sample1.png"), True)
# the image to be transformed
im1 = sp.misc.imread(os.path.join(basedir, "sample2.png"), True)
result = ird.translation(im0, im1)
tvec = result["tvec"].round(4)
# the Transformed IMaGe.
timg = ird.transform_img(im1, tvec=tvec)

# Maybe we don't want to show plots all the time
if os.environ.get("IMSHOW", "yes") == "yes":
    import matplotlib.pyplot as plt
    ird.imshow(im0, im1, timg)
    plt.show()

print("Translation is {}, success rate {:.4g}"
      .format(tuple(tvec), result["success"]))
Example #22
import os

import scipy as sp
import scipy.misc
import matplotlib.pyplot as plt

import imreg_dft as ird


basedir = os.path.join('..', 'examples')
# the TEMPLATE
im0 = sp.misc.imread(os.path.join(basedir, "sample1.png"), True)
# the image to be transformed
im1 = sp.misc.imread(os.path.join(basedir, "sample2.png"), True)
t0, t1 = ird.translation(im0, im1)
# the Transformed IMaGe.
timg = ird.transform_img(im1, tvec=(t0, t1))
ird.imshow(im0, im1, timg)
plt.show()
print(t0, t1)
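This version mirrors the historical imreg_dft example, in which translation returned a plain pair rather than the result dict used in the two examples above. With a current release the same script would read the vector from the dict (sketch):

result = ird.translation(im0, im1)
timg = ird.transform_img(im1, tvec=result["tvec"])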
Example #23
    def process(self, obj_data):
        """
        Coregister images

        @param obj_data: Image data wrapper
        """

        reg_type = self.ap_paramList[0]()

        master_burst_list = None
        for label, data in obj_data.getIterator():
            if master_burst_list is None:
                master_burst_list = select_valid_lines(
                    data, obj_data.info(label)['Tree'], cut=False)
                master_valid_lines = get_valid_lines(
                    obj_data.info(label)['Tree'], per_burst=True)
            else:

                burst_valid_lines = get_valid_lines(
                    obj_data.info(label)['Tree'], per_burst=True)
                valid_lines = [
                    np.logical_and(master_lines, burst_lines)
                    for master_lines, burst_lines in zip(
                        master_valid_lines, burst_valid_lines)
                ]

                burst_list = select_valid_lines(data,
                                                obj_data.info(label)['Tree'],
                                                cut=False)
                lines_per_burst = int(
                    obj_data.info(label)['Tree'].find(
                        'swathTiming/linesPerBurst').text)
                samples_per_burst = int(
                    obj_data.info(label)['Tree'].find(
                        'swathTiming/samplesPerBurst').text)
                lines, samples = np.meshgrid(np.arange(lines_per_burst),
                                             np.arange(samples_per_burst),
                                             indexing='ij')
                ramp = SentinelRamp(obj_data.info(label))

                for index, (burst_lines,
                            burst) in enumerate(zip(valid_lines, burst_list)):

                    start_valid_line = np.argmax(burst_lines)
                    end_valid_line = lines_per_burst - np.argmax(
                        burst_lines[::-1])

                    if self._image_limits is None:
                        line_slice = slice(start_valid_line, end_valid_line)
                        sample_slice = slice(0, samples_per_burst)

                    elif self._image_limits[index] is not None:
                        line_slice = self._image_limits[index][0]
                        sample_slice = self._image_limits[index][1]

                        if line_slice.start is None or \
                           line_slice.start < start_valid_line:
                            line_slice_start = start_valid_line

                        else:
                            line_slice_start = line_slice.start

                        if line_slice.stop is None or \
                           line_slice.stop > end_valid_line:
                            line_slice_stop = end_valid_line

                        else:
                            line_slice_stop = line_slice.stop

                        line_slice = slice(line_slice_start, line_slice_stop)

                    else:
                        continue

                    master_burst = master_burst_list[index][line_slice,
                                                            sample_slice]

                    burst = burst[line_slice, sample_slice]
                    deramp = -ramp(lines[line_slice, sample_slice],
                                   samples[line_slice, sample_slice], index)

                    if reg_type == 'imreg_translation':

                        for i in range(self._num_iterations):

                            shift = ird.translation(np.abs(master_burst),
                                                    np.abs(burst))

                            transform_matrix = np.array(
                                [[1, 0, shift['tvec'][1]],
                                 [0, 1, shift['tvec'][0]]])

                            burst, deramp = transform_slc(
                                burst, deramp, transform_matrix)

                    elif reg_type == 'imreg_affine':

                        shift = ird.similarity(np.abs(master_burst),
                                               np.abs(burst),
                                               numiter=self._num_iterations)

                        im_angle = np.deg2rad(shift['angle'])
                        im_scale = shift['scale']
                        im_tl = shift['tvec']

                        transform_matrix = np.array(
                            [[
                                im_scale * np.cos(im_angle),
                                -im_scale * np.sin(im_angle), im_tl[1]
                            ],
                             [
                                 im_scale * np.sin(im_angle),
                                 im_scale * np.cos(im_angle), im_tl[0]
                             ]],
                            dtype=np.float32)
                        burst = transform_slc(burst, deramp,
                                              transform_matrix)[0]


                    elif reg_type == 'keypoints':
                        transform_matrix = keypoints_align(
                            scale_image(np.abs(master_burst)),
                            scale_image(np.abs(burst)))
                        burst = transform_slc(burst, deramp,
                                              transform_matrix)[0]

                    if line_slice.start is None:
                        line_start = 0
                    elif line_slice.start < 0:
                        line_start = lines_per_burst + line_slice.start
                    else:
                        line_start = line_slice.start

                    if line_slice.stop == None:
                        line_end = lines_per_burst
                    elif line_slice.stop < 0:
                        line_end = lines_per_burst + line_slice.stop
                    else:
                        line_end = line_slice.stop

                    full_data_slice = slice(
                        lines_per_burst * index + line_start,
                        lines_per_burst * index + line_end)

                    data[full_data_slice, sample_slice] = burst
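For reference, a minimal self-contained sketch of the 'imreg_translation' branch above. The transform_slc helper is not shown in this excerpt, so shift_complex_image below is a hypothetical stand-in that builds the same 2x3 matrix from the returned tvec and warps the real and imaginary parts separately (assuming OpenCV for the warp):

import numpy as np
import cv2
import imreg_dft as ird

def shift_complex_image(master, slave):
    """Estimate the translation between two complex images on their
    amplitudes, then apply it to the complex slave image."""
    shift = ird.translation(np.abs(master), np.abs(slave))
    ty, tx = shift['tvec']  # imreg_dft returns (row, col) = (y, x) order
    matrix = np.float32([[1, 0, tx],
                         [0, 1, ty]])
    h, w = slave.shape
    # cv2.warpAffine cannot handle complex arrays; warp real/imag separately
    re = cv2.warpAffine(slave.real.astype(np.float32), matrix, (w, h))
    im = cv2.warpAffine(slave.imag.astype(np.float32), matrix, (w, h))
    return re + 1j * im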
Example n. 28
def ratio(verbose, logger, work_out_path, crop, res, register, union, h5_save,
          tiff_save, frange):
    # Start time
    time_start = timer()

    # Input background subtracted image stack
    try:
        f = h5py.File(work_out_path + '_back.h5', 'r')
    except OSError:
        raise ImportError(work_out_path + "_back.h5 not found")

    # Input acceptor stack
    try:
        acceptor = np.array(f['acceptor'])
        acceptor_frange = np.array(f.attrs['acceptor_frange'])
    except KeyError:
        raise ImportError("Acceptor stack background not processed")

    # Input donor stack
    try:
        donor = np.array(f['donor'])
        donor_frange = np.array(f.attrs['donor_frange'])
    except KeyError:
        raise ImportError("Donor stack background not processed")

    f.close()

    # Find frame dimensions and intersection between processed frames and input frames
    Ydim, Xdim = acceptor.shape[1:]
    brange = np.intersect1d(frange, acceptor_frange, return_indices=True)[2]

    # Set default values for crop
    if (crop[2] == 0):
        crop[2] = Xdim
    if (crop[3] == 0):
        crop[3] = Ydim

    # Testing input values
    test.assert_array_equal(
        acceptor_frange, donor_frange,
        "Acceptor and Donor stacks have different frame numbers")
    assert (
        sum(~np.isin(frange, acceptor_frange)) == 0
    ), "background subtracted stacks have not been processed for all frame values"
    assert (crop[2] >= crop[0]), "crop[2] must be >= crop[0]"
    assert (crop[3] >= crop[1]), "crop[3] must be >= crop[1]"
    assert (crop[0] >= 0), "crop[0] must be >= 0"
    assert (crop[2] <= Xdim), "crop[2] must be <= the width of the image"
    assert (crop[1] >= 0), "crop[1] must be >= 0"
    assert (crop[3] <= Ydim), "crop[3] must be <= the height of the image"

    # Image crop
    acceptorc = acceptor[:, crop[1]:crop[3], crop[0]:crop[2]]
    donorc = donor[:, crop[1]:crop[3], crop[0]:crop[2]]

    # Search for saved ratio images
    try:
        # Input files into dictionaries
        f2 = h5py.File(work_out_path + '_ratio_back.h5', 'r')
        ratio_frange = np.array(f2.attrs['ratio_frange'])

        acceptori = dict(list(zip(ratio_frange, np.array(f2['acceptori']))))
        donori = dict(list(zip(ratio_frange, np.array(f2['donori']))))
        f2.close()
    except (OSError, KeyError):
        # Initialize empty dictionaries for intensities
        acceptori, donori = {}, {}

    # Initialize empty dictionaries for pixel counts
    acceptornz, donornz = {}, {}

    # Set up constants for loop
    mult = np.float32(255) / np.float32(res)
    ires = 100 / np.float32(res)
    ipix = 100 / (Xdim * Ydim)

    # Loop through frames
    for count, frame in list(zip(frange, brange)):
        if (verbose):
            print("(Ratio Processing) Frame Number: " + str(count + 1))

        # Image registration for donor channel
        if (register):
            trans = ird.translation(acceptorc[frame, :, :],
                                    donorc[frame, :, :])
            tvec = trans["tvec"].round(4)
            donorc[frame, :, :] = np.round(
                ird.transform_img(donorc[frame, :, :], tvec=tvec))

        # Thresholding
        acceptors = np.uint8(np.float32(acceptorc[frame, :, :]) * mult)
        donors = np.uint8(np.float32(donorc[frame, :, :]) * mult)

        # Check for max image intensity
        if np.uint32(np.amax(acceptors)) + np.uint32(np.amax(donors)) > 70:
            # Otsu thresholding for normal intensity images
            _, A_thresh = cv2.threshold(acceptors, 0, 255,
                                        cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            _, B_thresh = cv2.threshold(donors, 0, 255,
                                        cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        else:
            # Simple thresholding for low intensity images
            _, A_thresh = cv2.threshold(acceptors, 3, 255, cv2.THRESH_BINARY)
            _, B_thresh = cv2.threshold(donors, 3, 255, cv2.THRESH_BINARY)

        # Setting values below threshold to zero
        acceptorc[frame, :, :] *= np.uint16(A_thresh / 255)
        donorc[frame, :, :] *= np.uint16(B_thresh / 255)

        # Keep only foreground pixels present in both donor and acceptor channels, so the two channels overlap exactly
        if (union):
            # Create mask for overlapping pixels
            C = np.multiply(A_thresh, B_thresh)
            C[C > 0] = 1

            # Set non-overlapping pixels to zero
            acceptorc[frame, :, :] *= C
            donorc[frame, :, :] *= C

        # Non-zero (foreground) pixels as a percentage of total pixels per frame
        acceptornz[count] = np.count_nonzero(A_thresh) * ipix
        donornz[count] = np.count_nonzero(B_thresh) * ipix

        # Median non-zero intensity as a percentage of the bit depth, per frame, for the acceptor stack
        if (np.amax(acceptorc[frame, :, :]) > 0.0):
            acceptori[count] = ndimage.median(acceptorc[frame, :, :],
                                              labels=A_thresh / 255) * ires
        else:
            acceptori[count] = 0

        # Median non-zero intensity as a percentage of the bit depth, per frame, for the donor stack
        if (np.amax(donorc[frame, :, :]) > 0.0):
            donori[count] = ndimage.median(donorc[frame, :, :],
                                           labels=B_thresh / 255) * ires
        else:
            donori[count] = 0

    # End time
    time_end = timer()
    time_elapsed = str(int(time_end - time_start) + 1)
    if (verbose):
        print(("(Ratio Processing) Time: " + time_elapsed + " second(s)"))

    # Update log file to save stack metrics
    print_range = [x + 1 for x in frange]
    if (max(np.ediff1d(frange, to_begin=frange[0])) > 1):
        logger.info('(Ratio Processing) ' + 'frames: ' +
                    ",".join(list(map(str, print_range))) + ', time: ' +
                    time_elapsed + ' sec, save: ' + str(h5_save))
    else:
        logger.info('(Ratio Processing) ' + 'frames: ' + str(print_range[0]) +
                    '-' + str(print_range[-1]) + ', time: ' + time_elapsed +
                    ' sec, save: ' + str(h5_save))

    # Create plot to showcase median intensity over frame number and the number of foreground pixels per channel (NON-bleach corrected)
    time_evolution(acceptori, donori, work_out_path,
                   '_intensity_nonbleach.png', 'Median Intensity/Bit Depth',
                   h5_save)
    time_evolution(acceptornz, donornz, work_out_path, '_pixelcount.png',
                   'Foreground/Total Image Pixels', h5_save)

    # Calculate 8-bit ratio image with NON-bleach corrected donor and acceptor channels
    if (h5_save or tiff_save):
        # Calculate ratio stack
        ratio = ratio_calc(acceptorc, donorc)

        # Save processed images, non-zero pixel count, median intensity and ratio processed images in HDF5 format
        if (h5_save):
            acceptori_brange = np.array([acceptori[a] for a in brange])
            donori_brange = np.array([donori[a] for a in brange])

            h5_time_start = timer()
            h5(acceptorc[brange, :, :], 'acceptor',
               work_out_path + '_ratio_back.h5', frange)
            h5(donorc[brange, :, :], 'donor', work_out_path + '_ratio_back.h5',
               frange)
            h5(acceptori_brange, 'acceptori', work_out_path + '_ratio_back.h5',
               frange)
            h5(donori_brange, 'donori', work_out_path + '_ratio_back.h5',
               frange)
            h5(ratio[brange, :, :], 'ratio', work_out_path + '_ratio_back.h5',
               frange)
            h5_time_end = timer()

            if (verbose):
                print(("Saving Acceptor, Donor and Ratio stacks in " +
                       work_out_path + '_ratio_back.h5' + ' [Time: ' +
                       str(int(h5_time_end - h5_time_start) + 1) +
                       " second(s)]"))

        # Save NON-bleach corrected ratio image as TIFF
        if (tiff_save):
            tiff_time_start = timer()
            tiff(ratio, work_out_path + '_ratio_back.tif')
            tiff_time_end = timer()

            if (verbose):
                print(("Saving unbleached Ratio TIFF stack in " +
                       work_out_path + '_ratio_back.tif' + ' [Time: ' +
                       str(int(tiff_time_end - tiff_time_start) + 1) +
                       " second(s)]"))
Example n. 30
    def calc_blob_threads(self):
        """
        Calculates blob threads: finds peaks in each time point, optionally
        registers consecutive frames, links the peaks into threads, infills
        missing positions, and removes threads that collide with the image
        border.
        """
        for i in range(self.t):
            im1 = self.im.get_t()
            im1 = medFilter2d(im1)
            im1 = gaussian3d(im1, self.gaussian)
            if self.threed:
                peaks = findpeaks3d(
                    np.array(im1 *
                             np.array(im1 > np.quantile(im1, self.quantile))))
            else:
                peaks = findpeaks2d(
                    np.array(im1 *
                             np.array(im1 > np.quantile(im1, self.quantile))))
            peaks = reg_peaks(im1, peaks, thresh=self.reg_peak_dist)

            if self.register and i != 0:
                _off = ird.translation(
                    self.im.get_tbyf(i - 1,
                                     self.frames[int(len(self.frames) / 2)]),
                    im1[int(len(self.frames) / 2)])['tvec']

                _off = np.insert(_off, 0, 0)
                #peaks = peaks+ _off
                #print(_off)
                #print(peaks)
                self.spool.reel(peaks, self.anisotropy, offset=_off)
            else:
                self.spool.reel(peaks, self.anisotropy)

            if not self.suppress_output:
                print('\r' + 'Frames Processed: ' + str(i + 1) + '/' +
                      str(self.t),
                      sep='',
                      end='',
                      flush=True)
        print('\nInfilling...')
        self.spool.infill()

        imshape = tuple([len(self.frames)]) + self.im.sizexy

        def collided(positions, imshape, window=3):
            for i in [1, 2]:
                if np.sum(positions[:, i].astype(int) < window) != 0:
                    return True

                if np.sum(imshape[i] - positions[:, i].astype(int) < window +
                          1) != 0:
                    return True

            if np.sum(positions[:, 0].astype(int) < 0) != 0 or np.sum(
                    positions[:, 0].astype(int) > imshape[0] - 1) != 0:
                return True

            return False

        print('Removing bad threads')
        #self.remove_bad_threads()
        destroy = []
        _a = len(self.spool.threads)
        for i in range(_a):
            if collided(self.spool.threads[i].positions, imshape):
                destroy.append(i)
            print('\r' + 'Blob Threads Checked: ' + str(i + 1) + '/' + str(_a),
                  sep='',
                  end='',
                  flush=True)
        print('\n')
        destroy = sorted(list(set(destroy)), reverse=True)
        if destroy:
            for item in destroy:
                self.spool.threads.pop(item)

        self._merge_within_z()

        self.spool.make_allthreads()
        print('Saving blob timeseries as numpy object...')
        mkdir(self.root + 'extractor-objects')
        file_pi = open(self.root + 'extractor-objects/threads.obj', 'wb')
        pickle.dump(self.spool, file_pi)
        file_pi.close()
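The registration branch above pads imreg_dft's 2-D shift to three dimensions before reeling the peaks in; a minimal sketch of that step, where drift_offset is a hypothetical helper and the z shift is assumed to be zero, as in the code above:

import numpy as np
import imreg_dft as ird

def drift_offset(prev_frame, curr_frame):
    """Estimate (dy, dx) between two 2-D frames and pad it to a
    (dz, dy, dx) offset usable with 3-D (z, y, x) peak coordinates."""
    off = ird.translation(prev_frame, curr_frame)['tvec']
    return np.insert(off, 0, 0.0)  # no shift along z

# usage: peaks is an N x 3 array of (z, y, x) blob positions
# peaks = peaks + drift_offset(prev_mid_slice, curr_mid_slice)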
Example n. 31
def nh_find_simulated_rings_lorri():

# =============================================================================
# Now go thru the synthetic ring images. 
#    Load and stack the synthetic implanted images.
#    Load and stack the original 'raw' frames
#    Difference them, and see if we can find a ring in there.
# =============================================================================

    dir_porter = '/Users/throop/Dropbox/Data/NH_KEM_Hazard/Porter_Sep17/'
    dir_synthetic = '/Users/throop/Dropbox/Data/NH_KEM_Hazard/synthetic/'
    
    do_subpixel = False  # Flag: Do we use sub-pixel shifting when doing the flattening? 
                         # It is slower and in theory better, but in reality makes a trivial difference.

    # Start up SPICE
    
    file_kernel = 'kernels_kem.tm'
    sp.furnsh(file_kernel)
    
    # Load the images into a table
    
    images_raw = image_stack(dir_porter)
    images_syn = image_stack(dir_synthetic, do_force=False)
    
    stretch = astropy.visualization.PercentileInterval(95)
    plt.set_cmap('Greys_r')

    # =============================================================================
    # If desired, do a one-time routine for the synthetic images:
    #  extract the I/F and ring size from the filenames, and append that to the table.
    # This routine should be run after creating new synthetic images (e.g., adding an I/F value) 
    # =============================================================================
    
    DO_APPEND = False
    if (DO_APPEND):

        t_syn = images_syn.t
        num_images_syn = (np.shape(t_syn))[0]

        iof_ring  = np.zeros(num_images_syn, dtype=float)
        size_ring = np.zeros(num_images_syn, dtype='U30')
    
        for i in range(num_images_syn):
            f = t_syn['filename_short'][i]
            m = re.search('ring_(.*)_iof(.*)_K', f)  # Call regexp to parse it.
            iof_ring[i] = float(m.group(2))
            size_ring[i] = m.group(1)
            
        t_syn['size_ring'] = size_ring
        t_syn['iof_ring']  = iof_ring
        images_syn.t = t_syn
        images_syn.save()           # Save the whole pickle archive (including images and table) back to disk
    
    data_raw = images_raw.data
    data_syn = images_syn.data
    
    t_raw = images_raw.t
    t_syn = images_syn.t
    
    num_images_raw = (np.shape(t_raw))[0]
    num_images_syn = (np.shape(t_syn))[0]

    # Look up the time offset, from the image title. (Would be better to have it stored in table, but this will do.)

    match = re.search('_K(.*)d', t_syn['filename_short'][0])

    dt_ca = (float(match.group(1)) * u.day).to('s')  # group(1) is a string; convert before attaching units

    utc_ca = '2019 1 Jan 05:33'
    et_ca  = sp.utc2et(utc_ca)
    et_obs = et_ca + dt_ca
    
    # Set the pixel scale
    
    vec,lt = sp.spkezr('2014 MU69', et_obs, 'J2000', 'LT', 'New Horizons')
    vec_sc_targ = vec[0:3]
    dist_target_km = (sp.vnorm(vec_sc_targ)*u.km).value    
    scale_pix_lorri_1x1_rad = 0.3*hbt.d2r / 1024
    scale_pix_lorri_4x4_rad = scale_pix_lorri_1x1_rad * 4
    scale_pix_km_dict = {'1X1' : scale_pix_lorri_1x1_rad * dist_target_km,
                         '4X4' : scale_pix_lorri_4x4_rad * dist_target_km}
    
    # Create a bunch of possible image sets, based on various parameters
    
    # Indices for 'raw' images
    
    indices_sep17_raw = t_raw['et'] > sp.utc2et('15 sep 2017')  # The position of MU69 has changed a few pixels.
                                                                # We can't blindly co-add between sep and pre-sep
    indices_jan17_raw = t_raw['et'] < sp.utc2et('1 sep 2017')
                                                        
    indices_rot0_raw  = t_raw['angle'] < 180   # One rotation angle
    indices_rot90_raw = t_raw['angle'] > 180   # The other rotation angle
    indices_10sec_raw = np.logical_and( t_raw['exptime'] < 10, t_raw['exptime'] > 5  )
    indices_20sec_raw = np.logical_and( t_raw['exptime'] < 20, t_raw['exptime'] > 10 )
    
    indices_30sec_raw = np.logical_and( t_raw['exptime'] < 30, t_raw['exptime'] > 20 )
    
    indices_1x1_raw = t_raw['naxis1'] == 1024
    indices_4x4_raw = t_raw['naxis1'] == 256
    
    indices_30sec_4x4_raw = np.logical_and(indices_4x4_raw, indices_30sec_raw) # 94
    
    # Indices for synthetic images
    
    indices_ring_small_syn = t_syn['size_ring'] == 'small'
    indices_ring_large_syn = t_syn['size_ring'] == 'large'
    
    indices_iof_1em7_syn = t_syn['iof_ring'] == 1e-7
    indices_iof_3em7_syn = t_syn['iof_ring'] == 3e-7
    indices_iof_1em6_syn = t_syn['iof_ring'] == 1e-6
    indices_iof_1em5_syn = t_syn['iof_ring'] == 1e-5
    indices_iof_1em4_syn = t_syn['iof_ring'] == 1e-4
    
    indices_small_1em7_syn = np.logical_and(indices_iof_1em7_syn, indices_ring_small_syn)
    indices_small_3em7_syn = np.logical_and(indices_iof_3em7_syn, indices_ring_small_syn)
    indices_small_1em6_syn = np.logical_and(indices_iof_1em6_syn, indices_ring_small_syn)
    indices_small_1em5_syn = np.logical_and(indices_iof_1em5_syn, indices_ring_small_syn)
    indices_small_1em4_syn = np.logical_and(indices_iof_1em4_syn, indices_ring_small_syn)
    indices_large_1em7_syn = np.logical_and(indices_iof_1em7_syn, indices_ring_large_syn)
    indices_large_3em7_syn = np.logical_and(indices_iof_3em7_syn, indices_ring_large_syn)
    indices_large_1em6_syn = np.logical_and(indices_iof_1em6_syn, indices_ring_large_syn)
    indices_large_1em5_syn = np.logical_and(indices_iof_1em5_syn, indices_ring_large_syn)
    indices_large_1em4_syn = np.logical_and(indices_iof_1em4_syn, indices_ring_large_syn)

    # Choose which index. ** THIS IS WHERE WE SET THE RING TO USE!!
    
    indices_raw = indices_30sec_4x4_raw.copy()   # 94 of 344
    indices_syn = indices_small_1em6_syn.copy()  # 94 of 752

    # Now take the first half of the raw indices, and the second half of the synthetic ones.
    # This is to assure that we are using different images for the two stacks! Otherwise, the results are trivial.

    frames_max = int(np.sum(indices_raw) / 2)         # Number of frames to keep in each stack (half of 94)

    w = np.where(indices_raw)[0]
    indices_raw[w[frames_max]:] = False          # Deactivate all raw frames *from* frames_max on

    w = np.where(indices_syn)[0]
    indices_syn[:w[frames_max]] = False          # Deactivate all synthetic frames *before* frames_max
    
    # Set the indices
    
    images_raw.set_indices(indices_raw)
    images_syn.set_indices(indices_syn)
    
    # Do the flattening
        
    arr_raw = images_raw.flatten(do_subpixel=do_subpixel)
    arr_syn = images_syn.flatten(do_subpixel=do_subpixel)
    
#    arr_raw_sub = images_raw.flatten(do_subpixel=True)    
#    arr_syn_sub = images_syn.flatten(do_subpixel=True)
    
    # Extract various fields from the data table. We can look up from any of the images -- they should be all the same.
    
    t_syn       = images_syn.t  # Get the data table
    
    iof_ring    = t_syn[indices_syn]['iof_ring'][0]
    size_ring   = t_syn[indices_syn]['size_ring'][0]
    exptime     = t_syn[indices_syn]['exptime'][0]
    
    # The two flattened images need some offsetting. Do that.
    
    shift = ird.translation(arr_raw, arr_syn)['tvec']
#    shift = np.round(shift).astype('int')
    
#    arr_syn_shift = np.roll(np.roll(arr_syn, int(round(shift[0])), axis=0), int(round(shift[1])), axis=1)
    arr_syn_shift = scipy.ndimage.shift(arr_syn, shift, order=5)  # Nominally allows sub-pixel shifts, though the commented tests below suggest otherwise

#    a = arr_syn.copy()
#    a_05_05 = scipy.ndimage.shift(arr_syn, (0.5, 0.5), order=5)  # Ugh. 0.5, 0.5 and 1, 1 are *exactly* the same.
#    a_1_05 = scipy.ndimage.shift(arr_syn, (1, 0.5), order=5)
#    a_1_1 = scipy.ndimage.shift(arr_syn, (1, 1), order=5)
#    a_1_15 = scipy.ndimage.shift(arr_syn, (1, 1.5), order=5)
#    a_1_0 = scipy.ndimage.shift(arr_syn, (1, 0), order=5)
#    a_05_0 = scipy.ndimage.shift(arr_syn, (0.5, 0), order=5)
    
    arr_diff  = arr_syn_shift  - arr_raw
    
    pos = (images_raw.y_pix_mean*4, images_raw.x_pix_mean*4)
    
    # Set the binning width of the radial profiles

    binning_pix = 5
    
    # Extract the radial profiles
    
    (dist_pix_1d, profile_1d_median) = get_radial_profile_circular(arr_diff, pos, method='median', width=binning_pix)
    (dist_pix_1d, profile_1d_mean)   = get_radial_profile_circular(arr_diff, pos, method='mean', width=binning_pix)

    str_title = ('Synthetic ring - raw, I/F = {:.0e}, {}, {} x {:.1f}s'.format(
            iof_ring, size_ring, frames_max, exptime))
    
    plt.imshow(stretch(arr_diff))
    plt.title(str_title)
    plt.plot(pos[1], pos[0], marker='.', color='red')
    plt.show()
    
    # Set the scale for the effective mode of these observations. Many are taken as 4x4, but we've rebinned to 1x1
    
    if (np.shape(arr_raw)[0] == 1024):
        scale_mode = '1X1'
    else:
        scale_mode = '4X4'
    scale_pix_km = scale_pix_km_dict[scale_mode]
    
    # Make a plot of the radial profile. Don't plot the innermost bin. It is useless, since it has so few pixels in it.
    
    hbt.figsize((12,8))
    
    plt.plot(dist_pix_1d[1:] * scale_pix_km, profile_1d_median[1:], label = 'Annulus median', alpha = 0.7)
#    plt.plot(dist_pix_1d[1:] * scale_pix_km, profile_1d_mean[1:],   label = 'Mean',   alpha = 0.2)
    plt.xlabel('Distance [km]')
    plt.ylabel('DN per pixel')
    plt.title(str_title + ', binning = {}'.format(binning_pix))
    plt.xlim((0,30000))
    
    # Set the y axis range explicitly: matplotlib autoscales to the full data range, not just the visible x range
    
    ax = plt.gca()
    lims = ax.get_xlim()
    i = np.where( (dist_pix_1d * scale_pix_km > lims[0]) &  (dist_pix_1d*scale_pix_km < lims[1]) )[0]
    ax.set_ylim( profile_1d_median[i].min(), profile_1d_median[i].max() ) 
    
    plt.legend()
    plt.show()
    # plt.savefig() would fail here: it needs a file path, and must be called before plt.show()
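get_radial_profile_circular is not defined in this excerpt; a minimal numpy sketch with the same call pattern (median over circular annuli of fixed width in pixels, centered at pos = (y, x)):

import numpy as np

def radial_profile_median(arr, pos, width=5):
    """Median of arr over circular annuli of the given width, in pixels."""
    yy, xx = np.indices(arr.shape)
    r = np.hypot(yy - pos[0], xx - pos[1])
    nbins = int(r.max() / width)
    dist_1d = (np.arange(nbins) + 0.5) * width  # bin centers, in pixels
    profile_1d = np.array(
        [np.median(arr[(r >= i * width) & (r < (i + 1) * width)])
         for i in range(nbins)])
    return dist_1d, profile_1d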
Example n. 32
def navigate_image_stellar(im,
                           wcs_in,
                           name_catalog='',
                           do_plot=True,
                           method='fft',
                           title=''):
    """
    Navigate frame based on stellar images.
    Result returns is pixel shift (dy, dx).
    WCS paramaters are returned, *and* modified in place.
    """

    import imreg_dft as ird
    from astropy.wcs import WCS

    # from   astropy.vo.client import conesearch # Virtual Observatory, ie star catalogs   # DEPRECATED!
    from astroquery.vo_conesearch import conesearch, ConeSearch  # New home of conesearch

    # Inputs are the image array, and the WCS structure.
    # This routine does not do any file IO. The image array and header must be already loaded.
    # The image is assumed to be stretched properly s.t. stars can be found using DAOphot.

    NUM_STARS_PHOT = 100  # How many stars to use from DAOPhot. For noisy images, DAO will find a lot of
    # fake stars, so we need to crank this up higher than the # of cat stars.
    NUM_STARS_CAT = 50  # How many stars to use from star catalog

    DO_GSC1 = False
    DO_GSC12 = True
    DO_USNOA2 = False

    #==============================================================================
    # Calculate the image radius, in radians, based on the size and the pixel scale
    #==============================================================================

    dx_pix = hbt.sizex(im)
    dy_pix = hbt.sizey(im)
    radec_corner = wcs_in.wcs_pix2world(0, dy_pix / 2, 0)
    radec_center = wcs_in.wcs_pix2world(dx_pix / 2, dy_pix / 2, 0)
    (ra_corner, dec_corner) = radec_corner
    (ra_center, dec_center) = radec_center

    radius_image = math.sqrt((dec_corner - dec_center)**2 + (
        (ra_corner - ra_center) / np.cos(dec_corner * hbt.d2r))**2) * hbt.d2r

    radius_search_deg = radius_image * hbt.r2d

    # Read the WCS coordinates

    center_deg = wcs_in.wcs.crval  # degrees. # crval is a two-element array of [RA, Dec], in degrees

    # Stretch the image. This is just for display -- no processing.

    stretch_percent = 90
    stretch = astropy.visualization.PercentileInterval(
        stretch_percent)  # PI(90) scales array to 5th .. 95th %ile.

    # Display it

    if (do_plot):
        plt.imshow(stretch(im))

#==============================================================================
# Get stars from star catalogs
#==============================================================================

    if (DO_GSC1):
        name_cat = u'The HST Guide Star Catalog, Version 1.1 (Lasker+ 1992) 1'  # works, but 1' errors; investigating
        stars = conesearch.conesearch(center_deg,
                                      radius_search_deg,
                                      cache=True,
                                      catalog_db=name_cat)
        ra_stars = np.array(
            stars.array['RAJ2000']) * hbt.d2r  # Convert to radians
        dec_stars = np.array(
            stars.array['DEJ2000']) * hbt.d2r  # Convert to radians
    #            table_stars = Table(stars.array.data)

    if (DO_GSC12):
        #        name_cat = u'The HST Guide Star Catalog, Version 1.2 (Lasker+ 1996) 1'
        name_cat = u'Guide Star Catalog v2 1'  # Works from gobi, not tomato
        url_cat = 'http://gsss.stsci.edu/webservices/vo/ConeSearch.aspx?CAT=GSC23&'  # Works always

        with data.conf.set_temp(
                'remote_timeout',
                30):  # This is the very strange syntax to set a timeout delay.
            # The default is 3 seconds, and that times out often.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                # stars = conesearch.conesearch(wcs_in.wcs.crval, radius_search_deg, cache=True, catalog_db = url_cat)

                # The various functions of conesearch/ConeSearch/etc are quite confusing, and are in flux.
                # This line below seems to work. It does not allow an explicit catalog suggestion, but it does the job.

                c = astropy.coordinates.SkyCoord(wcs_in.wcs.crval[0],
                                                 wcs_in.wcs.crval[1],
                                                 unit='deg')
                stars = ConeSearch.query_region(c, f'{radius_search_deg} deg')

        ra_stars = np.array(stars.array['ra']) * hbt.d2r  # Convert to radians
        dec_stars = np.array(
            stars.array['dec']) * hbt.d2r  # Convert to radians

        mag = np.array(stars.array['Mag'])

        print("Stars downloaded: N = {}; mag = {:.2f} .. {:.2f}".format(
            np.size(mag), np.nanmin(mag), np.nanmax(mag)))
        print("RA = {:.2f} .. {:.2f}".format(
            np.nanmin(ra_stars) * hbt.r2d,
            np.nanmax(ra_stars) * hbt.r2d))

        # Now sort by magnitude, and keep the NUM_STARS_CAT brightest.
        # This is because this GSC catalog is huge -- typically 2000 stars in LORRI FOV.
        # We need to reduce its size to fit in our fixed astropy table string length.

        order = np.argsort(mag)
        order = np.array(order)[0:NUM_STARS_CAT]

        ra_stars = ra_stars[order]  # Returned as radians
        dec_stars = dec_stars[order]

    if (DO_USNOA2):
        name_cat = u'The USNO-A2.0 Catalogue (Monet+ 1998) 1'  # Works but gives stars down to v=17; I want to v=13
        stars = conesearch.conesearch(wcs_in.wcs.crval,
                                      0.3,
                                      cache=False,
                                      catalog_db=name_cat)
        table_stars = Table(stars.array.data)
        mask = table_stars['Bmag'] < 13
        table_stars_m = table_stars[mask]

        ra_stars = table_stars_m['RAJ2000'] * hbt.d2r  # Convert to radians
        dec_stars = table_stars_m['DEJ2000'] * hbt.d2r  # Convert to radians

    ra_stars_cat = ra_stars
    dec_stars_cat = dec_stars

    radec_stars_cat = np.transpose(np.array((ra_stars_cat, dec_stars_cat)))

    (x_stars_cat,
     y_stars_cat) = wcs_in.wcs_world2pix(radec_stars_cat[:, 0] * hbt.r2d,
                                         radec_stars_cat[:, 1] * hbt.r2d, 0)

    points_stars_cat = np.transpose(
        (y_stars_cat, x_stars_cat))  # Yes, order is supposed to be (y,x)

    #==============================================================================
    # Use DAOphot to search the image for stars.
    #==============================================================================

    points_stars_phot = hbt.find_stars(
        im,
        num=NUM_STARS_PHOT)  # Returns N x 2 aray. 0 = Row = y; 1 = Column = x.

    y_stars_phot = (points_stars_phot[:, 0])  # xy is correct -- see above
    x_stars_phot = (points_stars_phot[:, 1])  #

    #==============================================================================
    # Make a plot showing the DAO stars on the image
    #==============================================================================

    color_phot = 'red'  # Color for stars found photometrically
    color_cat = 'lightgreen'  # Color for stars in catalog

    DO_PLOT_DAO = False  # Plot an intermediate result?

    if (DO_PLOT_DAO):

        plt.imshow(stretch(im))

        plt.plot(x_stars_phot,
                 y_stars_phot,
                 linestyle='none',
                 marker='o',
                 markersize=9,
                 mec=color_cat,
                 mew=1,
                 color='none',
                 label='DAO photometric stars')  # plot() uses x, y

        plt.plot(x_stars_cat,
                 y_stars_cat,
                 linestyle='none',
                 marker='o',
                 markersize=5,
                 color='lightgreen',
                 label='Cat stars')  # plot() uses x, y

        plt.title(title)
        plt.ylim((hbt.sizey(im)), 0)
        plt.xlim((0, hbt.sizex(im)))
        plt.legend(loc='upper left')
        plt.show()

# Up til here, x and y are correct

#==============================================================================
# Look up the shift between the photometry and the star catalog.
# Do this by making a pair of fake images, and then looking up image registration on them.
#==============================================================================

# I call this pointing process 'opnav'.
# It is returned in order (y,x) because that is what imreg_dft uses, even though it is a bit weird.

    diam_kernel = 11  # How many pixels across are our synthetic stellar images? Should be odd number. Not critical.
    do_binary = True  # For the stellar images, do a binary 1/0 (recommended), or a pixel distance?

    shape = np.shape(im)  # Set shape of output array

    image_cat = hbt.image_from_list_points(points_stars_cat,
                                           shape,
                                           diam_kernel,
                                           do_binary=do_binary)
    image_phot = hbt.image_from_list_points(points_stars_phot,
                                            shape,
                                            diam_kernel,
                                            do_binary=do_binary)

    if (method == 'fft'):  # Very fast method

        # Set up a constraint for the fit. It should be different for 1x1 and 4x4.
        # For 1x1, it works well to be 100 pixels.

        if (hbt.sizex(im) == 1024):  # For LORRI 1x1
            constraint_tx = (
                0, 100
            )  # Mean and stdev. i.e., returned value will be within stdev of mean.
            constraint_ty = (0, 100)

        if (hbt.sizex(im) == 256):  # For LORRI 4x4
            constraint_tx = (
                0, 25
            )  # Mean and stdev. i.e., returned value will be within stdev of mean.
            constraint_ty = (0, 25)

        constraint_angle = 0  # With one value, it is a fixed constraint.

        constraints = {
            'tx': constraint_tx,
            'ty': constraint_ty,
            'angle': constraint_angle
        }
        (dy, dx) = ird.translation(image_cat,
                                   image_phot,
                                   constraints=constraints)['tvec']
        dy_opnav = -dy
        dx_opnav = -dx

    if (method == 'bruteforce'):  # Very slow method

        ((dx, dy),
         mat) = hbt.get_translation_images_bruteforce(image_cat, image_phot)
        dx_opnav = -dx
        dy_opnav = -dy

#==============================================================================
# Make a plot, showing DAO positions + catalog positions
#==============================================================================

    if (do_plot):

        #        hbt.figsize((10,10))

        plt.imshow(stretch(im))

        # Plot the stars -- catalog, and DAO

        plt.plot(x_stars_cat + dx_opnav,
                 y_stars_cat + dy_opnav,
                 marker='o',
                 ls='None',
                 color=color_cat,
                 alpha=0.5,
                 ms=12,
                 mew=1,
                 label='Cat Stars, adjusted')

        plt.plot(x_stars_cat,
                 y_stars_cat,
                 marker='o',
                 ls='None',
                 color=color_cat,
                 alpha=1,
                 ms=4,
                 mew=1,
                 label='Cat Stars, raw')

        plt.plot(x_stars_phot,
                 y_stars_phot,
                 marker='o',
                 ls='None',
                 color='none',
                 markersize=10,
                 mew=1,
                 mec=color_phot,
                 alpha=1,
                 label='DAOfind Stars')

        plt.title('After navigation, with dx = {:.1f}, dy = {:.1f}, {}'.format(
            dx_opnav, dy_opnav, title))
        plt.legend()

        plt.show()

#==============================================================================
# Return results and exit
#==============================================================================

# Results are returned in terms of pixel offset and a revised WCS structure.
# I don't seem to be able to copy a WCS structure, so I modify the one in place!

# Get the pixel location of the center position

    crpix = wcs_in.wcs.crpix  # Center position, in pixels, old

    # Get the new RA, Dec center of the array. It is just the old location, plus the offset

    ORIGIN_FORMAT = 1  # 0 for Numpy-style indexing, 1 for Fortran-style and FITS-style.
    # So what do I use for FITS files in Python? Experimentally, 1 is right and 0 is not.

    (ra_new, dec_new) = wcs_in.wcs_pix2world(crpix[0] - dx_opnav,
                                             crpix[1] - dy_opnav,
                                             ORIGIN_FORMAT)

    # Set it

    wcs_in.wcs.crval = (ra_new, dec_new)

    return (wcs_in, (dy_opnav, dx_opnav))
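A self-contained sketch of the constrained FFT registration used in the 'fft' branch above, on synthetic data; the constraint dict means the returned shift is pulled toward the given mean, with the given stdev:

import numpy as np
import imreg_dft as ird

im0 = np.zeros((256, 256))
im0[100:110, 120:130] = 1.0                         # template
im1 = np.roll(np.roll(im0, 6, axis=0), -9, axis=1)  # shifted copy

constraints = {'tx': (0, 25), 'ty': (0, 25), 'angle': 0}
tvec = ird.translation(im0, im1, constraints=constraints)['tvec']
# tvec is (dy, dx) ~= (-6, 9): the shift that maps im1 back onto im0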
Example n. 33
    def cache(self, path):
        As = [
            os.path.join(path, p) for p in os.listdir(path)
            if p.startswith('A') and p.endswith(self.ext)
        ]
        Bs = [
            os.path.join(path, p) for p in os.listdir(path)
            if p.startswith('B') and p.endswith(self.ext)
        ]
        LRs = [
            os.path.join(path, p) for p in os.listdir(path)
            if p.startswith('LR') and p.endswith(self.ext)
        ]
        if os.path.exists(os.path.join(path, 'mask_A' + self.ext)):
            m = np.array(Image.open(os.path.join(path, 'mask_A' + self.ext)))
            m = np.expand_dims(m, axis=2) if m.ndim == 2 else m
            maskA = m < m.max() / 2
        else:
            maskA = None
        if os.path.exists(os.path.join(path, 'mask_B' + self.ext)):
            m = np.array(Image.open(os.path.join(path, 'mask_B' + self.ext)))
            m = np.expand_dims(m, axis=2) if m.ndim == 2 else m
            maskB = m < m.max() / 2
        else:
            maskB = None
        ImgAs, PathAs, ImgBs, PathBs, ImgLRs, PathLRs = [], [], [], [], [], []
        for p in As:
            try:
                img = np.array(Image.open(p))
                img = np.expand_dims(img, axis=2) if img.ndim == 2 else img
                if maskA is not None:
                    img[maskA] = img.min()
                ImgAs.append(img)
                PathAs.append(p)
            except KeyboardInterrupt:
                raise
            except Exception as e:
                print('error when reading file ', p, ':', e)
        assert len(ImgAs) > 0, 'no file found for "A"'
        for p in Bs:
            try:
                img = np.array(Image.open(p))
                img = np.expand_dims(img, axis=2) if img.ndim == 2 else img
                if maskB is not None:
                    img[maskB] = img.min()
                ImgBs.append(img)
                PathBs.append(p)
            except KeyboardInterrupt:
                raise
            except Exception as e:
                print('error when reading file ', p, ':', e)

        for p in LRs:
            try:
                img = np.array(Image.open(p))
                assert img.ndim == 2
                if self.drift_correction:
                    import imreg_dft as ird
                    from skimage import exposure
                    b = ImgBs[0][:, :, 0]
                    b = exposure.equalize_hist(b)
                    b = scipy.ndimage.filters.gaussian_filter(b, sigma=(6, 6))
                    b = scipy.misc.imresize(b, img.shape[:2])
                    ts = ird.translation(b, img)
                    tvec = ts["tvec"].round(4)
                    # the Transformed IMaGe.
                    img = ird.transform_img(img, tvec=tvec)
                img = scipy.misc.imresize(img, ImgBs[0].shape[:2])
                img = np.expand_dims(img, axis=2)
                ImgLRs.append(img)
                PathLRs.append(p)
            except KeyboardInterrupt:
                raise
            except Exception as e:
                print('error when reading file ', p)
                import traceback, sys
                traceback.print_exc(file=sys.stdout)

        self.__cache[path] = {
            'A': ImgAs,
            'B': ImgBs,
            'LR': ImgLRs,
            'path': path,
            'pathA': PathAs,
            'pathB': PathBs,
            'pathLR': PathLRs
        }
        return True
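scipy.misc.imresize was removed in SciPy 1.3, so the drift-correction branch above no longer runs on current SciPy; a sketch of the same step with skimage.transform.resize as a replacement (register_lr is a hypothetical helper, not part of the original class):

import numpy as np
import scipy.ndimage
import imreg_dft as ird
from skimage import exposure, transform

def register_lr(reference, lr_img):
    """Drift-correct a low-resolution image against a reference channel."""
    b = exposure.equalize_hist(reference)
    b = scipy.ndimage.gaussian_filter(b, sigma=(6, 6))
    b = transform.resize(b, lr_img.shape[:2], preserve_range=True)
    tvec = ird.translation(b, lr_img)['tvec'].round(4)
    return ird.transform_img(lr_img, tvec=tvec)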
Example n. 34
def calc_offset_points(points_1,
                       points_2,
                       shape,
                       diam_kernel=9,
                       labels=['', ''],
                       do_binary=True,
                       do_plot_before=False,
                       do_plot_after=False,
                       do_plot_raw=False):
    """
    points_1, points_2: 
    """

    import hbt
    import matplotlib.pyplot as plt
    import imreg_dft as ird
    import numpy as np
    """ 
    Calculate the offset between a pair of ordered points -- e.g., an xy list
    of star positions, and and xy list of model postns.
    Returned offset is integer pixels as tuple (dy, dx).
    Input lists are of shape N x 2.
    Y = column 0
    X = column 1
    The sizes of the two lists do not need to be identical.
    """

    #    diam_kernel = 5 # Set the value of the fake stellar image to plot
    # diam_kernel = 5 is best for LORRI. 11 is too big, and we get the wrong answer. Very sensitive.

    diam_kernel = 9

    image_1 = hbt.image_from_list_points(points_1,
                                         shape,
                                         diam_kernel,
                                         do_binary=do_binary)
    image_2 = hbt.image_from_list_points(points_2,
                                         shape,
                                         diam_kernel,
                                         do_binary=do_binary)

    #    (dy,dx) = get_image_translation(image_1, image_2)

    # Get the shift, using FFT method

    (dy, dx) = ird.translation(
        image_1, image_2)['tvec']  # Return shift, with t0 = (dy, dx).
    # ** API changed ~ Sep-16, Anaconda 4.2?

    #    DO_PLOT_INPUT_FRAMES = False

    if (do_plot_raw):  # Plot the raw frames generated to calculate the shift
        plt.imshow(image_1)
        plt.title('Image 1 = ' + labels[0] +
                  ', diam_kernel = {}'.format(diam_kernel))
        plt.show()

        plt.imshow(image_2)
        plt.title('Image 2 = ' + labels[1])
        plt.show()

        plt.imshow(image_1 + image_2)
        plt.title('Image 1+2 = ' + labels[0] + ' + ' + labels[1])
        plt.show()

    print("dx={}, dy={}".format(dx, dy))

    if (do_plot_before):

        xrange = (0, shape[1])  # columns run along x; set xlim explicitly so points outside the image are clipped
        yrange = (shape[0], 0)  # rows run along y, inverted so the origin is at top left

        plt.plot(points_1[:, 1],
                 points_1[:, 0],
                 marker='o',
                 color='none',
                 markersize=10,
                 ls='None',
                 label=labels[0],
                 mew=1,
                 mec='red')
        plt.plot(points_2[:, 1],
                 points_2[:, 0],
                 marker='o',
                 color='lightgreen',
                 markersize=4,
                 ls='None',
                 label=labels[1])
        plt.title('Before shift of dx={:.1f}, dy={:.1f}'.format(dx, dy))
        plt.legend(framealpha=0.5)
        #        plt.set_aspect('equal')

        plt.xlim(
            xrange
        )  # Need to set this explicitly so that points out of image range are clipped
        plt.ylim(yrange)
        plt.show()

    if (do_plot_after):

        xrange = (0, shape[1])  # columns run along x; set xlim explicitly so points outside the image are clipped
        yrange = (shape[0], 0)  # rows run along y, inverted so the origin is at top left

        plt.plot(points_1[:, 1],
                 points_1[:, 0],
                 marker='o',
                 color='none',
                 markersize=10,
                 ls='None',
                 label=labels[0],
                 mec='red',
                 mew=1)
        plt.plot(points_2[:, 1] + dx,
                 points_2[:, 0] + dy,
                 marker='o',
                 color='lightgreen',
                 markersize=4,
                 ls='None',
                 label=labels[1])
        plt.legend(framealpha=0.5)
        plt.title('After shift of dx={:.1f}, dy={:.1f}'.format(dx, dy))

        plt.xlim(
            xrange
        )  # Need to set this explicitly so that points out of image range are clipped
        plt.ylim(yrange)
        plt.show()

    return (dy, dx)
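A self-contained sketch of the same idea without the hbt helpers; points_to_image is a hypothetical stand-in for hbt.image_from_list_points, using square kernels rather than whatever kernel hbt renders:

import numpy as np
import imreg_dft as ird

def points_to_image(points, shape, diam=9):
    """Render an N x 2 (y, x) point list as a binary image with a
    diam x diam square 'star' at each point."""
    img = np.zeros(shape)
    r = diam // 2
    for y, x in points.astype(int):
        img[max(0, y - r):y + r + 1, max(0, x - r):x + r + 1] = 1.0
    return img

points_1 = np.array([[50.0, 60.0], [120.0, 200.0], [30.0, 180.0]])
points_2 = points_1 + [4.0, -7.0]             # offset by (dy, dx) = (4, -7)
im_a = points_to_image(points_1, (256, 256))
im_b = points_to_image(points_2, (256, 256))
dy, dx = ird.translation(im_a, im_b)['tvec']  # recovers about (-4, 7)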
Example n. 35
import os

import scipy as sp
import scipy.misc
import matplotlib.pyplot as plt

import imreg_dft as ird

basedir = os.path.join('..', 'examples')
# the TEMPLATE (note: scipy.misc.imread was removed in SciPy 1.2; imageio.imread is the modern replacement)
im0 = sp.misc.imread(os.path.join(basedir, "sample1.png"), True)
# the image to be transformed
im1 = sp.misc.imread(os.path.join(basedir, "sample2.png"), True)
# newer versions of imreg_dft return a dict, not a tuple (API changed ~Sep-2016)
result = ird.translation(im0, im1)
tvec = result["tvec"]
# the Transformed IMaGe.
timg = ird.transform_img(im1, tvec=tvec)
ird.imshow(im0, im1, timg)
plt.show()
print(tuple(tvec))