def computeAzError(img1, img2, img3):
    (_, _, adjust, _, _) = computeAzDrift(img1, img2)
    d1 = image2Array(img1, const)
    d2 = image2Array(img2, const)
    d3 = image2Array(img3, const)
    p, (_, _) = aa.find_transform(d1, d2)
    d1d2rotate = p.rotation * 180.0 / np.pi
    p, (_, _) = aa.find_transform(d1, d3)
    d1d3rotate = p.rotation * 180.0 / np.pi
    p, (pos_img, pos_img_rot) = aa.find_transform(d2, d3)
    d2d3rotate = p.rotation * 180.0 / np.pi
    print("rotation 1-2: " + "{:.2f}".format(d1d2rotate))
    print("rotation 1-3: " + "{:.2f}".format(d1d3rotate))
    print("rotation 2-3: " + "{:.2f}".format(d2d3rotate))
    p, (_, _) = aa.find_transform(d2, d3)
    rotate = p.rotation * 180.0 / np.pi
    d3 = image2Array(img3, rotate)
    p, (_, _) = aa.find_transform(d2, d3)
    driftX = p.translation[0]
    return adjust, abs(adjust) - abs(driftX)
def test_consistent_invert(self):
    t, __ = aa.find_transform(self.image, self.image_ref)
    tinv, __ = aa.find_transform(self.image_ref, self.image)
    rpoint = np.random.rand(3) * self.h
    rpoint[2] = 1.0
    rtransf = tinv.params.dot(t.params.dot(rpoint))
    err = np.linalg.norm(rpoint - rtransf) / np.linalg.norm(rpoint)
    self.assertLess(err, 1e-2)
def align(image, refimage):  # magic!!
    try:
        p, (pos_image, pos_refimage) = aa.find_transform(image, refimage)
        return p  # transformation parameters
    except RuntimeWarning:
        image += abs(np.amin(image) + 10)
        # image = np.log10(image)
        p, (pos_image, pos_refimage) = aa.find_transform(image, refimage)
        return p  # transformation parameters
def align(self):  # magic!!
    # NOTE: `refimage` is not defined in this method; it is presumably a
    # module-level reference image in the original code.
    try:
        self = np.log10(self)
        p, (pos_self, pos_refimage) = aa.find_transform(self, refimage)
        return p  # transformation parameters
    except RuntimeWarning:
        self += abs(np.amin(self) + 10)
        self = np.log10(self)
        p, (pos_self, pos_refimage) = aa.find_transform(self, refimage)
        return p  # transformation parameters
def computeAlError(url1, url2, url3, ra, starLocation):
    (timeL, yDrift, adjustment) = computeAlDrift(url1, url2, ra, starLocation)
    d1 = image2Array(url1, 0)
    d3 = image2Array(url3, 0)
    p, (_, _) = aa.find_transform(d1, d3)
    rotate = p.rotation * 180.0 / np.pi
    d3 = image2Array(url3, rotate)
    p, (_, _) = aa.find_transform(d1, d3)
    error = abs(adjustment) - abs(p.translation[1])
    return adjustment, error
def computeAlDrift(url1, url2, ra, starLocation):
    d1 = image2Array(url1, 0)
    d2 = image2Array(url2, 0)
    p, (_, _) = aa.find_transform(d1, d2)
    rotate = p.rotation * 180.0 / np.pi
    d2 = image2Array(url2, rotate)
    p, (_, _) = aa.find_transform(d1, d2)
    timeL = timeLapsedInMins(url1, url2)
    theta = ra
    # NOTE: math.tan expects radians; if theta is in degrees it should be
    # converted with math.radians() first.
    Calt = 229 * tan(int(theta.strip()))
    yDrift = p.translation[1]
    adjustment = ((Calt / timeL + 1) * yDrift if starLocation == 'E'
                  else (Calt / timeL - 1) * yDrift)
    return (timeL, yDrift, adjustment)
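# A numeric sketch of the adjustment formula in computeAlDrift() above, using
# invented values (10-minute lapse, 3-pixel drift, theta of 40 degrees). The
# radians() conversion is an assumption; see the note on tan() above.
from math import tan, radians

Calt_demo = 229 * tan(radians(40))
timeL_demo, yDrift_demo = 10.0, 3.0
adjust_east = (Calt_demo / timeL_demo + 1) * yDrift_demo  # starLocation == 'E'
adjust_west = (Calt_demo / timeL_demo - 1) * yDrift_demo  # any other location
print(adjust_east, adjust_west)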
def sources(sourcelist, imagelist=None, reference=None):
    """
    Takes an NxM list of sources, where N is the number of images and M is
    the number of sources in each image. Operates in-place.

    Arguments:
    sourcelist -- list of sources

    Keyword Arguments:
    imagelist -- an optional list of images that map 1:1 to sources; if
                 provided, the calculated transformations are applied to
                 the images as well
    reference -- a set of points to use as a reference; defaults to the
                 0th index of sourcelist
    """
    # FIXME this is really slow, move to Cython or do some numpy magic with
    # Sources class
    if reference is None:
        reference = sourcelist[0]
    if imagelist is None:
        imagelist = [None] * len(sourcelist)
    npref = [[r.x, r.y] for r in reference]
    for cat, im in zip(sourcelist, imagelist):
        npc = [[c.x, c.y] for c in cat]
        T, _ = astroalign.find_transform(npc, npref)
        if im is not None:
            # apply_transform returns a new array; write it back in-place so
            # the images are actually modified, as the docstring promises
            aligned_im, _ = astroalign.apply_transform(T, im, im)
            im[:] = aligned_im
        for source in cat:
            source.transform(T)
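# A minimal usage sketch for sources() above, assuming a hypothetical Source
# class; all the function requires of a catalog entry is x/y attributes and a
# transform() method.
import numpy as np

class Source:
    def __init__(self, x, y):
        self.x, self.y = float(x), float(y)

    def transform(self, T):
        # Map this source through the skimage SimilarityTransform T.
        self.x, self.y = T([[self.x, self.y]])[0]

pts = [(10, 10), (80, 25), (35, 70), (60, 55), (20, 45), (75, 80)]
cat_ref = [Source(x, y) for x, y in pts]
cat_shifted = [Source(x + 3, y - 2) for x, y in pts]
sources([cat_ref, cat_shifted])  # aligns cat_shifted onto cat_ref in-place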
def test_find_transform_givensources(self):
    from skimage.transform import estimate_transform, matrix_transform

    source = np.array([
        [1.4, 2.2],
        [5.3, 1.0],
        [3.7, 1.5],
        [10.1, 9.6],
        [1.3, 10.2],
        [7.1, 2.0],
    ])
    nsrc = source.shape[0]
    scale = 1.5  # scaling parameter
    alpha = np.pi / 8.0  # rotation angle
    mm = scale * np.array([[np.cos(alpha), -np.sin(alpha)],
                           [np.sin(alpha), np.cos(alpha)]])
    tx, ty = 2.0, 1.0  # translation parameters
    transl = np.array([nsrc * [tx], nsrc * [ty]])
    dest = (mm.dot(source.T) + transl).T
    t_true = estimate_transform("similarity", source, dest)

    # disorder dest points so they don't match the order of source
    np.random.shuffle(dest)

    t, (src_pts, dst_pts) = aa.find_transform(source, dest)
    self.assertLess(t_true.scale - t.scale, 1e-10)
    self.assertLess(t_true.rotation - t.rotation, 1e-10)
    self.assertLess(np.linalg.norm(t_true.translation - t.translation), 1e-10)
    self.assertEqual(src_pts.shape[0], dst_pts.shape[0])
    self.assertEqual(src_pts.shape[1], 2)
    self.assertEqual(dst_pts.shape[1], 2)
    dst_pts_test = matrix_transform(src_pts, t.params)
    self.assertLess(np.linalg.norm(dst_pts_test - dst_pts), 1e-10)
def _align(self, img: Image) -> Image:
    reference = self.db.get_stacked_image(str(img.key))
    if reference is None:
        logging.info(f"no reference found for {img.key}")
        return img
    assert img.data.dtype == np.float32 and img.data.min() >= 0.0 and img.data.max() <= 1.0, \
        f"{img.data.dtype} {img.data.max()} {img.data.min()}"
    assert reference.data.dtype == np.float32 and reference.data.min() >= 0.0 and reference.data.max() <= 1.0, \
        f"{reference.data.dtype} {reference.data.max()} {reference.data.min()}"
    assert reference.data.ndim == img.data.ndim, f"{reference.data.ndim} {img.data.ndim}"
    assert reference.data.shape == img.data.shape, f"{reference.data.shape} {img.data.shape}"
    with Timer(f"aligning image for {img.key}"):
        if img.data.ndim == 2:
            registered, footprint = aa.register(img.data, reference.data, fill_value=0.0)
            img.data = registered
        elif img.data.ndim == 3:
            transform, _ = aa.find_transform(img.data[0], reference.data[0])
            for i in range(3):
                transformed, _ = aa.apply_transform(transform, img.data[i],
                                                    reference.data[i], fill_value=0.0)
                img.data[i] = transformed
        else:
            raise Exception(f"invalid image dimensions {img.data.ndim}")
    assert img.data.dtype == np.float32 and img.data.min() >= 0.0 and img.data.max() <= 1.0, \
        f"{img.data.dtype} {img.data.max()} {img.data.min()}"
    return img
def hdu_shift_images(hdu_list, method='fft', register_method='asterism',
                     footprint=False, logger=logger):
    """Calculate and apply shifts to a set of ccddata images.

    The function processes the list in place; the original data is altered.

    methods:
        - "asterism" : align images using asterism matching (astroalign)
        - "chi2" : align images using chi2 minimization (image_registration)
        - "fft" : align images using Fourier-transform correlation (skimage)
    """
    if method == "asterism":
        logger.info("Registering images with astroalign.")
        if astroalign is None:
            raise RuntimeError("astroalign module not available.")
        im0 = hdu_list[0].data
        for i in hdu_list[1:]:
            transf, _ = astroalign.find_transform(i.data, im0)
            # astroalign.apply_transform returns (aligned_image, footprint)
            i.data, _ = astroalign.apply_transform(transf, i.data, im0)
            if footprint:
                i.footprint, _ = astroalign.apply_transform(
                    transf, np.ones(i.data.shape, dtype=bool), im0)
    else:
        if method == 'chi2':
            shifts = create_chi2_shift_list([ccd.data for ccd in hdu_list])
        else:
            shifts = create_fft_shift_list([ccd.data for ccd in hdu_list])
        logger.info(f"Aligning CCDData with shifts: {shifts}")
        for ccd, shift in zip(hdu_list, shifts):
            ccd.data, ccd.masks = apply_shift(ccd.data, shift, logger=logger)
            sh_string = [str(i) for i in shift]
            ccd.header['hierarch astropop register_shift'] = ",".join(sh_string)
            if footprint:
                ccd.footprint, _ = apply_shift(
                    np.ones_like(ccd.data, dtype=bool), shift, logger=logger)
    for i in hdu_list:
        i.header['hierarch astropop registered'] = True
    return hdu_list
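# A hedged driver sketch for hdu_shift_images() above, using a tiny synthetic
# star field; the image contents and positions are invented for illustration.
import numpy as np
from astropy.io import fits

def fake_star_image(xs, ys, shape=(128, 128), seed=0):
    # Render crude Gaussian "stars" on a noisy background.
    yy, xx = np.mgrid[:shape[0], :shape[1]]
    img = np.random.default_rng(seed).normal(10.0, 1.0, shape)
    for x, y in zip(xs, ys):
        img += 1000.0 * np.exp(-((xx - x) ** 2 + (yy - y) ** 2) / 8.0)
    return img

xs = np.array([20.0, 40.0, 90.0, 70.0, 30.0, 100.0])
ys = np.array([30.0, 80.0, 50.0, 100.0, 60.0, 20.0])
hdus = [fits.PrimaryHDU(fake_star_image(xs, ys)),
        fits.PrimaryHDU(fake_star_image(xs + 3, ys - 2))]  # shifted copy
hdu_shift_images(hdus, method='asterism')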
def computeAzDrift(img1, img2):
    Caz = 58.3079
    d1 = image2Array(img1, const)
    d2 = image2Array(img2, const)
    p, (_, _) = aa.find_transform(d1, d2)
    rotate = p.rotation * 180.0 / np.pi
    d2 = image2Array(img2, rotate)
    p, (pos_img, pos_img_rot) = aa.find_transform(d1, d2)
    timeLapsed = timeLapsedInMins(img1, img2)
    drift = p.translation[1] * -1  # top left is (0, 0) instead of bottom left
    adjust = Caz * abs(drift) / timeLapsed
    candidate = sorted(pos_img,
                       key=lambda x: x[0] if drift < 0 else x[1],
                       reverse=True if drift > 0 else False)[0]
    if debug:
        for (x1, y1), (x2, y2) in zip(pos_img, pos_img_rot):
            print("S({:.2f}, {:.2f}) --> D({:.2f}, {:.2f}) xDiff: {:.2f} yDiff: {:.2f}"
                  .format(x1, y1, x2, y2, (x2 - x1), (y2 - y1)))
        print("rotate: " + str(rotate) + " post rotation: " + str(p.rotation))
    return (timeLapsed, drift, adjust, candidate[0], candidate[1])
def align_images(image, target):
    """Find the SimilarityTransform to align an input image with a target image."""
    try:
        tform, (_, _) = astroalign.find_transform(image, target)
    except astroalign.MaxIterError:
        offset = np.array([0, 0])
        rot = np.array([[1, 0], [0, 1]])
        success = False
    else:
        offset = tform.translation
        rot = tform.params[:2, :2]
        success = True
    return offset, rot, success
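# A short sketch of consuming align_images(); `image` and `target` stand in
# for two star-field arrays assumed to be defined elsewhere.
import numpy as np

offset, rot, success = align_images(image, target)
if success:
    # A SimilarityTransform maps [x, y] to rot @ [x, y] + offset, so source
    # coordinates can be pushed into the target frame like this:
    pts_src = np.array([[10.0, 20.0], [55.0, 42.0]])
    pts_dst = pts_src @ rot.T + offset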
def check_if_findtransform_ok(self, numstars):
    """Helper function to test find_transform with common test code for
    3, 4, 5, and 6 stars."""
    from skimage.transform import estimate_transform, matrix_transform

    if numstars > 6:
        raise NotImplementedError

    # x and y of stars in the ref frame (ints)
    self.star_refx = np.array([100, 120, 400, 400, 200, 200])[:numstars]
    self.star_refy = np.array([150, 200, 200, 320, 210, 350])[:numstars]
    self.num_stars = numstars
    # Fluxes of stars
    self.star_f = np.array(numstars * [700.0])

    (
        self.image,
        self.image_ref,
        self.star_ref_pos,
        self.star_new_pos,
    ) = simulate_image_pair(
        shape=(self.h, self.w),
        translation=(self.x_offset, self.y_offset),
        rot_angle_deg=50.0,
        num_stars=self.num_stars,
        star_refx=self.star_refx,
        star_refy=self.star_refy,
        star_flux=self.star_f,
    )
    source = self.star_ref_pos
    dest = self.star_new_pos.copy()
    t_true = estimate_transform("similarity", source, dest)

    # disorder dest points so they don't match the order of source
    np.random.shuffle(dest)

    t, (src_pts, dst_pts) = aa.find_transform(source, dest)
    self.assertLess(t_true.scale - t.scale, 1e-10)
    self.assertLess(t_true.rotation - t.rotation, 1e-10)
    self.assertLess(np.linalg.norm(t_true.translation - t.translation), 1.0)
    self.assertEqual(src_pts.shape[0], dst_pts.shape[0])
    self.assertLessEqual(src_pts.shape[0], source.shape[0])
    self.assertEqual(src_pts.shape[1], 2)
    self.assertEqual(dst_pts.shape[1], 2)
    dst_pts_test = matrix_transform(src_pts, t.params)
    self.assertLess(np.linalg.norm(dst_pts_test - dst_pts), 1.0)
def align_image(image_name_1, image_name_2, new_file_name):
    File_List = [image_name_1, image_name_2]
    with fits.open(f'RAW_DATA/{File_List[0]}') as image_hdu_1:
        Data_1 = image_hdu_1[0].data
    with fits.open(f'RAW_DATA/{File_List[1]}') as image_hdu_2:
        Data_2 = image_hdu_2[0].data  # numpy array of the exposure
    Image_Data = [Data_1, Data_2]

    transf, (source_list, target_list) = aa.find_transform(Image_Data[0],
                                                           Image_Data[1])
    Aligned_Image_Data, footprint = aa.apply_transform(transf, Image_Data[0],
                                                       Image_Data[1])  # aligning

    plt.style.use(astropy_mpl_style)
    Norm = ImageNormalize(stretch=SqrtStretch())
    plt.figure(1)
    plt.imshow(Image_Data[0], cmap='gray', interpolation='bicubic',
               norm=Norm, vmin=700, vmax=800)
    plt.colorbar()
    plt.figure(2)
    plt.imshow(Image_Data[1], cmap='gray', interpolation='bicubic', norm=Norm)
    plt.colorbar()
    plt.figure(3)
    plt.imshow(Aligned_Image_Data, cmap='gray', interpolation='bicubic', norm=Norm)
    plt.colorbar()

    with fits.open(f'RAW_DATA/{File_List[0]}', mode='update') as image_hdu_3:
        image_hdu_3[0].data = Aligned_Image_Data
        image_hdu_3.flush()
        # write to a new FITS file
        image_hdu_3.writeto(f'AlIGNED/{new_file_name}', overwrite=True)
    return
def register_stars(images, ref_img=None):
    '''
    Register a field of stars in translation, rotation, and scaling.

    Parameters
    ==========
    images : ndarray of shape (N, ny, nx)
        Cube containing the images to align.
    ref_img : ndarray of shape (ny, nx) or None (default: None)
        The reference image relative to which all images should be aligned.
        If None, use the first input image.

    Returns
    =======
    registered_images : ndarray of shape (N, ny, nx)
        Version of the input, with all images aligned with ref_img.
    '''
    if ref_img is None:
        ref_img = images[0]
        first_im_is_ref = True
    else:
        first_im_is_ref = False
    ref_sources = astroalign._find_sources(ref_img)
    iterable = tqdm(images, desc='Aligning images', total=len(images))
    for i, img in enumerate(iterable):
        if i == 0 and first_im_is_ref:
            continue
        try:
            p, _ = astroalign.find_transform(img, ref_sources)
            mat = p.params[:-1]
        except Exception as e:
            warnings.warn('Image {}: {}'.format(i, e))
            mat = np.array([[1, 0, 0], [0, 1, 0]], dtype=float)
        images[i] = affine_transform(img, mat)
    return images
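# A minimal driver for register_stars() above: frame 1 of a small synthetic
# cube is a shifted copy of frame 0 (all values invented for this sketch).
import numpy as np
from scipy.ndimage import shift

yy, xx = np.mgrid[:128, :128]
frame = np.random.default_rng(1).normal(10.0, 1.0, (128, 128))
for x, y in [(20, 30), (90, 40), (50, 100), (110, 90), (30, 80), (70, 15)]:
    frame += 500.0 * np.exp(-((xx - x) ** 2 + (yy - y) ** 2) / 6.0)
cube = np.stack([frame, shift(frame, (4.0, -3.0))])
aligned = register_stars(cube)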
def aamatch(spots_1, spots_2,
            source_invariants=None, source_asterisms=None,
            source_invariant_tree=None, target_invariants=None,
            target_asterisms=None, target_invariant_tree=None):
    """Attempts to match two lists of spots.

    Args:
        spots_1 (List[List[float]]): The spots in list 1.
        spots_2 (List[List[float]]): The spots in list 2.
        source_invariants (Object, optional): Cached triangle invariants for
            list 1, if available. Defaults to None.
        source_asterisms (Object, optional): Triangle asterisms for list 1,
            if available. Defaults to None.
        source_invariant_tree (Object, optional): Invariant kd-tree for list 1,
            if available. Defaults to None.
        target_invariants (Object, optional): Cached triangle invariants for
            list 2, if available. Defaults to None.
        target_asterisms (Object, optional): Triangle asterisms for list 2,
            if available. Defaults to None.
        target_invariant_tree (Object, optional): Invariant kd-tree for list 2,
            if available. Defaults to None.

    Returns:
        Tuple[np.ndarray, int, float, List[int], List[int]]: The affine
        transform matrix mapping one point set onto the other, the number of
        inliers, the score, and the matching indexes from list 1 and list 2.
    """
    try:
        T, (s_pos, t_pos), (s_idx, t_idx) = astroalign.find_transform(
            spots_1, spots_2,
            source_invariants=source_invariants,
            source_asterisms=source_asterisms,
            source_invariant_tree=source_invariant_tree,
            target_invariants=target_invariants,
            target_asterisms=target_asterisms,
            target_invariant_tree=target_invariant_tree)
        return (T, len(s_pos),
                (len(s_pos) / len(spots_1)) * (len(t_pos) / len(spots_2)),
                s_idx, t_idx)
    except Exception:
        return None, 0, 0, [], []
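# A hedged usage sketch for aamatch() above. Note that the extra keyword
# arguments it forwards (source_invariants, target_invariant_tree, ...) imply
# a fork of astroalign with invariant caching; stock astroalign.find_transform
# does not accept them. The spot lists here are invented and differ by a
# small translation.
spots_a = [[10.0, 12.0], [45.0, 20.0], [30.0, 60.0], [70.0, 40.0], [55.0, 75.0]]
spots_b = [[x + 2.0, y - 1.5] for x, y in spots_a]
T, n_inliers, score, idx_a, idx_b = aamatch(spots_a, spots_b)
if T is not None:
    # score is the product of the matched fractions of each list
    print(f"{n_inliers} inliers, score {score:.2f}")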
def test_unrepeated_sources(self):
    source = np.array([[0.0, 2.0], [1.0, 3.0], [2.1, 1.75],
                       [3.5, 1.0], [4.0, 2.0]])
    R = np.array([
        [np.cos(30.0 * np.pi / 180), np.sin(30.0 * np.pi / 180)],
        [-np.sin(30.0 * np.pi / 180), np.cos(30.0 * np.pi / 180)],
    ])
    tr = np.array([-0.5, 2.5])
    target = R.dot(source.T).T + tr
    best_t, (s_list, t_list) = aa.find_transform(source, target)
    self.assertEqual(len(s_list), len(t_list))
    self.assertLessEqual(len(s_list), len(source))
    # Assert no repeated sources used
    source_set = set((x, y) for x, y in s_list)
    self.assertEqual(len(s_list), len(source_set))
    # Assert no repeated targets used
    target_set = set((x, y) for x, y in t_list)
    self.assertEqual(len(t_list), len(target_set))
    # Assert s_list is a subset of source
    self.assertTrue(source_set <= set((x, y) for x, y in source))
    # Assert t_list is a subset of target
    self.assertTrue(target_set <= set((x, y) for x, y in target))
def registra_lista(lista):
    cantidad = len(lista)
    # The first image in the list is the reference frame.
    print("\nStarting alignment.")
    print("\nThe reference frame is {:}".format(lista[0]))
    blanco = ft.open(lista[0])
    img_blanco = blanco[0].data
    hdr_blanco = blanco[0].header
    blanco.close()
    del (lista[0])  # remove the reference image from the list
    for ii in lista:
        ff = ft.open(ii)
        img_torcida = ff[0].data
        hdr_torcida = ff[0].header
        ff.close()
        p, (pos_img, pos_img_rot) = astroalign.find_transform(img_torcida,
                                                              img_blanco)
        imprimir_info(p, ii)
        # astroalign.register returns (aligned_image, footprint)
        img_aligned, _ = astroalign.register(img_torcida, img_blanco)
        hdr_torcida.add_comment("Registered with Astroalign and PyReduc")
        ft.writeto(ii, img_aligned, header=hdr_torcida, overwrite=True)
    print("\nRegistration completed successfully")
def register(*args):
    """Take several images of type numpy.ndarray and align (register) them
    relative to the first image.
    """
    # Multiple color layers? Use just the green layer for alignment.
    def singlelayer(img):
        if len(img.shape) == 3:
            return img[:, :, 1]
        return img

    img1 = singlelayer(args[0])
    img2 = singlelayer(args[1])

    # Register the two images
    img_aligned, footprint = astroalign.register(img1, img2)

    # Plot the results
    # plot_three(img1, img2, img_aligned)

    transf, (pos_img, pos_img_rot) = astroalign.find_transform(img1, img2)

    def print_stats():
        print("Rotation: %2d degrees" % (transf.rotation * 180.0 / np.pi))
        print("\nScale factor: %.2f" % transf.scale)
        print("\nTranslation: (x, y) = (%.2f, %.2f)" % tuple(transf.translation))
        print("\nTransformation matrix:\n", transf.params)
        print("\nPoint correspondence:")
        for (x1, y1), (x2, y2) in zip(pos_img, pos_img_rot):
            print("(%.2f, %.2f) in source --> (%.2f, %.2f) in target"
                  % (x1, y1, x2, y2))

    # print_stats()

    # Plot correspondences
    plot_three(img1, img2, img_aligned,
               pos_img=pos_img, pos_img_rot=pos_img_rot, transf=transf)

    # Align again using the transform. Will use this to align the other
    # channels after using one channel to register the two images.
    # The documentation doesn't mention a footprint being part of the return,
    # but it is.
    realigned, footprint = astroalign.apply_transform(transf, img1, img2)
    # plot_three(img1, img2, realigned)

    newshape = args[1].shape
    if len(newshape) == 2:
        newshape = args[1].shape + (3, )

    # trying https://stackoverflow.com/a/10445502
    rgbArray = np.zeros(newshape, 'uint8')
    for i in range(newshape[-1]):
        rgbArray[..., i] = realigned
    img = Image.fromarray(rgbArray)
    outfile = '/tmp/out.jpg'
    img.save(outfile)
    print("Saved to", outfile)
for root, dirs, files in os.walk("./images/"):
    files.sort()
    first = files.pop(0)
    target = read_image(os.path.join(root, first))
    target -= dark
    target_luma = cv.cvtColor(target, cv.COLOR_BGR2GRAY)
    accumulated = target
    counter = 1
    for filename in files:
        source = read_image(os.path.join(root, filename))
        print("Projecting", filename)
        source_luma = cv.cvtColor(source, cv.COLOR_BGR2GRAY)
        try:
            transf, (source_list, target_list) = aa.find_transform(
                source=source_luma,
                target=target_luma,
                max_control_points=MAX_CONTROL_POINTS)
            projection_0, footprint = aa.apply_transform(
                transf, source[:, :, 0], target[:, :, 0])
            projection_1, footprint = aa.apply_transform(
                transf, source[:, :, 1], target[:, :, 1])
            projection_2, footprint = aa.apply_transform(
                transf, source[:, :, 2], target[:, :, 2])
            projection = np.stack([projection_0, projection_1, projection_2],
                                  axis=2)
            cv.imwrite("{:03d}.tif".format(counter),
                       projection.astype(np.uint8))
            accumulated += projection
            target = accumulated
            counter += 1
        except aa.MaxIterError:
            # the original excerpt ends mid-try; a minimal handler is added
            # here so the fragment parses
            print("Could not align", filename)
    output = accumulated / counter
def main(ref_path, new_path, objname):
    # align the images
    refdata = fits.getdata(os.path.join(STACK_PATH, ref_path))[250:-250, 250:-250]
    newdata = fits.getdata(os.path.join(STACK_PATH, new_path))[250:-250, 250:-250]
    try:
        trf, _ = aa.find_transform(newdata.astype('<f8'), refdata.astype('<f8'))
        new_aligned, _ = aa.apply_transform(trf, newdata.astype('<f8'),
                                            refdata.astype('<f8'))

        from skimage.transform import warp
        init_mask = np.zeros_like(newdata)
        outside_px_mask = warp(init_mask, inverse_map=trf.inverse,
                               output_shape=refdata.shape, order=3,
                               mode='constant', cval=1., clip=False,
                               preserve_range=False)
        useful = np.where(outside_px_mask == 0)
        max_x = np.max(useful[0])
        min_x = np.min(useful[0])
        max_y = np.max(useful[1])
        min_y = np.min(useful[1])

        new_cropped = new_aligned[min_x:max_x, min_y:max_y]
        new_mask = outside_px_mask[min_x:max_x, min_y:max_y]
        ref_cropped = refdata[min_x:max_x, min_y:max_y]
    except Exception:
        try:
            aa.MIN_MATCHES_FRACTION = 0.01
            ref = si.SingleImage(refdata.astype('<f8'))
            new = si.SingleImage(newdata.astype('<f8'))
            ref.best_sources.sort(order='flux')
            new.best_sources.sort(order='flux')
            rs = np.empty((len(ref.best_sources), 2))
            j = 0
            for x, y in ref.best_sources[['x', 'y']]:
                rs[j] = x, y
                j += 1
            ns = np.empty((len(new.best_sources), 2))
            j = 0
            for x, y in new.best_sources[['x', 'y']]:
                ns[j] = x, y
                j += 1
            if abs(len(ns) - len(rs)) > 50:
                # truncate both lists to a common length (the original used
                # np.max here, which is a no-op; min is what actually trims)
                min_l = np.min([len(ns), len(rs)])
                ns = ns[:min_l]
                rs = rs[:min_l]
            trf, _ = aa.find_transform(ns, rs)
            new_aligned, _ = aa.apply_transform(trf, newdata.astype('<f8'),
                                                refdata.astype('<f8'))

            from skimage.transform import warp
            init_mask = np.zeros_like(newdata)
            outside_px_mask = warp(init_mask, inverse_map=trf.inverse,
                                   output_shape=refdata.shape, order=3,
                                   mode='constant', cval=1., clip=False,
                                   preserve_range=False)
            useful = np.where(outside_px_mask == 0)
            max_x = np.max(useful[0])
            min_x = np.min(useful[0])
            max_y = np.max(useful[1])
            min_y = np.min(useful[1])

            new_cropped = new_aligned[min_x:max_x, min_y:max_y]
            new_mask = outside_px_mask[min_x:max_x, min_y:max_y]
            ref_cropped = refdata[min_x:max_x, min_y:max_y]
        except Exception:
            raise

    # copy them to the place designated in the pipeline
    ref_h = fits.getheader(os.path.join(STACK_PATH, ref_path))
    fits.writeto(data=ref_cropped.astype('<f4'), header=ref_h,
                 filename=REFERENCE_IMAGE, overwrite=True)
    new_h = fits.getheader(os.path.join(STACK_PATH, new_path))
    fits.writeto(data=new_cropped.astype('<f4'), header=new_h,
                 filename=NEW_IMAGE, overwrite=True)

    # write a file with the details
    meta = {}
    meta['object'] = objname
    meta['orig_ref_path'] = ref_path
    meta['orig_new_path'] = new_path
    with open(DETAILS_FILE, 'w') as fp:
        json.dump(meta, fp)
    return 0
def _find_transformation(self, image: Image):
    """
    Iteratively try to find a valid transformation to align image with the
    stored align reference.

    We perform 3 tries with a centered image subset of growing size: 10%,
    33% and 100% of the image size.

    :param image: the image to be aligned
    :type image: Image

    :return: the found transformation
    :raises: StackingError when no transformation is found using the whole image
    """
    for ratio in [.1, .33, 1.]:
        top, bottom, left, right = self._get_image_subset_boundaries(ratio)
        # pick green channel if image has color
        if image.is_color():
            new_subset = image.data[1][top:bottom, left:right]
            ref_subset = self._align_reference.data[1][top:bottom, left:right]
        else:
            new_subset = image.data[top:bottom, left:right]
            ref_subset = self._align_reference.data[top:bottom, left:right]
        try:
            _LOGGER.debug(f"Searching valid transformation on subset "
                          f"with ratio:{ratio} and shape: {new_subset.shape}")
            transformation, matches = al.find_transform(new_subset, ref_subset)
            _LOGGER.debug(f"Found transformation with subset ratio = {ratio}")
            _LOGGER.debug(f"rotation : {transformation.rotation}")
            _LOGGER.debug(f"translation : {transformation.translation}")
            _LOGGER.debug(f"scale : {transformation.scale}")
            matches_count = len(matches[0])
            _LOGGER.debug(f"image matched features count : {matches_count}")
            if matches_count < _MINIMUM_MATCHES_FOR_VALID_TRANSFORM:
                _LOGGER.debug(
                    f"Found transformation but matches count is too low : "
                    f"{matches_count} < {_MINIMUM_MATCHES_FOR_VALID_TRANSFORM}. "
                    "Discarding transformation")
                raise StackingError("Too few matches")
            return transformation
        # pylint: disable=W0703
        except Exception as alignment_error:
            # we have no choice but to catch Exception here: that's what
            # AstroAlign raises in some cases, and this catches MaxIterError
            # as well
            if ratio == 1.:
                raise StackingError(alignment_error)
            _LOGGER.debug(f"Could not find valid transformation on subset "
                          f"with ratio = {ratio}.")
            continue
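# _get_image_subset_boundaries() is not shown in this excerpt; a plausible
# sketch, inferred from how it is used above (a centered window whose sides
# are `ratio` times the full image size), might look like this:
def _get_image_subset_boundaries(self, ratio: float):
    # Hypothetical helper: return (top, bottom, left, right) of a centered crop.
    height, width = self._align_reference.data.shape[-2:]
    half_h, half_w = int(height * ratio) // 2, int(width * ratio) // 2
    cy, cx = height // 2, width // 2
    return cy - half_h, cy + half_h, cx - half_w, cx + half_w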
def test_consistent_result(self):
    t1, __ = aa.find_transform(source=self.image, target=self.image_ref)
    for i in range(5):
        t2, __ = aa.find_transform(source=self.image, target=self.image_ref)
        self.assertLess(np.linalg.norm(t1.params - t2.params), 1e-10)
image = fits.open("light_00001.fit") rgb_ref = image[0].data image.close() plt.ion() fig = plt.figure() ax = fig.add_subplot(111) new_rvb = np.rollaxis((np.array(rgb_ref) / 65535.)**(1 / 3.), 0, 3) test = ax.imshow(new_rvb) plt.draw() plt.pause(1) for i in tqdm(nb_im_str): image = fits.open("light_" + str(i) + ".fit") rgb_new = image[0].data image.close() p, (pos_img, pos_img_rot) = al.find_transform(rgb_new[1], rgb_ref[1]) rgb_align = [] for j in tqdm(range(3)): rgb_align.append( al.apply_transform(p, rgb_new[j], rgb_ref[j]) + rgb_ref[j]) rgb_align[j] = np.where(rgb_align[j] < 65535, rgb_align[j], 65535) rgb_ref = rgb_align new_rvb = np.rollaxis((np.array(rgb_ref) / 65535.)**(1 / 3.), 0, 3) test = ax.imshow(new_rvb) plt.draw() plt.pause(0.1) plt.show()
from . import value_objects

logger = logging.getLogger(__name__)

if __name__ == "__main__":
    base_file_path = argv[1]
    target_file_paths = argv[2:]
    base_image = value_objects.ImageFile.load(base_file_path)
    if not base_image.meta.stars:
        raise Exception("Base file %s not registered" % base_file_path)
    for target_file_path in target_file_paths:
        target_image = value_objects.ImageFile.load(target_file_path)
        if not target_image.meta.stars:
            logger.error("Target file %s not registered", target_file_path)
        else:
            logger.info("Aligning file %s", target_file_path)
            transformation, (s_list, t_list) = aa.find_transform(
                base_image.meta.stars_as_tuples,
                target_image.meta.stars_as_tuples)
            if target_image.meta.transformations is None:
                target_image.meta.transformations = {}
            target_image.meta.transformations[base_image.meta.uuid] = transformation.params
            target_image.save()
# we are transforming the broadband image to match the narrowband
registered, footprint = aa.register(rband8Rgb1, halpha8Rgb1)

# some opencv magic to clean up the image
registered = cv2.normalize(registered, None, 0, 255, cv2.NORM_MINMAX,
                           dtype=cv2.CV_8U)

# save file
cv2.imwrite(OUTPUTFILE, np.uint8(registered))

# get transformation data for the hell of it
p, (pos_img, pos_img_rot) = aa.find_transform(rband8Rgb1, halpha8Rgb1)
print("Rotation: {:.2f} degrees".format(p.rotation * 180.0 / np.pi))
print("\nScale factor: {:.2f}".format(p.scale))
print("\nTranslation: (x, y) = ({:.2f}, {:.2f})".format(*p.translation))
print("\nTransformation matrix:\n{}".format(p.params))
print("\nPoint correspondence:")
for (x1, y1), (x2, y2) in zip(pos_img, pos_img_rot):
    print("({:.2f}, {:.2f}) in source --> ({:.2f}, {:.2f}) in target".format(
        x1, y1, x2, y2))

print("==========================")
print("Preparing to complete WCS solve...")
time.sleep(3)

# connect to api, get token
apiKey = "oppqxlkuubsdwsok"  # Key from Simon Mahns
R = requests.post('http://nova.astrometry.net/api/login',
#                          thresh=95,
#                          maxval=255,
#                          # type=cv.THRESH_OTSU
#                          type=cv.THRESH_BINARY
#                          )
# target_image_luma = cv.adaptiveThreshold(src=target_image_luma,
#                                          maxValue=255,
#                                          adaptiveMethod=cv.ADAPTIVE_THRESH_GAUSSIAN_C,
#                                          thresholdType=cv.THRESH_BINARY,
#                                          blockSize=11,
#                                          C=2)

cv.imwrite("{:03d}_target.tiff".format(counter), target_image_luma)
try:
    transf, (source_list, target_list) = aa.find_transform(
        source=source_image_luma,
        target=target_image_luma,
        max_control_points=MAX_CONTROL_POINTS,
        detection_sigma=DETECTION_SIGMA,
        min_area=MIN_AREA)
    projection_0, footprint = aa.apply_transform(
        transf, source_image[:, :, 0], target_image[:, :, 0])
    projection_1, footprint = aa.apply_transform(
        transf, source_image[:, :, 1], target_image[:, :, 1])
    projection_2, footprint = aa.apply_transform(
        transf, source_image[:, :, 2], target_image[:, :, 2])
    projection_image = np.stack(
        [projection_0, projection_1, projection_2], axis=2)
    cv.imwrite("{:03d}.tiff".format(counter),
               projection_image.astype(np.uint8))
    accumulated_image += projection_image
    source_image = target_image
    counter += 1
#%% Load images to align
hdu_list_6 = fits.open('../images/stars06_p.fits')
header_6 = hdu_list_6[0].header
image_6 = hdu_list_6[0].data
hdu_list_6.close()

hdu_list_7 = fits.open('../images/stars07_p.fits')
header_7 = hdu_list_7[0].header
image_7 = hdu_list_7[0].data
hdu_list_7.close()

#%% Find the image transformation
# The transformation is returned in the `p` object. The two objects
# `img_6_srcs` and `img_7_srcs` give the coordinates of the sources found by
# the `astroalign.find_transform()` function.
p, (img_6_srcs, img_7_srcs) = aa.find_transform(image_6, image_7)

#%% Transform the `source` image
# Unfortunately, in version 2.4 of `astroalign` we have to convert the image
# data type for our images to unsigned 16-bit integers (`uint16`) to make the
# transform function work properly. We do this using the image method
# `astype('uint16')`.
image_6_aligned, footprint = aa.apply_transform(p, image_6.astype('uint16'),
                                                image_7.astype('uint16'),
                                                fill_value=1000)

#%% Write the shifted image to a new FITS file
# Create a new header for the file and add a keyword that specifies how the
# image was aligned.
header_6_aligned = header_6
import astroalign as aa
from PIL import Image
from matplotlib import image
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import rotate

THRESHOLD_VALUE = 100

a = Image.open('i1.png')
aBw = a.convert('L')
imgData = np.asarray(aBw)
aBin = (imgData > THRESHOLD_VALUE) * 1.0
# plt.imshow(thresholdedData)
# plt.show()

a = Image.open('i2.png')
aBw = a.convert('L')
imgData = np.asarray(aBw)
bBin = (imgData > THRESHOLD_VALUE) * 1.0

p, (pos_img, pos_img_rot) = aa.find_transform(aBin, bBin)
print("\nTranslation: (x, y) = ({:.2f}, {:.2f})".format(*p.translation))
print("Rotation: {:.2f} degrees".format(p.rotation * 180.0 / np.pi))
print("\nScale factor: {:.2f}".format(p.scale))
def stack_live(work_path, im_path, counter, ref=[], first_ref=[], save_im=False,
               align=True, stack_methode="Sum"):
    """
    Process, align and stack an image.

    :param work_path: string, path of the work folder
    :param im_path: string, path of the image to process
    :param counter: int, number of images stacked so far
    :param ref: np.array, stack image (absent for the first image)
    :param first_ref: np.array, first processed image, used as the alignment
                      reference (absent for the first image)
    :param save_im: bool, option to save the image as FITS
    :param align: bool, option to align the image or not
    :param stack_methode: string, stacking method ("Sum" or "Mean")

    :return: image: np.array, 3xMxN or MxN
             im_limit: int, bit limit (255 or 65535)
             im_mode: string, mode: "rgb" or "gray"

    TODO: add dark support
    """
    # test image format: ".fit", ".fits" or other
    if im_path.rfind(".fit") != -1:
        if im_path[im_path.rfind(".fit"):] == ".fit":
            extension = ".fit"
        elif im_path[im_path.rfind(".fit"):] == ".fits":
            extension = ".fits"
        raw_im = False
    else:
        # other format = raw camera format (cr2, ...)
        extension = im_path[im_path.rfind("."):]
        raw_im = True

    # remove the extension from the path
    name = im_path.replace(extension, '')
    # remove the path, keep just the image name
    name = name[name.rfind("/") + 1:]

    if not raw_im:
        # open the new image
        new_fit = fits.open(im_path)
        new = new_fit[0].data
        # save the header
        new_header = new_fit[0].header
        new_fit.close()
        # test the data type
        im_limit, im_type = test_utype(new)
        # test rgb or gray
        new, im_mode = test_and_debayer_to_rgb(new_header, new)
    else:
        print("convert DSLR image ...")
        new = rawpy.imread(im_path).postprocess(gamma=(1, 1),
                                                no_auto_bright=True,
                                                output_bps=16)
        im_mode = "rgb"
        extension = ".fits"
        im_limit = 2. ** 16 - 1
        im_type = "uint16"
        new = np.rollaxis(new, 2, 0)

    # ____________________________________
    # specific part for non-first images

    print("alignment and stacking...")
    # choose the mode (rgb or grayscale)
    if im_mode == "rgb":
        if align:
            # alignment on the green channel:
            p, __ = al.find_transform(new[1], first_ref[1])
        # stacking
        stack_image = []
        for j in tqdm(range(3)):
            if align:
                # align each color channel
                # (al.apply_transform returns (aligned_image, footprint))
                align_image, __ = al.apply_transform(p, new[j], ref[j])
            else:
                align_image = new[j]
            # choose stacking method
            # need to convert to float32 to hold excess values
            if stack_methode == "Sum":
                stack = np.float32(align_image) + np.float32(ref[j])
            elif stack_methode == "Mean":
                stack = ((counter - 1) * np.float32(ref[j]) + np.float32(align_image)) / counter
            else:
                raise ValueError("Stack method is not supported")
            # clip excess values above the limit
            if im_type == 'uint8':
                stack_image.append(np.uint8(np.where(stack < 2 ** 8 - 1, stack, 2 ** 8 - 1)))
            elif im_type == 'uint16':
                stack_image.append(np.uint16(np.where(stack < 2 ** 16 - 1, stack, 2 ** 16 - 1)))
        del stack
        del new
    elif im_mode == "gray":
        if align:
            # alignment
            p, __ = al.find_transform(new, first_ref)
            align_image, __ = al.apply_transform(p, new, ref)
            del p
        else:
            align_image = new
        del new
        # choose stacking method
        # need to convert to float32 to hold excess values
        if stack_methode == "Sum":
            stack = np.float32(align_image) + np.float32(ref)
        elif stack_methode == "Mean":
            stack = ((counter - 1) * np.float32(ref) + np.float32(align_image)) / counter
        else:
            raise ValueError("Stack method is not supported")
        # clip excess values above the limit
        if im_type == 'uint8':
            stack_image = np.uint8(np.where(stack < 2 ** 8 - 1, stack, 2 ** 8 - 1))
        elif im_type == 'uint16':
            stack_image = np.uint16(np.where(stack < 2 ** 16 - 1, stack, 2 ** 16 - 1))
        del stack
    else:
        raise ValueError("Mode not supported")

    image = np.array(stack_image)

    # _____________________________
    if save_im:
        # save the stacked image as FITS
        red = fits.PrimaryHDU(data=image)
        red.writeto(work_path + "/" + "stack_image_" + name + extension)
        # delete image in memory
        del red

    return image, im_limit, im_mode
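# A hedged usage sketch for stack_live() above; paths are invented, and the
# first frame is assumed to have been loaded separately, since this excerpt
# only covers the non-first images.
from astropy.io import fits

first = fits.getdata("/tmp/session/light_0001.fits")  # reference frame
stack = first
for n, path in enumerate(["/tmp/session/light_0002.fits",
                          "/tmp/session/light_0003.fits"], start=2):
    stack, im_limit, im_mode = stack_live("/tmp/session", path, counter=n,
                                          ref=stack, first_ref=first,
                                          stack_methode="Mean")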