Example #1
def AnnularPCA(cube, angs, fwhm, name_input):

	print( "\n --- ANNULAR PCA")

	print("Performing Annular PCA...")

	ann_pca_output = vip.pca.pca_local.pca_annular(
							cube, angs, fwhm=fwhm,
							full_output = True, verbose = True
							)

	# Outputs three objects in this order:
	#	cube_out: cube of residuals
	#	cube_der: derotated cube of residuals
	#	frame: Annular PCA frame

	hciplot.plot_frames( 
			ann_pca_output[2], 
			label = 'AnnPCA reduction of {name}'.format(name=name_input), 
			grid = False, 
			size_factor = 5
			)

	print( 'Would you like to save the Annular PCA reduced images?' )
	questionsave = Checkfunction()

	if questionsave == 0:	
		vip.fits.write_fits('AnnPCA_{name}.fits'.format(name=name_input), ann_pca_output[2], verbose=True)
		vip.fits.write_fits('AnnPCA_residuals_{name}.fits'.format(name=name_input), ann_pca_output[1], verbose=True) 

	annpca_frame = ann_pca_output[2]
	residuals_stim_input = ann_pca_output[1]

	return annpca_frame, residuals_stim_input
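
A minimal usage sketch for the helper above, assuming vip_hci is imported as `vip`, that `hciplot` and `Checkfunction` are already defined in the same script, and that the FITS file names and the FWHM value below are placeholders:

import vip_hci as vip

# Hypothetical inputs: an ADI cube, its parallactic angles and the PSF FWHM in pixels.
cube = vip.fits.open_fits('my_cube.fits')      # placeholder file name
angs = vip.fits.open_fits('my_angles.fits')    # placeholder file name
fwhm = 4.0                                     # placeholder FWHM, normally measured from the PSF

annpca_frame, residuals_cube = AnnularPCA(cube, angs, fwhm, name_input='MyStar')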
Example #2
    def inspect_patch(self, xy, cmap='bone', dpi=40):
        """
        """
        if self.probas is None:
            raise RuntimeError("You must run the predictor first")

        if not isinstance(xy, tuple):
            raise TypeError("`xy` must be a tuple")
        x_input, y_input = xy

        index = None
        for i, coord in enumerate(self.coords):
            if coord[0] == y_input and coord[1] == x_input:
                index = i

        if index is None:
            raise ValueError("Input coordinates not found")

        prob = self.probas[index]
        print("Proba : " + str(prob))
        sample = np.squeeze(self.patches[index])
        max_slices = sample.shape[0]
        if self.sample_type == 'tmlar4d':
            for i in range(sample.shape[1]):
                plot_frames(tuple(sample[:, i]), axis=False,
                            colorbar=False, cmap=cmap, dpi=dpi, horsp=0.05)
        else:
            plot_frames(tuple(sample), axis=False, colorbar=False,
                        cmap=cmap, dpi=dpi, horsp=0.05)
Example #3
File: detection.py Project: zuzhaoye/VIP
    def check_blobs(array, coords_temp, fwhm, debug):
        y_temp = coords_temp[:, 0]
        x_temp = coords_temp[:, 1]
        coords = []
        # Fitting a 2d gaussian to each local maxima position
        for y, x in zip(y_temp, x_temp):
            subsi = 3 * int(np.ceil(fwhm))
            if subsi % 2 == 0:
                subsi += 1

            if mode in ('lpeaks', 'log', 'dog'):
                scy = y + pad
                scx = x + pad
            elif mode in ('snrmap', 'snrmapf'):
                scy = y
                scx = x
            subim, suby, subx = get_square(array,
                                           subsi,
                                           scy,
                                           scx,
                                           position=True,
                                           force=True,
                                           verbose=False)
            cy, cx = frame_center(subim)

            gauss = models.Gaussian2D(amplitude=subim.max(),
                                      x_mean=cx,
                                      y_mean=cy,
                                      theta=0,
                                      x_stddev=fwhm * gaussian_fwhm_to_sigma,
                                      y_stddev=fwhm * gaussian_fwhm_to_sigma)

            sy, sx = np.indices(subim.shape)
            fitter = fitting.LevMarLSQFitter()
            fit = fitter(gauss, sx, sy, subim)

            # checking that the amplitude is positive > 0
            # checking whether the x and y centroids of the 2d gaussian fit
            # coincide with the center of the subimage (within 2px error)
            # checking whether the mean of the fwhm in y and x of the fit
            # are close to the FWHM_PSF with a margin of 3px
            fwhm_y = fit.y_stddev.value * gaussian_sigma_to_fwhm
            fwhm_x = fit.x_stddev.value * gaussian_sigma_to_fwhm
            mean_fwhm_fit = np.mean([np.abs(fwhm_x), np.abs(fwhm_y)])
            condyf = np.allclose(fit.y_mean.value, cy, atol=2)
            condxf = np.allclose(fit.x_mean.value, cx, atol=2)
            condmf = np.allclose(mean_fwhm_fit, fwhm, atol=3)
            if fit.amplitude.value > 0 and condxf and condyf and condmf:
                coords.append(
                    (suby + fit.y_mean.value, subx + fit.x_mean.value))

                if debug:
                    print('Coordinates (Y,X): {:.3f},{:.3f}'.format(y, x))
                    print('fit peak = {:.3f}'.format(fit.amplitude.value))
                    msg = 'fwhm_y in px = {:.3f}, fwhm_x in px = {:.3f}'
                    print(msg.format(fwhm_y, fwhm_x))
                    print('mean fit fwhm = {:.3f}'.format(mean_fwhm_fit))
                    if plot:
                        plot_frames(subim, colorbar=True, axis=False, dpi=60)
        return coords
Example #4
def create_cube_with_satspots(n_frames=6, wh=31, star_fwhm=3, debug=False):
    global seed
    shape = (n_frames, wh, wh)
    star = create_cube_with_gauss2d(shape=shape, mean=wh // 2, stddev=star_fwhm)

    # make sure satspot is neither too close to star nor at the edge of the
    # image
    diagonal = seed.uniform(4 * star_fwhm, wh // 2)
    d = diagonal / np.sqrt(2)

    sat1_coords = (wh // 2 - d, wh // 2 + d)
    sat2_coords = (wh // 2 + d, wh // 2 + d)
    sat3_coords = (wh // 2 - d, wh // 2 - d)
    sat4_coords = (wh // 2 + d, wh // 2 - d)

    sat1 = create_cube_with_gauss2d(shape=shape, mean=sat1_coords, stddev=1)
    sat2 = create_cube_with_gauss2d(shape=shape, mean=sat2_coords, stddev=1)
    sat3 = create_cube_with_gauss2d(shape=shape, mean=sat3_coords, stddev=1)
    sat4 = create_cube_with_gauss2d(shape=shape, mean=sat4_coords, stddev=1)

    cube = star + sat1 + sat2 + sat3 + sat4

    if debug:
        hciplot.plot_frames(cube[0])

    return cube, [sat1_coords, sat2_coords, sat3_coords, sat4_coords]
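
A short sketch of how this test helper might be exercised, assuming `create_cube_with_gauss2d` and the module-level `seed` used above are available in the same test module:

import numpy as np

seed = np.random.RandomState(42)   # the helper reads a module-level `seed`; assumed here

cube, spot_coords = create_cube_with_satspots(n_frames=6, wh=31, star_fwhm=3)
print(cube.shape)         # (6, 31, 31): star plus four satellite spots per frame
print(spot_coords[0])     # coordinates of the first satellite spot, as passed to the Gaussian generator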
Example #5
    def check_blobs(array, coords_temp, fwhm, debug):
        y_temp = coords_temp[:, 0]
        x_temp = coords_temp[:, 1]
        coords = []
        # Fitting a 2d gaussian to each local maxima position
        for y, x in zip(y_temp, x_temp):
            subsi = 3 * int(np.ceil(fwhm))
            if subsi % 2 == 0:
                subsi += 1

            if mode in ('lpeaks', 'log', 'dog'):
                scy = y + pad
                scx = x + pad
            elif mode in ('snrmap', 'snrmapf'):
                scy = y
                scx = x
            subim, suby, subx = get_square(array, subsi, scy, scx,
                                           position=True, force=True,
                                           verbose=False)
            cy, cx = frame_center(subim)

            gauss = models.Gaussian2D(amplitude=subim.max(), x_mean=cx,
                                      y_mean=cy, theta=0,
                                      x_stddev=fwhm*gaussian_fwhm_to_sigma,
                                      y_stddev=fwhm*gaussian_fwhm_to_sigma)

            sy, sx = np.indices(subim.shape)
            fitter = fitting.LevMarLSQFitter()
            fit = fitter(gauss, sx, sy, subim)

            # checking that the amplitude is positive > 0
            # checking whether the x and y centroids of the 2d gaussian fit
            # coincide with the center of the subimage (within 2px error)
            # checking whether the mean of the fwhm in y and x of the fit
            # are close to the FWHM_PSF with a margin of 3px
            fwhm_y = fit.y_stddev.value * gaussian_sigma_to_fwhm
            fwhm_x = fit.x_stddev.value * gaussian_sigma_to_fwhm
            mean_fwhm_fit = np.mean([np.abs(fwhm_x), np.abs(fwhm_y)])
            condyf = np.allclose(fit.y_mean.value, cy, atol=2)
            condxf = np.allclose(fit.x_mean.value, cx, atol=2)
            condmf = np.allclose(mean_fwhm_fit, fwhm, atol=3)
            if fit.amplitude.value > 0 and condxf and condyf and condmf:
                coords.append((suby + fit.y_mean.value,
                               subx + fit.x_mean.value))

                if debug:
                    print('Coordinates (Y,X): {:.3f},{:.3f}'.format(y, x))
                    print('fit peak = {:.3f}'.format(fit.amplitude.value))
                    msg = 'fwhm_y in px = {:.3f}, fwhm_x in px = {:.3f}'
                    print(msg.format(fwhm_y, fwhm_x))
                    print('mean fit fwhm = {:.3f}'.format(mean_fwhm_fit))
                    if plot:
                        plot_frames(subim, colorbar=True, axis=False, dpi=60)
        return coords
Example #6
def do_recenter(method, cube, shiftx, shifty, errormsg, mse=1e-2,
                mse_skip_first=False, n_frames=6, debug=False, **kwargs):
    #===== shift cube
    shifted_cube = shift_cube(cube, shiftx, shifty)

    if debug:
        html("<h3>===== {}({}) =====</h3>".format(
            method.__name__,
            ", ".join("{}={}".format(k, v) for k, v in kwargs.items())))

    #===== recentering
    rec_res = method(shifted_cube, debug=debug, **kwargs)
    
    recentered_cube = rec_res[0]
    unshifty = rec_res[1]
    unshiftx = rec_res[2]

    if debug:
        hciplot.plot_frames(cube, title="input cube")
        hciplot.plot_frames(shifted_cube, title="shifted cube")
        hciplot.plot_frames(recentered_cube, title="recentered cube")

    if debug:
        hciplot.plot_frames(cube[1], recentered_cube[1], shifted_cube[1],
                            label=["cube[1]", "recentered[1]", "shifted[1]"])

    if mse_skip_first:
        if debug:
            print("\033[33mfirst shift ignored for MSE\033[0m")
        shiftx = shiftx[1:]
        shifty = shifty[1:]
        unshiftx = unshiftx[1:]
        unshifty = unshifty[1:]

    if debug:
        try:
            import pandas as pd
            from IPython.display import display
            p = pd.DataFrame(np.array([
                shiftx,
                -unshiftx,
                shifty,
                -unshifty
            ]).T, columns=["x", "un-x", "y", "un-y"])
            print("\033[33mshifts:\033[0m")
            display(p)
        except:
            print("\033[33mcalculated shifts:\033[0m", unshiftx)
            print(" " * 18, unshifty)
            print("\033[33moriginal shifts\033[0m:  ", -shiftx)
            print(" " * 18, -shifty)
        print("\033[33merrors:\033[0m", mean_squared_error(
            shiftx, -unshiftx), mean_squared_error(shifty, -unshifty))

    #===== verify error
    assert mean_squared_error(shiftx, -unshiftx) < mse, errormsg
    assert mean_squared_error(shifty, -unshifty) < mse, errormsg

    if debug:
        print("\033[32mpassed.\033[0m")
Example #7
def LLSG(cube, angs, fwhm, name_input):

    print("\n   --- LLSG")
    rank = input("What rank of llsg would you like to do? (default = 6)\n")
    rank = int(rank) if rank else 6  # fall back to the advertised default

    #Reduces the image using the LLSG algorithm then plots the image.
    print(fwhm)
    llsg_output = vip.llsg.llsg(cube,
                                angs,
                                fwhm=fwhm,
                                rank=rank,
                                thresh=2,
                                max_iter=20,
                                random_seed=10,
                                full_output=True)

    # LLSG full output is:
    #0	list_l_array_der
    #1	list_s_array_der
    #2	list_g_array_der
    #3	frame_l
    #4	frame_s
    #5	frame_g
    # Each is the frame and residual cube for the three noise types:
    # l (low-rank), s (sparse), g (gaussian)
    # Companions are in the Sparse noise.
    # To get STIM map of LLSG, input 'list_s_array_der' into STIM Map

    llsg_frame = llsg_output[4]
    llsg_residual_sparse = np.asarray(llsg_output[1][0])
    #list_s_array_der is a list of one member,
    # this one member is another list of the 24 residual images
    # So get that one element, and make it an array.

    hciplot.plot_frames(
        llsg_frame,
        label='LLSG reduced image of {name}'.format(name=name_input))

    # Asks the user whether they would like to save the image.
    print('Would you like to save the LLSG reduced image?\n')
    questionsave = Checkfunction()

    if questionsave == 0:
        vip.fits.write_fits('new_LLSG_{name}.fits'.format(name=name_input),
                            llsg_frame,
                            verbose=True)

    return llsg_frame, llsg_residual_sparse, rank
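
A minimal call sketch for the helper above, assuming `cube`, `angs` and `fwhm` were loaded as in the Annular PCA sketch and that `Checkfunction` is defined in the same script:

llsg_frame, llsg_residual_sparse, rank = LLSG(cube, angs, fwhm, name_input='MyStar')

# The sparse residual cube is the input the comments above suggest feeding to a STIM map.
print(llsg_frame.shape, llsg_residual_sparse.shape, rank)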
Example #8
def prepare_patches(cube,
                    angle_list,
                    xy,
                    fwhm,
                    patch_size_px,
                    delta_rot=0.5,
                    normalization='slice',
                    imlib='opencv',
                    interpolation='bilinear',
                    debug=False):
    """ Prepare patches for SODINN-PW.
    """
    centy_fr, centx_fr = frame_center(cube[0])

    angle_list = check_pa_vector(angle_list)

    xy_dist = dist(centy_fr, centx_fr, xy[1], xy[0])
    res = _pairwise_diff_residuals(cube,
                                   angle_list,
                                   ann_center=xy_dist,
                                   fwhm=fwhm,
                                   delta_rot=delta_rot,
                                   debug=False)

    res_der = cube_derotate(res,
                            angle_list,
                            imlib=imlib,
                            interpolation=interpolation)
    res_der_crop = cube_crop_frames(res_der,
                                    patch_size_px,
                                    xy=xy,
                                    verbose=False,
                                    force=True)

    patches = normalize_01_pw(res_der_crop, normalization)

    if debug:
        print('dist : {}'.format(xy_dist))
        plot_frames(
            tuple(patches),
            axis=False,
            colorbar=False,
        )
    return patches
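
A hedged sketch of calling this SODINN-PW helper, assuming `cube` and `angs` are an ADI cube and its parallactic angles as in the earlier sketches; the candidate position and patch size below are placeholders:

# xy is the (x, y) position of a candidate; patch_size_px is typically a few times the FWHM.
patches = prepare_patches(cube, angle_list=angs, xy=(63, 58), fwhm=4.0,
                          patch_size_px=15, delta_rot=0.5, debug=True)
print(patches.shape)   # one normalized patch per pairwise-difference residual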
Example #9
def StimMap(residuals_cube, name_input, origin_algo):
    print("Computing STIM map using {algo} output...".format(algo=origin_algo))
    stim_map = vip.metrics.compute_stim_map(residuals_cube)

    hciplot.plot_frames(stim_map,
                        label='STIM map of {name} using {algo}'.format(
                            name=name_input, algo=origin_algo),
                        grid=False,
                        size_factor=5)

    # Loop asks user if they would like to save the image.
    print('Would you like to save the image?')
    questionsave = Checkfunction()

    if questionsave == 0:
        vip.fits.write_fits('new_STIM_{algo}_{name}.fits'.format(
            algo=origin_algo, name=name_input),
                            stim_map,
                            verbose=True)
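
A short sketch combining this helper with LLSG above, assuming `llsg_residual_sparse` is the derotated sparse residual cube returned by the LLSG helper (Example #7):

StimMap(llsg_residual_sparse, name_input='MyStar', origin_algo='LLSG')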
Example #10
    def inspect_probmap(self, vmin_log=1e-10, labelsize=10, circlerad=10,
                        circlecolor='white', circlealpha=0.6, grid=True,
                        gridspacing=10, gridalpha=0.2, showcent=True,
                        print_info=True, **kwargs):
        """
        from matplotlib.pyplot import hist, figure

        vec = pred_svd.pmap.flatten()
        _ = hist(vec[vec > 0], bins=np.sqrt(vec.shape[0]).astype(int))
        figure()
        _ = hist(np.log(vec[vec > 0]), bins=np.sqrt(vec.shape[0]).astype(int))

        """
        if print_info:
            self.print_info()

        plot_frames((self.pmap, self.pmap), log=(False, True),
                    vmin=(0, vmin_log), vmax=(1, 1),
                    label=('Probmap', 'Probmap (logscale)'),
                    label_size=labelsize, circle_radius=circlerad,
                    circle_color=circlecolor, circle_alpha=circlealpha,
                    show_center=showcent, grid=grid, grid_alpha=gridalpha,
                    grid_spacing=gridspacing, **kwargs)
Example #11
    def find_star_unsat(self, unsat_list, verbose=True, debug=False):
        self.unsat_star_pos = {}
        y_star = []
        x_star = []
        for un, fits_name in enumerate(unsat_list):
            tmp = np.median(open_fits(self.outpath + '1_crop_unsat_' +
                                      fits_name,
                                      header=False),
                            axis=0)
            table_res = detection(tmp,
                                  fwhm=1.2 * self.resel,
                                  bkg_sigma=1,
                                  mode='lpeaks',
                                  matched_filter=False,
                                  mask=True,
                                  snr_thresh=10,
                                  plot=debug,
                                  debug=debug,
                                  full_output=debug,
                                  verbose=verbose)
            y_star = np.append(y_star, table_res['y'][0])
            x_star = np.append(x_star, table_res['x'][0])

        self.unsat_star_pos['y'] = y_star
        self.unsat_star_pos['x'] = x_star
        self.unsat_star_pos['fname'] = unsat_list
        if verbose:
            print('The star has been located in the unsat cubes')
        if debug:
            snr_star = table_res['px_snr']
            for un, fits_name in enumerate(unsat_list):
                tmp = np.median(open_fits(self.outpath + '1_crop_unsat_' +
                                          fits_name,
                                          header=False),
                                axis=0)
                plot_frames(tmp, circle=(y_star[un], x_star[un]))
Example #12
File: roc.py Project: avigan/VIP
def compute_binary_map(frame, thresholds, injections, fwhm, npix=1,
                       overlap_threshold=0.7, max_blob_fact=2, plot=False,
                       debug=False):
    """
    Take a list of ``thresholds``, create binary maps and count detections/fps.
    A blob which is "too big" is split into apertures, and every aperture adds
    one 'false positive'.

    Parameters
    ----------
    frame : numpy ndarray
        Detection map.
    thresholds : list or numpy ndarray
        List of thresholds (detection criteria).
    injections : tuple, list of tuples
        Coordinates (x,y) of the injected companions. Also accepts 1d/2d
        ndarrays.
    fwhm : float
        FWHM, used for obtaining the size of the circular aperture centered at
        the injection position (and measuring the overlapping with found blobs).
        The circular aperture has 2 * FWHM in diameter.
    npix : int, optional
        The number of connected pixels, each greater than the given threshold,
        that an object must have to be detected. ``npix`` must be a positive
        integer. Passed to ``detect_sources`` function from ``photutils``.
    overlap_threshold : float
        Percentage of overlap a blob has to have with the aperture around an
        injection.
    max_blob_fact : float
        Maximum size of a blob (in multiples of the resolution element) before
        it is considered as "too big" (= non-detection).
    plot : bool, optional
        If True, a final resulting plot summarizing the results will be shown.
    debug : bool, optional
        For showing optional information.

    Returns
    -------
    list_detections : list of int
        List of detection count for each threshold.
    list_fps : list of int
        List of false positives count for each threshold.
    list_binmaps : list of 2d ndarray
        List of binary maps: detection maps thresholded for each threshold
        value.

    """
    def _overlap_injection_blob(injection, fwhm, blob_mask):
        """
        Parameters
        ----------
        injection: tuple (y,x)
        fwhm : float
        blob_mask : 2d bool ndarray

        Returns
        -------
        overlap_fact : float between 0 and 1
            Percentage of the area overlap. If the blob is smaller than the
            resolution element, this is ``intersection_area / blob_area``,
            otherwise ``intersection_area / resolution_element``.

        """
        if len(injections[0]) > 0:
            injection_mask = get_circle(np.ones_like(blob_mask), radius=fwhm,
                                        cy=injection[1], cx=injection[0],
                                        mode="mask")
        else:
            injection_mask = np.zeros_like(blob_mask)
        intersection = injection_mask & blob_mask
        smallest_area = min(blob_mask.sum(), injection_mask.sum())
        return intersection.sum() / smallest_area

    # --------------------------------------------------------------------------
    list_detections = []
    list_fps = []
    list_binmaps = []
    sizey, sizex = frame.shape
    cy, cx = frame_center(frame)
    reselem_mask = get_circle(frame, radius=fwhm, cy=cy, cx=cx, mode="val")
    npix_circ_aperture = reselem_mask.shape[0]

    # normalize injections: accepts combinations of 1d/2d and tuple/list/array.
    injections = np.asarray(injections)
    if injections.ndim == 1:
        injections = np.array([injections])

    for ithr, threshold in enumerate(thresholds):
        if debug:
            print("\nprocessing threshold #{}: {}".format(ithr + 1, threshold))

        segments = detect_sources(frame, threshold, npix, connectivity=4)
        binmap = (segments.data != 0)

        if debug:
            plot_frames((segments.data, binmap), cmap=('tab20b', 'binary'),
                        circle=tuple(tuple(xy) for xy in injections),
                        circle_radius=fwhm, circle_alpha=0.6,
                        label=("segmentation map", "binary map"))

        detections = 0
        fps = 0

        for segment in segments.segments:
            label = segment.label
            blob_mask = (segments.data == label)
            blob_area = segment.area

            if debug:
                lab = "blob #{}, area={}px**2".format(label, blob_area)
                plot_frames(blob_mask, circle_radius=fwhm, circle_alpha=0.6,
                            circle=tuple(tuple(xy) for xy in injections),
                            cmap='binary', label_size=8, label=lab,
                            size_factor=3)

            for iinj, injection in enumerate(injections):
                if len(injections[0]) > 0:  # checking injections is not empty
                    if injection[0] > sizex or injection[1] > sizey:
                        raise ValueError("Wrong coordinates in `injections`")

                    if debug:
                        print("\ttesting injection #{} at {}".format(iinj + 1,
                                                                     injection))

                if blob_area > max_blob_fact * npix_circ_aperture:
                    number_of_apertures_in_blob = blob_area / npix_circ_aperture
                    fps += number_of_apertures_in_blob  # float, rounded at end
                    if debug:
                        print("\tblob is too big (+{:.0f} fps)"
                              "".format(number_of_apertures_in_blob))
                        print("\tskipping all other injections")
                    # continue with next blob, do not check other injections
                    break

                overlap = _overlap_injection_blob(injection, fwhm, blob_mask)
                if overlap > overlap_threshold:
                    if debug:
                        print("\toverlap of {}! (+1 detection)"
                              "".format(overlap))

                    detections += 1
                    # continue with next blob, do not check other injections
                    break

                if debug:
                    print("\toverlap of {} -> do nothing".format(overlap))

            else:
                if debug:
                    print("\tdid not find a matching injection for this "
                          "blob (+1 fps)")
                fps += 1

        if debug:
            print("done with threshold #{}".format(ithr))
            print("result: {} detections, {} fps".format(detections, fps))

        fps = np.round(fps).astype(int).item()  # -> python `int`

        list_detections.append(detections)
        list_binmaps.append(binmap)
        list_fps.append(fps)

    if plot:
        labs = tuple(str(det) + ' detections' + '\n' + str(fps) +
                     ' false positives' for det, fps in zip(list_detections,
                                                            list_fps))
        if len(injections[0]) > 0:
            circles = tuple(tuple(xy) for xy in injections)
        else:
            circles = None
        plot_frames(tuple(list_binmaps), title='Final binary maps', label=labs,
                    label_size=8, cmap='binary', circle_alpha=0.8,
                    circle=circles, circle_radius=fwhm,
                    circle_color='deepskyblue', axis=False)

    return list_detections, list_fps, list_binmaps
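
A hedged usage sketch with a synthetic detection map; in practice `detmap` would be a real S/N or probability map, and the injection position below is a placeholder:

import numpy as np

detmap = np.random.randn(100, 100)     # placeholder detection map
thresholds = np.linspace(1, 3, 3)      # detection criteria to scan
injections = [(34, 60)]                # hypothetical (x, y) of one injected companion

dets, fps, binmaps = compute_binary_map(detmap, thresholds, injections,
                                        fwhm=4, npix=1,
                                        overlap_threshold=0.7, plot=True)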
Example #13
File: roc.py Project: avigan/VIP
    def plot_detmaps(self, i=None, thr=9, dpi=100,
                     axis=True, grid=False, vmin=-10, vmax='max',
                     plot_type="horiz"):
        """
        Plot the detection maps for one injection.

        Parameters
        ----------
        i : int or None, optional
            Index of the injection, between 0 and self.n_injections. If None,
            takes the 30th injection, or if there are fewer injections, the
            middle one.
        thr : int, optional
            Index of the threshold.
        dpi, axis, grid, vmin, vmax
            Passed to ``pp_subplots``
        plot_type : {"horiz" or "vert"}, optional
            Plot type.

            ``horiz``
                One row per algorithm (frame, probmap, binmap)
            ``vert``
                1 row for final frames, 1 row for probmaps and 1 row for binmaps

        """
        # input parameters
        if i is None:
            if len(self.list_xy) > 30:
                i = 30
            else:
                i = len(self.list_xy) // 2

        if vmax == 'max':
            # TODO: document this feature.
            vmax = np.concatenate([m.frames[i] for m in self.methods if
                                   hasattr(m, "frames") and
                                   len(m.frames) >= i]).max()/2

        # print information
        print('X,Y: {}'.format(self.list_xy[i]))
        print('dist: {:.3f}, flux: {:.3f}'.format(self.dists[i],
                                                  self.fluxes[i]))
        print()

        if plot_type in [1, "horiz"]:
            for m in self.methods:
                print('detection state: {} | false positives: {}'.format(
                    m.detections[i][thr], m.fps[i][thr]))
                labels = ('{} frame'.format(m.name), '{} S/Nmap'.format(m.name),
                          'Thresholded at {:.1f}'.format(m.thresholds[thr]))
                plot_frames((m.frames[i] if len(m.frames) >= i else
                            np.zeros((2, 2)), m.probmaps[i], m.bmaps[i][thr]),
                            label=labels, dpi=dpi, horsp=0.2, axis=axis,
                            grid=grid, cmap=['viridis', 'viridis', 'gray'])

        elif plot_type in [2, "vert"]:
            labels = tuple('{} frame'.format(m.name) for m in self.methods if
                           hasattr(m, "frames") and len(m.frames) >= i)
            plot_frames(tuple(m.frames[i] for m in self.methods if
                        hasattr(m, "frames") and len(m.frames) >= i),
                        dpi=dpi, label=labels, vmax=vmax, vmin=vmin, axis=axis,
                        grid=grid)

            plot_frames(tuple(m.probmaps[i] for m in self.methods), dpi=dpi,
                        label=tuple(['{} S/Nmap'.format(m.name) for m in
                                     self.methods]), axis=axis, grid=grid)

            for m in self.methods:
                msg = '{} detection: {}, FPs: {}'
                print(msg.format(m.name, m.detections[i][thr], m.fps[i][thr]))

            labels = tuple('Thresholded at {:.1f}'.format(m.thresholds[thr])
                           for m in self.methods)
            plot_frames(tuple(m.bmaps[i][thr] for m in self.methods),
                        dpi=dpi, label=labels, axis=axis, grid=grid,
                        colorbar=False, cmap='bone')
        else:
            raise ValueError("`plot_type` unknown")
Example #14
def plot_traindata(T,
                   zeroind=None,
                   oneind=None,
                   full_info=False,
                   plot_pair=True,
                   dpi=100,
                   indices=None,
                   save_plot=False):
    """
    """
    xarr = T.x
    yarr = T.y
    if 'xnor' in T:
        xarrn = T.xnor
    else:
        xarrn = None  # avoid NameError in the `xarrn is not None` checks below

    if zeroind is None:
        zeroind = np.random.randint(0, xarr.shape[0] / 2.)
    if oneind is None:
        oneind = np.random.randint(xarr.shape[0] / 2., xarr.shape[0])

    if full_info:
        msg1 = 'N samples : {} | Runtime : {}'
        print(msg1.format(T.nsamp, T.runtime))
        msg2 = 'FWHM : {} | PLSC : {} | K list : {}'
        print(msg2.format(T.fwhm, T.plsc, T.klist))
        msg3 = 'In Rad : {} | Out Rad : {} | Patch size : {}'
        print(msg3.format(T.inrad, T.outrad, T.sizepatch))
        msg4 = 'Collapse func : {} | Scaling : {}'
        print(msg4.format(T.collaf.__name__, T.scaling))
        msg5 = 'N patches : {} | Perc orig zeros : {}'
        print(msg5.format(T.npatches, T.perorigzeros))
        msg6 = 'Flux distro : {} | Par1 : {} | Par2 : {}'
        print(msg6.format(T.fluxdistro, T.fluxdistrop1, T.fluxdistrop2))
        msg7 = 'N injections : {} | Perc aug ones : {}'
        print(msg7.format(T.nsamp * 0.5 * T.peraugones, T.peraugones))
        msg8 = 'Aug shifts : {} | Aug range rotat : {}'
        print(msg8.format(T.shifts, T.rangerot))
        figure(figsize=(12, 2))
        subplot(1, 3, 1)
        hist(T.fluxes, bins=int(np.sqrt(T.fluxes.shape[0])))
        title('Fluxes histogram')
        subplot(1, 3, 2)
        hist(np.array(T.dists).flatten(), bins=int(np.sqrt(T.fluxes.shape[0])))
        title('Distances histogram')
        subplot(1, 3, 3)
        hist(np.array(T.thetas).flatten(),
             bins=int(np.sqrt(T.fluxes.shape[0])))
        title('Thetas histogram')
        show()
        print()

    npatches = xarr[zeroind].shape[0]
    if plot_pair or save_plot:
        if indices is not None:
            zerarr = xarr[zeroind][indices]
            onearr = xarr[oneind][indices]
            if xarrn is not None: zerarrn = xarrn[zeroind][indices]
            if xarrn is not None: onearrn = xarrn[oneind][indices]
        else:
            zerarr = xarr[zeroind]
            onearr = xarr[oneind]
            if xarrn is not None: zerarrn = xarrn[zeroind]
            if xarrn is not None: onearrn = xarrn[oneind]

        if save_plot:
            print('{} | Sample {}'.format(int(yarr[zeroind]), zeroind))
            plot_frames(zerarr,
                        dpi=dpi,
                        axis=False,
                        vmin=xarr[zeroind].min(),
                        vmax=xarr[zeroind].max(),
                        save='patch_zero.pdf',
                        colorbar=False,
                        horsp=0.1)
            if xarrn is not None:
                plot_frames(zerarrn,
                            axis=False,
                            dpi=dpi,
                            colorbar=False,
                            save='patch_zero_nor.pdf',
                            horsp=0.1)
            print(int(yarr[oneind]), '| Sample', oneind)
            plot_frames(onearr,
                        axis=False,
                        vmin=xarr[oneind].min(),
                        vmax=xarr[oneind].max(),
                        dpi=dpi,
                        save='patch_one.pdf',
                        colorbar=False,
                        horsp=0.1)
            if xarrn is not None:
                plot_frames(onearrn,
                            axis=False,
                            dpi=dpi,
                            horsp=0.1,
                            save='patch_one_nor.pdf',
                            colorbar=False)

        else:
            plot_frames(zerarr,
                        title='Unnormalized ZERO multiK patch',
                        dpi=dpi,
                        axis=False,
                        vmin=xarr[zeroind].min(),
                        vmax=xarr[zeroind].max(),
                        horsp=0.1)
            if xarrn is not None:
                plot_frames(zerarrn,
                            title='Normalized ZERO multiK patch',
                            axis=False,
                            dpi=dpi,
                            horsp=0.1)
            plot_frames(onearr,
                        title='Unnormalized ONE multiK patch',
                        axis=False,
                        vmin=xarr[oneind].min(),
                        vmax=xarr[oneind].max(),
                        dpi=dpi,
                        horsp=0.1)
            if xarrn is not None:
                plot_frames(onearrn,
                            title='Normalized ONE multiK patch',
                            axis=False,
                            dpi=dpi,
                            horsp=0.1)
Example #15
def snrmap(array,
           fwhm,
           plot=False,
           mode='sss',
           source_mask=None,
           nproc=None,
           array2=None,
           use2alone=False,
           verbose=True,
           **kwargs):
    """Parallel implementation of the S/N map generation function. Applies the
    S/N function (small samples penalty) at each pixel.

    Parameters
    ----------
    array : numpy.ndarray
        Input frame (2d array).
    fwhm : float
        Size in pixels of the FWHM.
    plot : bool, optional
        If True plots the S/N map. False by default.
    mode : {'sss', 'peakstddev'}, string optional
        'sss' uses the approach with the small sample statistics penalty and
        'peakstddev' uses the peak(aperture)/std(annulus) version.
    source_mask : array_like, optional
        If provided, it takes into account existing sources. The mask is a ones
        2d array, with the same size as the input frame. The centers of the
        known sources have a zero value.
    nproc : int or None
        Number of processes for parallel computing.
    array2 : numpy.ndarray, optional
        Additional image (e.g. processed image with negative derotation angles) 
        allowing more noise samples to be used. Should have the 
        same dimensions as array.
    use2alone: bool, optional
        Whether to use array2 alone to estimate the noise (might be useful to 
        estimate the snr of extended disk features).
    verbose: bool, optional
        Whether to print timing or not.
    **kwargs : dictionary, optional
        Arguments to be passed to ``plot_frames`` to customize the plot (and to
        save it to disk).

    Returns
    -------
    snrmap : 2d array_like
        Frame of the same size as the input frame, with the S/N value of each pixel.
    """
    if verbose:
        start_time = time_ini()
    if array.ndim != 2:
        raise TypeError('Input array is not a 2d array or image.')
    if plot:
        plt.close('snr')

    sizey, sizex = array.shape
    snrmap = np.zeros_like(array)
    width = min(sizey, sizex) / 2 - 1.5 * fwhm
    mask = get_annulus_segments(array, (fwhm / 2) + 2, width, mode="mask")[0]
    mask = np.ma.make_mask(mask)
    # by making a bool mask *after* applying the mask to the array, we also mask
    # out zero values from the array. This logic cannot be simplified by using
    # mode="ind"!
    yy, xx = np.where(mask)
    coords = zip(xx, yy)

    if nproc is None:
        nproc = cpu_count() // 2  # Hyper-threading doubles the # of cores

    if mode == 'sss':
        func = snr_ss
    elif mode == 'peakstddev':
        func = snr_peakstddev
    else:
        raise TypeError('\nMode not recognized.')

    if source_mask is None:
        res = pool_map(nproc, func, array, iterable(coords), fwhm, True,
                       array2, use2alone)
        res = np.array(res)
        yy = res[:, 0]
        xx = res[:, 1]
        snr = res[:, 2]
        snrmap[yy.astype('int'), xx.astype('int')] = snr
    else:
        # checking the mask with the sources
        if array.shape != source_mask.shape:
            raise RuntimeError('Source mask has wrong size.')
        if source_mask[source_mask == 0].shape[0] == 0:
            msg = 'Input source mask is empty.'
            raise RuntimeError(msg)
        if source_mask[source_mask == 0].shape[0] > 20:
            msg = 'Input source mask is too crowded. Check its validity.'
            raise RuntimeError(msg)

        soury, sourx = np.where(source_mask == 0)
        sources = []
        ciry = []
        cirx = []
        anny = []
        annx = []
        array_sources = array.copy()
        centery, centerx = frame_center(array)
        for (y, x) in zip(soury, sourx):
            radd = dist(centery, centerx, y, x)
            if int(radd) < centery - np.ceil(fwhm):
                sources.append((y, x))

        for source in sources:
            y, x = source
            radd = dist(centery, centerx, y, x)
            tempay, tempax = get_annulus_segments(array, int(radd - fwhm),
                                                  int(np.ceil(2 * fwhm)))[0]
            tempcy, tempcx = draw.circle(y, x, int(np.ceil(1 * fwhm)))
            # masking the source position (using the MAD of pixels in annulus)
            array_sources[tempcy, tempcx] = mad(array[tempay, tempax])
            ciry += list(tempcy)
            cirx += list(tempcx)
            anny += list(tempay)
            annx += list(tempax)

        # coordinates of annulus without the sources
        coor_ann = [(y, x) for (y, x) in zip(anny, annx)
                    if (y, x) not in zip(ciry, cirx)]

        # coordinates of the rest of the frame without the annulus
        coor_rest = [(y, x) for (y, x) in zip(yy, xx)
                     if (y, x) not in coor_ann]

        res = pool_map(nproc, func, array, iterable(coor_rest), fwhm, True,
                       array2, use2alone)
        res = np.array(res)
        yy = res[:, 0]
        xx = res[:, 1]
        snr = res[:, 2]
        snrmap[yy.astype('int'), xx.astype('int')] = snr

        res = pool_map(nproc, func, array_sources, iterable(coor_ann), fwhm,
                       True, array2, use2alone)
        res = np.array(res)
        yy = res[:, 0]
        xx = res[:, 1]
        snr = res[:, 2]
        snrmap[yy.astype('int'), xx.astype('int')] = snr

    if plot:
        plot_frames(snrmap, colorbar=True, title='S/N map', **kwargs)

    if verbose:
        print("S/N map created using {} processes.".format(nproc))
        timing(start_time)
    return snrmap
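
A minimal sketch of calling this function, assuming `frame` is a 2d post-processed image such as the output of one of the PCA/LLSG helpers above:

# 'sss' applies the small-sample-statistics penalty; nproc=None lets the
# function pick half of the available cores.
snr_map = snrmap(frame, fwhm=4.0, plot=True, mode='sss', nproc=None)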
Example #16
def LLSG(cube, angs, fwhm, name_input):
"""
-----------------	LLSG	-----------------

Adpated from vip_hci. Returns LLSG frame, residual cube for STIM map and the rank for running contrast curve. 

When fulloutput == True, LLSG full output from VIP (0.9.11) is:
		0 - list_l_array_der
		1 - list_s_array_der
		2 - list_g_array_der
		3 - frame_l
		4 - frame_s
		5 - frame_g
    
    Each is the frame and residual cube for the three noise types:
    l (low-rank), s (sparse), g (gaussian)

    Companions are in the Sparse noise.
    
    To get STIM map of LLSG, input 'list_s_array_der' into STIM Map

Parameter: 
	cube : numpy ndarray, 3d
		Input ADI cube.
	angs : numpy ndarray, 1d
		Corresponding parallactic angle for each frame. Extracted from header of each .fits file
	fwhm : float
		Known size of the FHWM in pixels to be used.
	name_input : character string
		Name of the star (obtained from the first input)

Return:		
	llsg_frame : numpy ndarray, 2d
		Residual frame

	llsg_residual_sparse : numpy ndarray, 3d
		Residual Cube

	Rank : integer 
		Expected rank for the L component

"""

	print( "\n   --- LLSG")

	Rank = input("What rank of llsg would you like to do? (default = 6)\n")
	Rank = int(Rank) if Rank else 6  # fall back to the advertised default

	print(fwhm)

	# Reduces the image using the LLSG algorithm then plots the image.
	llsg_output = vip.llsg.llsg(cube, angs, rank=Rank, fwhm=fwhm, thresh=2, max_iter=20, random_seed=10, full_output=True)

	# Store the sparse frame
	llsg_frame = llsg_output[4]
	# Store the sparse residual cube
	llsg_residual_sparse = np.asarray( llsg_output[1][0] )
	
	# list_s_array_der is a list of one member,
	# this one member is another list of the 24 residual images
	# So get that one element, and make it an array.

	hciplot.plot_frames(llsg_frame, label ='LLSG reduced image of {name}'.format(name=name_input))
	
	
	# Asks the user whether they would like to save the image.
	print( 'Would you like to save the LLSG reduced image?\n' )
	questionsave = Checkfunction()

	if questionsave == 0:
		vip.fits.write_fits('new_LLSG_{name}.fits'.format(name=name_input), llsg_frame, verbose=True)


	return llsg_frame, llsg_residual_sparse, Rank
Example #17
    def flat_field_correction(self,
                              verbose=True,
                              debug_=False,
                              overwrite_basic=False):
        sci_list = []
        with open(self.outpath + "sci_list.txt", "r") as f:
            tmp = f.readlines()
            for line in tmp:
                sci_list.append(line.split('\n')[0])

        sky_list = []
        with open(self.outpath + "sky_list.txt", "r") as f:
            tmp = f.readlines()
            for line in tmp:
                sky_list.append(line.split('\n')[0])

        flat_list = []
        with open(self.outpath + "flat_list.txt", "r") as f:
            tmp = f.readlines()
            for line in tmp:
                flat_list.append(line.split('\n')[0])

        unsat_list = []
        with open(self.outpath + "unsat_list.txt", "r") as f:
            tmp = f.readlines()
            for line in tmp:
                unsat_list.append(line.split('\n')[0])

        flat_X = []
        flat_X_values = []
        tmp = open_fits(self.outpath + '1_crop_unsat_' + unsat_list[-1],
                        header=False)
        nx_unsat_crop = tmp.shape[2]

        for fl, flat_name in enumerate(flat_list):
            tmp, header = open_fits(self.inpath + flat_list[fl],
                                    header=True,
                                    verbose=debug_)
            flat_X.append(header['AIRMASS'])
            if fl == 0:
                flat_X_values.append(header['AIRMASS'])
            else:
                # Creates a list rejecting airmass values that differ by more than the tolerance.
                list_occ = [
                    isclose(header['AIRMASS'], x, atol=0.1)
                    for x in flat_X_values
                ]
                if True not in list_occ:
                    flat_X_values.append(header['AIRMASS'])

        print(flat_X)
        flat_X_values = np.sort(
            flat_X_values
        )  # !!! VERY IMPORTANT, DO NOT COMMENT, OR IT WILL SCREW EVERYTHING UP!!!
        print(flat_X_values)
        if verbose:
            print('The airmass values have been sorted into a list')

        # There should be 15 twilight flats in total with NACO; 5 at each airmass. BUG SOMETIMES!
        flat_tmp_cube_1 = np.zeros([5, self.com_sz, self.com_sz])
        flat_tmp_cube_2 = np.zeros([5, self.com_sz, self.com_sz])
        flat_tmp_cube_3 = np.zeros([5, self.com_sz, self.com_sz])
        counter_1 = 0
        counter_2 = 0
        counter_3 = 0

        flat_cube_3X = np.zeros([3, self.com_sz, self.com_sz])

        # TAKE MEDIAN OF each group of 5 frames with SAME AIRMASS
        flat_cube = open_fits(self.outpath + '1_crop_flat_cube.fits',
                              header=False,
                              verbose=debug_)
        for fl, self.flat_name in enumerate(flat_list):
            if find_nearest(
                    flat_X_values,
                    flat_X[fl]) == 0:  #could create the function find_nearest
                flat_tmp_cube_1[counter_1] = flat_cube[fl]
                counter_1 += 1
            elif find_nearest(flat_X_values, flat_X[fl]) == 1:
                flat_tmp_cube_2[counter_2] = flat_cube[fl]
                counter_2 += 1
            elif find_nearest(flat_X_values, flat_X[fl]) == 2:
                flat_tmp_cube_3[counter_3] = flat_cube[fl]
                counter_3 += 1

        flat_cube_3X[0] = np.median(flat_tmp_cube_1, axis=0)
        flat_cube_3X[1] = np.median(flat_tmp_cube_2, axis=0)
        flat_cube_3X[2] = np.median(flat_tmp_cube_3, axis=0)
        if verbose:
            print('The median flat cubes with same airmass have been created')

        med_fl = np.zeros(3)
        gains_all = np.zeros([3, self.com_sz, self.com_sz])
        #the method for avoiding the bad quadrant is removed since it is fixed in the preproc
        for ii in range(3):
            med_fl[ii] = np.median(flat_cube_3X[ii])
            gains_all[ii] = flat_cube_3X[ii] / med_fl[ii]
        master_flat_frame = np.median(gains_all, axis=0)
        if nx_unsat_crop < master_flat_frame.shape[1]:
            master_flat_unsat = frame_crop(master_flat_frame, nx_unsat_crop)
        else:
            master_flat_unsat = master_flat_frame

        write_fits(self.outpath + 'master_flat_field.fits', master_flat_frame)
        write_fits(self.outpath + 'master_flat_field_unsat.fits',
                   master_flat_unsat)

        #plots(master_flat_frame, master_flat_unsat)
        if verbose:
            print('Master flat frames have been saved')

        master_flat_frame = open_fits(self.outpath + 'master_flat_field.fits')

        if overwrite_basic or not isfile(self.outpath + '2_ff_' +
                                         sci_list[-1]):
            for sc, fits_name in enumerate(sci_list):
                tmp = open_fits(self.outpath + '1_crop_' + fits_name,
                                verbose=debug_)
                tmp_tmp = np.zeros_like(tmp)
                for jj in range(tmp.shape[0]):
                    tmp_tmp[jj] = tmp[jj] / master_flat_frame
                write_fits(self.outpath + '2_ff_' + fits_name,
                           tmp_tmp,
                           verbose=debug_)
                if not debug_:
                    os.system("rm " + self.outpath + '1_crop_' + fits_name)
        if verbose:
            print('Done scaling SCI frames with respect to ff')

        if overwrite_basic or not isfile(self.outpath + '2_ff_' +
                                         sky_list[-1]):
            for sk, fits_name in enumerate(sky_list):
                tmp = open_fits(self.outpath + '1_crop_' + fits_name,
                                verbose=debug_)
                tmp_tmp = np.zeros_like(tmp)
                for jj in range(tmp.shape[0]):
                    tmp_tmp[jj] = tmp[jj] / master_flat_frame
                write_fits(self.outpath + '2_ff_' + fits_name,
                           tmp_tmp,
                           verbose=debug_)
                if not debug_:
                    os.system("rm " + self.outpath + '1_crop_' + fits_name)
        if verbose:
            print('Done scaling SKY frames with respect to ff')

        # COMPARE BEFORE AND AFTER FLAT-FIELD
        tmp = np.median(open_fits(self.outpath + '2_ff_' + sci_list[0]),
                        axis=0)
        tmp_tmp = np.median(open_fits(self.outpath + '2_ff_' + sci_list[-1]),
                            axis=0)
        if debug_:
            old_tmp = np.median(open_fits(self.outpath + '1_crop_' +
                                          sci_list[0]),
                                axis=0)
            old_tmp_tmp = np.median(open_fits(self.outpath + '1_crop_' +
                                              sci_list[-1]),
                                    axis=0)
            plot_frames(old_tmp, tmp, old_tmp_tmp, tmp_tmp)
        else:
            plot_frames(tmp, tmp_tmp)

        master_flat_unsat = open_fits(self.outpath +
                                      'master_flat_field_unsat.fits')
        for un, fits_name in enumerate(unsat_list):
            tmp = open_fits(self.outpath + '1_crop_unsat_' + fits_name,
                            verbose=debug_)
            tmp_tmp = np.zeros_like(tmp)
            for jj in range(tmp.shape[0]):
                tmp_tmp[jj] = tmp[jj] / master_flat_unsat
            write_fits(self.outpath + '2_ff_unsat_' + fits_name,
                       tmp_tmp,
                       verbose=debug_)
            if not debug_:
                os.system("rm " + self.outpath + '1_crop_unsat_' + fits_name)

        if verbose:
            print('Done scaling UNSAT frames with respect to ff')

        # COMPARE BEFORE AND AFTER FLAT-FIELD
        tmp = open_fits(self.outpath + '2_ff_unsat_' + unsat_list[0])[-1]
        tmp_tmp = open_fits(self.outpath + '2_ff_unsat_' + unsat_list[-1])[-1]
        if debug_:
            old_tmp = open_fits(self.outpath + '1_crop_unsat_' +
                                unsat_list[0])[-1]
            old_tmp_tmp = open_fits(self.outpath + '1_crop_unsat_' +
                                    unsat_list[-1])[-1]
            plot_frames(old_tmp, tmp, old_tmp_tmp, tmp_tmp)
        else:
            plot_frames(tmp, tmp_tmp)
Example #18
def chisquare(modelParameters,
              cube,
              angs,
              plsc,
              psfs_norm,
              fwhm,
              annulus_width,
              aperture_radius,
              initialState,
              ncomp,
              cube_ref=None,
              svd_mode='lapack',
              scaling=None,
              fmerit='sum',
              collapse='median',
              imlib='opencv',
              interpolation='lanczos4',
              debug=False):
    """
    Calculate the reduced chi2:
    \chi^2_r = \frac{1}{N-3}\sum_{j=1}^{N} |I_j|,
    where N is the number of pixels within a circular aperture centered on the 
    first estimate of the planet position, and I_j the j-th pixel intensity.
    
    Parameters
    ----------    
    modelParameters: tuple
        The model parameters, typically (r, theta, flux).
    cube: numpy.array
        The cube of fits images expressed as a numpy.array.
    angs: numpy.array
        The parallactic angle fits image expressed as a numpy.array. 
    plsc: float
        The platescale, in arcsec per pixel.
    psfs_norm: numpy.array
        The scaled psf expressed as a numpy.array.    
    fwhm : float
        The FHWM in pixels.
    annulus_width: int, optional
        The width in terms of the FWHM of the annulus on which the PCA is done.       
    aperture_radius: int, optional
        The radius of the circular aperture in terms of the FWHM.
    initialState: numpy.array
        Position (r, theta) of the circular aperture center.
    ncomp: int
        The number of principal components.
    cube_ref : numpy ndarray, 3d, optional
        Reference library cube. For Reference Star Differential Imaging.
    svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
        Switch for different ways of computing the SVD and selected PCs.         
    scaling : {'temp-mean', 'temp-standard'} or None, optional
        With None, no scaling is performed on the input data before SVD. With 
        "temp-mean" then temporal px-wise mean subtraction is done and with 
        "temp-standard" temporal mean centering plus scaling to unit variance 
        is done. 
    fmerit : {'sum', 'stddev'}, string optional
        Chooses the figure of merit to be used. stddev works better for close in
        companions sitting on top of speckle noise.
    collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
        Sets the way of collapsing the frames for producing a final image. If
        None then the cube of residuals is used when measuring the function of
        merit (instead of a single final frame).
    imlib : str, optional
        See the documentation of the ``vip_hci.preproc.frame_shift`` function.
    interpolation : str, optional
        See the documentation of the ``vip_hci.preproc.frame_shift`` function.
        
    Returns
    -------
    out: float
        The reduced chi squared.
        
    """
    try:
        r, theta, flux = modelParameters
    except TypeError:
        msg = 'modelParameters must be a tuple, {} was given'
        print(msg.format(type(modelParameters)))
        raise  # re-raise; otherwise r, theta and flux would be undefined below

    # Create the cube with the negative fake companion injected
    cube_negfc = cube_inject_companions(cube,
                                        psfs_norm,
                                        angs,
                                        flevel=-flux,
                                        plsc=plsc,
                                        rad_dists=[r],
                                        n_branches=1,
                                        theta=theta,
                                        imlib=imlib,
                                        verbose=False,
                                        interpolation=interpolation)

    # Perform PCA and extract the zone of interest
    res = get_values_optimize(cube_negfc,
                              angs,
                              ncomp,
                              annulus_width * fwhm,
                              aperture_radius * fwhm,
                              initialState[0],
                              initialState[1],
                              cube_ref=cube_ref,
                              svd_mode=svd_mode,
                              scaling=scaling,
                              collapse=collapse,
                              debug=debug)
    if debug and collapse is not None:
        values, frpca = res
        plot_frames(frpca)
    else:
        values = res

    # Function of merit
    if fmerit == 'sum':
        values = np.abs(values)
        chi2 = np.sum(values[values > 0])
        N = len(values[values > 0])
        return chi2 / (N - 3)
    elif fmerit == 'stddev':
        return np.std(values[values != 0])
    else:
        raise RuntimeError('`fmerit` choice not recognized')
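
A hedged sketch of evaluating this figure of merit for one candidate, assuming `cube` and `angs` as in the earlier sketches, a normalized PSF template `psfn`, and placeholder values for the platescale and the model parameters:

# (r, theta, flux): separation in pixels, position angle in degrees, flux of the model companion.
chi2_r = chisquare((30.5, 120.0, 400.0), cube, angs, plsc=0.0272,
                   psfs_norm=psfn, fwhm=4.0, annulus_width=3,
                   aperture_radius=2, initialState=(30.5, 120.0),
                   ncomp=10, fmerit='sum')
print(chi2_r)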
Example #19
def FullFramePCA(cube, angs, fwhm, name_input):

    print("\n --- FULL FRAME PCA")
    print("Use DS9 to obtain the reference coordinates")
    print(
        "A 'reference' is simply any kind of stable bright spot across the images, like a static speckle or a candidate."
    )

    reference_xy = [0, 0]  # holds the (x, y) reference coordinates entered below
    reference_xy[0] = input("Input the x coordinate of the reference: ")
    reference_xy[0] = int(reference_xy[0])

    reference_xy[1] = input("Input the y coordinate of the reference: ")
    reference_xy[1] = int(reference_xy[1])

    print('Applying Full Frame PCA...')

    ffpca_output = vip.pca.pca_fullfr.pca(cube,
                                          angs,
                                          fwhm=fwhm,
                                          source_xy=(reference_xy[0],
                                                     reference_xy[1]),
                                          mask_center_px=None,
                                          ncomp=(1, len(cube), 1),
                                          full_output=True)

    # vip_hci.pca.pca_fullfr.pca returns these outputs:
    # (if full_output=True and source_xy != None)
    # 0 - Final residuals cube (NOT TO USE IN STIM MAP)
    # 1 - PCA Frame
    # 2 - Pandas dataframe with PCs, S/Ns and fluxes
    #
    #<class 'numpy.ndarray'> 10,1024,1024
    #<class 'numpy.ndarray'> 1024,1024
    #<class 'pandas.core.frame.DataFrame'> 10 (columns = PCs, S/Ns, fluxes)

    # Save FFPCA output
    fr_pca1 = ffpca_output[1]

    #	----- Getting Optimal Number of PCs
    # Get pandas dataframe table output into a numpy array
    pca_data = ffpca_output[2].rename_axis('ID').values

    # Extract and save S/N ratio and PCs columns in arrays
    snr_data = []
    pcs_data = []

    for i in range(0, len(pca_data)):
        pcs_data.append(pca_data[i][0])
        snr_data.append(pca_data[i][1])

    # Get the index of the maximum value of the S/N column,
    # and retrieve that same value in the PCs column
    # This will be the optimal number of principal components
    snr_max = np.argmax(snr_data, axis=0)
    optimal_pcs = int(pcs_data[snr_max])

    print("Optimal PCs", optimal_pcs)

    hciplot.plot_frames(
        fr_pca1,
        label='FFPCA reduction of {name}'.format(name=name_input),
        grid=False,
        size_factor=5)

    #Loop asks user if they would like to save the image.
    print('Would you like to save the Full Frame PCA reduced images?')
    questionsave = Checkfunction()

    if questionsave == 0:
        vip.fits.write_fits('new_FFPCA_{name}.fits'.format(name=name_input),
                            fr_pca1,
                            verbose=True)

    return fr_pca1, optimal_pcs
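A minimal usage sketch for FullFramePCA, assuming a science cube and parallactic angles saved as FITS (the file names are hypothetical) and that Checkfunction() is defined in the same script:

import vip_hci as vip

cube = vip.fits.open_fits('my_cube.fits')      # hypothetical science cube
angs = vip.fits.open_fits('my_angles.fits')    # hypothetical parallactic angles
frame, optimal_pcs = FullFramePCA(cube, angs, fwhm=4.2, name_input='my_target')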
示例#20
0
def detection(array, fwhm=4, psf=None, mode='lpeaks', bkg_sigma=5,
              matched_filter=False, mask=True, snr_thresh=5, nproc=1, plot=True,
              debug=False, full_output=False, verbose=True, **kwargs):
    """ Finds blobs in a 2d array. The algorithm is designed for automatically
    finding planets in post-processed high contrast final frames. Blob can be
    defined as a region of an image in which some properties are constant or
    vary within a prescribed range of values. See ``Notes`` below to read about
    the algorithm details.

    Parameters
    ----------
    array : numpy ndarray, 2d
        Input frame.
    fwhm : None or int, optional
        Size of the FWHM in pixels. If None and a ``psf`` is provided, then the
        FWHM is measured on the PSF image.
    psf : numpy ndarray
        Input PSF template. It must be normalized with the
        ``vip_hci.metrics.normalize_psf`` function.
    mode : {'lpeaks', 'log', 'dog', 'snrmap', 'snrmapf'}, optional
        Sets which algorithm to use. Each algorithm yields different results. See
        notes for the details of each method.
    bkg_sigma : int or float, optional
        The number of standard deviations above the clipped median for setting the
        background level. Used when ``mode`` is either 'lpeaks', 'dog' or 'log'.
    matched_filter : bool, optional
        Whether to correlate with the psf or not. Used when ``mode`` is either
        'lpeaks', 'dog' or 'log'.
    mask : bool, optional
        If True the central region (circular aperture of 2*FWHM radius) of the
        image will be masked out.
    snr_thresh : float, optional
        S/N threshold for deciding whether the blob is a detection or not. Used
        to threshold the S/N map when ``mode`` is set to 'snrmap' or 'snrmapf'.
    nproc : None or int, optional
        The number of processes for running the ``snrmap`` function.
    plot : bool, optional
        If True plots the frame showing the detected blobs on top.
    debug : bool, optional
        Whether to print and plot additional/intermediate results.
    full_output : bool, optional
        Whether to output just the coordinates of blobs that fulfill the SNR
        constraint or a table with all the blobs and the peak pixels and SNR.
    verbose : bool, optional
        Whether to print to stdout information about found blobs.
    **kwargs : dictionary, optional
        Arguments to be passed to ``plot_frames`` to customize the plot (and to
        save it to disk).

    Returns
    -------
    yy, xx : numpy ndarray
        Two vectors with the y and x coordinates of the centers of the sources
        (potential planets).
    If full_output is True then a table with all the candidates that passed the
    2d Gaussian fit constraints and their S/N is returned.

    Notes
    -----
    When ``mode`` is either 'lpeaks', 'dog' or 'log', the detection might happen
    in the input frame or in a match-filtered version of it (by setting
    ``matched_filter`` to True and providing a PSF template, to run a
    correlation filter). Filtering the image will smooth the noise and maximize
    detectability of objects with a shape similar to the kernel. When ``mode``
    is either 'snrmap' or 'snrmapf', the detection is done on an S/N map
    directly.

    When ``mode`` is set to:
        'lpeaks' (Local maxima): The local peaks above the background (computed
        using sigma clipped statistics) on the (correlated) frame are detected.
        A maximum filter is used for finding local maxima. This operation
        dilates the original image and merges neighboring local maxima closer
        than the size of the dilation. Locations where the original image is
        equal to the dilated image are returned as local maxima. The minimum
        separation between the peaks is 1*FWHM.

        'log' (Laplacian of Gaussian): It computes the Laplacian of Gaussian
        images with successively increasing standard deviation and stacks them
        up in a cube. Blobs are local maxima in this cube. LOG assumes the
        blobs are bright on a dark background.

        'dog' (Difference of Gaussians): This is a faster approximation of the
        Laplacian of Gaussian approach. In this case the image is blurred with
        increasing standard deviations and the differences between successively
        blurred images are stacked up in a cube. DOG assumes the blobs are
        bright on a dark background.

        'snrmap' or 'snrmapf': A threshold is applied to the S/N map, computed
        with the ``snrmap`` function (``snrmapf`` calls ``snrmap`` with
        ``approximated`` set to True). The threshold is given by ``snr_thresh``
        and local maxima are found as in the case of 'lpeaks'.

    Finally, a 2d Gaussian fit is done on each of the potential blobs,
    constraining the position on a cropped sub-image and the sigma of the fit
    (to match the input FWHM). The blobs are then filtered based on their S/N
    values, according to ``snr_thresh``.

    """
    def check_blobs(array, coords_temp, fwhm, debug):
        y_temp = coords_temp[:, 0]
        x_temp = coords_temp[:, 1]
        coords = []
        # Fitting a 2d gaussian to each local maxima position
        for y, x in zip(y_temp, x_temp):
            subsi = 3 * int(np.ceil(fwhm))
            if subsi % 2 == 0:
                subsi += 1

            if mode in ('lpeaks', 'log', 'dog'):
                scy = y + pad
                scx = x + pad
            elif mode in ('snrmap', 'snrmapf'):
                scy = y
                scx = x
            subim, suby, subx = get_square(array, subsi, scy, scx,
                                           position=True, force=True,
                                           verbose=False)
            cy, cx = frame_center(subim)

            gauss = models.Gaussian2D(amplitude=subim.max(), x_mean=cx,
                                      y_mean=cy, theta=0,
                                      x_stddev=fwhm*gaussian_fwhm_to_sigma,
                                      y_stddev=fwhm*gaussian_fwhm_to_sigma)

            sy, sx = np.indices(subim.shape)
            fitter = fitting.LevMarLSQFitter()
            fit = fitter(gauss, sx, sy, subim)

            # checking that the amplitude is positive
            # checking whether the x and y centroids of the 2d gaussian fit
            # coincide with the center of the subimage (within 2px error)
            # checking whether the mean of the fwhm in y and x of the fit
            # are close to the FWHM_PSF with a margin of 3px
            fwhm_y = fit.y_stddev.value * gaussian_sigma_to_fwhm
            fwhm_x = fit.x_stddev.value * gaussian_sigma_to_fwhm
            mean_fwhm_fit = np.mean([np.abs(fwhm_x), np.abs(fwhm_y)])
            condyf = np.allclose(fit.y_mean.value, cy, atol=2)
            condxf = np.allclose(fit.x_mean.value, cx, atol=2)
            condmf = np.allclose(mean_fwhm_fit, fwhm, atol=3)
            if fit.amplitude.value > 0 and condxf and condyf and condmf:
                coords.append((suby + fit.y_mean.value,
                               subx + fit.x_mean.value))

                if debug:
                    print('Coordinates (Y,X): {:.3f},{:.3f}'.format(y, x))
                    print('fit peak = {:.3f}'.format(fit.amplitude.value))
                    msg = 'fwhm_y in px = {:.3f}, fwhm_x in px = {:.3f}'
                    print(msg.format(fwhm_y, fwhm_x))
                    print('mean fit fwhm = {:.3f}'.format(mean_fwhm_fit))
                    if plot:
                        plot_frames(subim, colorbar=True, axis=False, dpi=60)
        return coords

    def print_coords(coords):
        print('Blobs found:', len(coords))
        print(' ycen   xcen')
        print('------ ------')
        for j in range(len(coords[:, 0])):
            print('{:.3f} \t {:.3f}'.format(coords[j, 0], coords[j, 1]))

    def print_abort():
        if verbose:
            print(sep)
            print('No potential sources found')
            print(sep)

    # --------------------------------------------------------------------------
    if array.ndim != 2:
        raise TypeError('Input array is not a frame or 2d array')
    if psf is not None:
        if psf.ndim != 2 and psf.shape[0] < array.shape[0]:
            raise TypeError('Input psf is not a 2d array or has wrong size')
    else:
        if matched_filter:
            raise ValueError('`psf` must be provided when `matched_filter` is '
                             'True')

    if fwhm is None:
        if psf is not None:
            # Getting the FWHM from the PSF array
            cenpsf = frame_center(psf)
            outdf = fit_2dgaussian(psf, cent=(cenpsf), debug=debug,
                                   full_output=True)
            fwhm_x, fwhm_y = outdf['fwhm_x'], outdf['fwhm_y']
            fwhm = np.mean([fwhm_x, fwhm_y])
            if verbose:
                print('FWHM = {:.2f} pxs\n'.format(fwhm))
            if debug:
                print('FWHM_y', fwhm_y)
                print('FWHM_x', fwhm_x)
        else:
            raise ValueError('`fwhm` or `psf` must be provided')

    # Masking the center, 2*lambda/D is the expected IWA
    if mask:
        array = mask_circle(array, radius=fwhm)

    # Generating a detection map: Match-filtered frame or SNRmap
    # For 'lpeaks', 'dog', 'log' it is possible to skip this step
    if mode in ('lpeaks', 'log', 'dog'):
        if matched_filter:
            frame_det = correlate(array, psf)
        else:
            frame_det = array

        if debug and plot and matched_filter:
            print('Match-filtered frame:')
            plot_frames(frame_det, colorbar=True)

        # Estimation of background level
        _, median, stddev = sigma_clipped_stats(frame_det, sigma=5,
                                                maxiters=None)
        bkg_level = median + (stddev * bkg_sigma)
        if debug:
            print('Sigma clipped median = {:.3f}'.format(median))
            print('Sigma clipped stddev = {:.3f}'.format(stddev))
            print('Background threshold = {:.3f}'.format(bkg_level), '\n')

    elif mode in ('snrmap', 'snrmapf'):
        if mode == 'snrmap':
            approx = False
        elif mode == 'snrmapf':
            approx = True
        frame_det = snrmap(array, fwhm=fwhm, approximated=approx, plot=False,
                           nproc=nproc, verbose=verbose)

        if debug and plot:
            print('Signal-to-noise ratio map:')
            plot_frames(frame_det, colorbar=True)

    if mode in ('lpeaks', 'log', 'dog'):
        # Padding the image with zeros to avoid errors at the edges
        pad = 10
        array_padded = np.lib.pad(array, pad, 'constant', constant_values=0)

    if mode in ('lpeaks', 'snrmap', 'snrmapf'):
        if mode == 'lpeaks':
            threshold = bkg_level
        else:
            threshold = snr_thresh

        coords_temp = peak_local_max(frame_det, threshold_abs=threshold,
                                     min_distance=int(np.ceil(fwhm)),
                                     num_peaks=20)

        if mode == 'lpeaks':
            coords = check_blobs(array_padded, coords_temp, fwhm, debug)
        else:
            coords = check_blobs(array, coords_temp, fwhm, debug)
        coords = np.array(coords)
        if verbose and coords.shape[0] > 0:
            print_coords(coords)

    elif mode == 'log':
        sigma = fwhm * gaussian_fwhm_to_sigma
        coords = feature.blob_log(frame_det.astype('float'),
                                  threshold=bkg_level, min_sigma=sigma-.5,
                                  max_sigma=sigma+.5)
        if len(coords) == 0:
            print_abort()
            return 0, 0
        coords = coords[:, :2]
        coords = check_blobs(array_padded, coords, fwhm, debug)
        coords = np.array(coords)
        if coords.shape[0] > 0 and verbose:
            print_coords(coords)

    elif mode == 'dog':
        sigma = fwhm * gaussian_fwhm_to_sigma
        coords = feature.blob_dog(frame_det.astype('float'),
                                  threshold=bkg_level, min_sigma=sigma-.5,
                                  max_sigma=sigma+.5)
        if len(coords) == 0:
            print_abort()
            return 0, 0
        coords = coords[:, :2]
        coords = check_blobs(array_padded, coords, fwhm, debug)
        coords = np.array(coords)
        if coords.shape[0] > 0 and verbose:
            print_coords(coords)

    else:
        raise ValueError('`mode` not recognized')

    if coords.shape[0] == 0:
        print_abort()
        return 0, 0

    yy = coords[:, 0]
    xx = coords[:, 1]
    yy_final = []
    xx_final = []
    yy_out = []
    xx_out = []
    snr_list = []

    if mode in ('lpeaks', 'log', 'dog'):
        xx -= pad
        yy -= pad

    # Checking S/N for potential sources
    for i in range(yy.shape[0]):
        y = yy[i]
        x = xx[i]
        if verbose:
            print('')
            print(sep)
            print('X,Y = ({:.1f},{:.1f})'.format(x, y))
        snr_value = snr(array, (x, y), fwhm, False, verbose=False)
        snr_list.append(snr_value)
        if snr_value >= snr_thresh:
            if verbose:
                _ = frame_report(array, fwhm, (x, y), verbose=verbose)
            yy_final.append(y)
            xx_final.append(x)
        else:
            yy_out.append(y)
            xx_out.append(x)
            if verbose:
                msg = 'S/N constraint NOT fulfilled (S/N = {:.3f})'
                print(msg.format(snr_value))
            if debug:
                _ = frame_report(array, fwhm, (x, y), verbose=verbose)
    if verbose:
        print(sep)

    if debug or full_output:
        table_full = pn.DataFrame({'y': yy.tolist(),
                                   'x': xx.tolist(),
                                   'px_snr': snr_list})
        table_full = table_full.sort_values('px_snr')

    yy_final = np.array(yy_final)
    xx_final = np.array(xx_final)
    yy_out = np.array(yy_out)
    xx_out = np.array(xx_out)
    table = pn.DataFrame({'y': yy_final.tolist(), 'x': xx_final.tolist()})

    if plot:
        coords = tuple(zip(xx_out.tolist() + xx_final.tolist(),
                           yy_out.tolist() + yy_final.tolist()))
        circlealpha = [0.3] * len(xx_out)
        circlealpha += [1] * len(xx_final)
        plot_frames(array, dpi=120, circle=coords, circle_alpha=circlealpha,
                    circle_label=True, circle_radius=fwhm, **kwargs)

    if debug:
        print(table_full)

    if full_output:
        return table_full
    else:
        return table
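A usage sketch for detection, assuming a post-processed frame final_frame and a normalized PSF template psf_norm already in memory (both names are hypothetical):

table = detection(final_frame, fwhm=4.5, psf=psf_norm, mode='snrmapf',
                  snr_thresh=5, plot=True, full_output=True, verbose=True)
print(table)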
示例#21
0
文件: detection.py 项目: zuzhaoye/VIP
def detection(array,
              fwhm=4,
              psf=None,
              mode='lpeaks',
              bkg_sigma=5,
              matched_filter=False,
              mask=True,
              snr_thresh=5,
              nproc=1,
              plot=True,
              debug=False,
              full_output=False,
              verbose=True,
              **kwargs):
    """ Finds blobs in a 2d array. The algorithm is designed for automatically
    finding planets in post-processed high contrast final frames. Blob can be
    defined as a region of an image in which some properties are constant or
    vary within a prescribed range of values. See ``Notes`` below to read about
    the algorithm details.

    Parameters
    ----------
    array : numpy ndarray, 2d
        Input frame.
    fwhm : None or int, optional
        Size of the FWHM in pixels. If None and a ``psf`` is provided, then the
        FWHM is measured on the PSF image.
    psf : numpy ndarray
        Input PSF template. It must be normalized with the
        ``vip_hci.metrics.normalize_psf`` function.
    mode : {'lpeaks', 'log', 'dog', 'snrmap', 'snrmapf'}, optional
        Sets which algorithm to use. Each algorithm yields different results. See
        notes for the details of each method.
    bkg_sigma : int or float, optional
        The number of standard deviations above the clipped median for setting the
        background level. Used when ``mode`` is either 'lpeaks', 'dog' or 'log'.
    matched_filter : bool, optional
        Whether to correlate with the psf or not. Used when ``mode`` is either
        'lpeaks', 'dog' or 'log'.
    mask : bool, optional
        If True the central region (circular aperture of 2*FWHM radius) of the
        image will be masked out.
    snr_thresh : float, optional
        S/N threshold for deciding whether the blob is a detection or not. Used
        to threshold the S/N map when ``mode`` is set to 'snrmap' or 'snrmapf'.
    nproc : None or int, optional
        The number of processes for running the ``snrmap`` function.
    plot : bool, optional
        If True plots the frame showing the detected blobs on top.
    debug : bool, optional
        Whether to print and plot additional/intermediate results.
    full_output : bool, optional
        Whether to output just the coordinates of blobs that fulfill the SNR
        constraint or a table with all the blobs and the peak pixels and SNR.
    verbose : bool, optional
        Whether to print to stdout information about found blobs.
    **kwargs : dictionary, optional
        Arguments to be passed to ``plot_frames`` to customize the plot (and to
        save it to disk).

    Returns
    -------
    yy, xx : numpy ndarray
        Two vectors with the y and x coordinates of the centers of the sources
        (potential planets).
    If full_output is True then a table with all the candidates that passed the
    2d Gaussian fit constraints and their S/N is returned.

    Notes
    -----
    When ``mode`` is either 'lpeaks', 'dog' or 'log', the detection might happen
    in the input frame or in a match-filtered version of it (by setting
    ``matched_filter`` to True and providing a PSF template, to run a
    correlation filter). Filtering the image will smooth the noise and maximize
    detectability of objects with a shape similar to the kernel. When ``mode``
    is either 'snrmap' or 'snrmapf', the detection is done on an S/N map
    directly.

    When ``mode`` is set to:
        'lpeaks' (Local maxima): The local peaks above the background (computed
        using sigma clipped statistics) on the (correlated) frame are detected.
        A maximum filter is used for finding local maxima. This operation
        dilates the original image and merges neighboring local maxima closer
        than the size of the dilation. Locations where the original image is
        equal to the dilated image are returned as local maxima. The minimum
        separation between the peaks is 1*FWHM.

        'log' (Laplacian of Gaussian): It computes the Laplacian of Gaussian
        images with successively increasing standard deviation and stacks them
        up in a cube. Blobs are local maxima in this cube. LOG assumes the
        blobs are bright on a dark background.

        'dog' (Difference of Gaussians): This is a faster approximation of the
        Laplacian of Gaussian approach. In this case the image is blurred with
        increasing standard deviations and the differences between successively
        blurred images are stacked up in a cube. DOG assumes the blobs are
        bright on a dark background.

        'snrmap' or 'snrmapf': A threshold is applied to the S/N map, computed
        with the ``snrmap`` function (``snrmapf`` calls ``snrmap`` with
        ``approximated`` set to True). The threshold is given by ``snr_thresh``
        and local maxima are found as in the case of 'lpeaks'.

    Finally, a 2d Gaussian fit is done on each of the potential blobs,
    constraining the position on a cropped sub-image and the sigma of the fit
    (to match the input FWHM). The blobs are then filtered based on their S/N
    values, according to ``snr_thresh``.

    """
    def check_blobs(array, coords_temp, fwhm, debug):
        y_temp = coords_temp[:, 0]
        x_temp = coords_temp[:, 1]
        coords = []
        # Fitting a 2d gaussian to each local maxima position
        for y, x in zip(y_temp, x_temp):
            subsi = 3 * int(np.ceil(fwhm))
            if subsi % 2 == 0:
                subsi += 1

            if mode in ('lpeaks', 'log', 'dog'):
                scy = y + pad
                scx = x + pad
            elif mode in ('snrmap', 'snrmapf'):
                scy = y
                scx = x
            subim, suby, subx = get_square(array,
                                           subsi,
                                           scy,
                                           scx,
                                           position=True,
                                           force=True,
                                           verbose=False)
            cy, cx = frame_center(subim)

            gauss = models.Gaussian2D(amplitude=subim.max(),
                                      x_mean=cx,
                                      y_mean=cy,
                                      theta=0,
                                      x_stddev=fwhm * gaussian_fwhm_to_sigma,
                                      y_stddev=fwhm * gaussian_fwhm_to_sigma)

            sy, sx = np.indices(subim.shape)
            fitter = fitting.LevMarLSQFitter()
            fit = fitter(gauss, sx, sy, subim)

            # checking that the amplitude is positive
            # checking whether the x and y centroids of the 2d gaussian fit
            # coincide with the center of the subimage (within 2px error)
            # checking whether the mean of the fwhm in y and x of the fit
            # are close to the FWHM_PSF with a margin of 3px
            fwhm_y = fit.y_stddev.value * gaussian_sigma_to_fwhm
            fwhm_x = fit.x_stddev.value * gaussian_sigma_to_fwhm
            mean_fwhm_fit = np.mean([np.abs(fwhm_x), np.abs(fwhm_y)])
            condyf = np.allclose(fit.y_mean.value, cy, atol=2)
            condxf = np.allclose(fit.x_mean.value, cx, atol=2)
            condmf = np.allclose(mean_fwhm_fit, fwhm, atol=3)
            if fit.amplitude.value > 0 and condxf and condyf and condmf:
                coords.append(
                    (suby + fit.y_mean.value, subx + fit.x_mean.value))

                if debug:
                    print('Coordinates (Y,X): {:.3f},{:.3f}'.format(y, x))
                    print('fit peak = {:.3f}'.format(fit.amplitude.value))
                    msg = 'fwhm_y in px = {:.3f}, fwhm_x in px = {:.3f}'
                    print(msg.format(fwhm_y, fwhm_x))
                    print('mean fit fwhm = {:.3f}'.format(mean_fwhm_fit))
                    if plot:
                        plot_frames(subim, colorbar=True, axis=False, dpi=60)
        return coords

    def print_coords(coords):
        print('Blobs found:', len(coords))
        print(' ycen   xcen')
        print('------ ------')
        for j in range(len(coords[:, 0])):
            print('{:.3f} \t {:.3f}'.format(coords[j, 0], coords[j, 1]))

    def print_abort():
        if verbose:
            print(sep)
            print('No potential sources found')
            print(sep)

    # --------------------------------------------------------------------------
    if array.ndim != 2:
        raise TypeError('Input array is not a frame or 2d array')
    if psf is not None:
        if psf.ndim != 2 and psf.shape[0] < array.shape[0]:
            raise TypeError('Input psf is not a 2d array or has wrong size')
    else:
        if matched_filter:
            raise ValueError('`psf` must be provided when `matched_filter` is '
                             'True')

    if fwhm is None:
        if psf is not None:
            # Getting the FWHM from the PSF array
            cenpsf = frame_center(psf)
            outdf = fit_2dgaussian(psf,
                                   cent=(cenpsf),
                                   debug=debug,
                                   full_output=True)
            fwhm_x, fwhm_y = outdf['fwhm_x'], outdf['fwhm_y']
            fwhm = np.mean([fwhm_x, fwhm_y])
            if verbose:
                print('FWHM = {:.2f} pxs\n'.format(fwhm))
            if debug:
                print('FWHM_y', fwhm_y)
                print('FWHM_x', fwhm_x)
        else:
            raise ValueError('`fwhm` or `psf` must be provided')

    # Masking the center, 2*lambda/D is the expected IWA
    if mask:
        array = mask_circle(array, radius=fwhm)

    # Generating a detection map: Match-filtered frame or SNRmap
    # For 'lpeaks', 'dog', 'log' it is possible to skip this step
    if mode in ('lpeaks', 'log', 'dog'):
        if matched_filter:
            frame_det = correlate(array, psf)
        else:
            frame_det = array

        if debug and plot and matched_filter:
            print('Match-filtered frame:')
            plot_frames(frame_det, colorbar=True)

        # Estimation of background level
        _, median, stddev = sigma_clipped_stats(frame_det,
                                                sigma=5,
                                                maxiters=None)
        bkg_level = median + (stddev * bkg_sigma)
        if debug:
            print('Sigma clipped median = {:.3f}'.format(median))
            print('Sigma clipped stddev = {:.3f}'.format(stddev))
            print('Background threshold = {:.3f}'.format(bkg_level), '\n')

    elif mode in ('snrmap', 'snrmapf'):
        if mode == 'snrmap':
            approx = False
        elif mode == 'snrmapf':
            approx = True
        frame_det = snrmap(array,
                           fwhm=fwhm,
                           approximated=approx,
                           plot=False,
                           nproc=nproc,
                           verbose=verbose)

        if debug and plot:
            print('Signal-to-noise ratio map:')
            plot_frames(frame_det, colorbar=True)

    if mode in ('lpeaks', 'log', 'dog'):
        # Padding the image with zeros to avoid errors at the edges
        pad = 10
        array_padded = np.lib.pad(array, pad, 'constant', constant_values=0)

    if mode in ('lpeaks', 'snrmap', 'snrmapf'):
        if mode == 'lpeaks':
            threshold = bkg_level
        else:
            threshold = snr_thresh

        coords_temp = peak_local_max(frame_det,
                                     threshold_abs=threshold,
                                     min_distance=int(np.ceil(fwhm)),
                                     num_peaks=20)

        if mode == 'lpeaks':
            coords = check_blobs(array_padded, coords_temp, fwhm, debug)
        else:
            coords = check_blobs(array, coords_temp, fwhm, debug)
        coords = np.array(coords)
        if verbose and coords.shape[0] > 0:
            print_coords(coords)

    elif mode == 'log':
        sigma = fwhm * gaussian_fwhm_to_sigma
        coords = feature.blob_log(frame_det.astype('float'),
                                  threshold=bkg_level,
                                  min_sigma=sigma - .5,
                                  max_sigma=sigma + .5)
        if len(coords) == 0:
            print_abort()
            return 0, 0
        coords = coords[:, :2]
        coords = check_blobs(array_padded, coords, fwhm, debug)
        coords = np.array(coords)
        if coords.shape[0] > 0 and verbose:
            print_coords(coords)

    elif mode == 'dog':
        sigma = fwhm * gaussian_fwhm_to_sigma
        coords = feature.blob_dog(frame_det.astype('float'),
                                  threshold=bkg_level,
                                  min_sigma=sigma - .5,
                                  max_sigma=sigma + .5)
        if len(coords) == 0:
            print_abort()
            return 0, 0
        coords = coords[:, :2]
        coords = check_blobs(array_padded, coords, fwhm, debug)
        coords = np.array(coords)
        if coords.shape[0] > 0 and verbose:
            print_coords(coords)

    else:
        raise ValueError('`mode` not recognized')

    if coords.shape[0] == 0:
        print_abort()
        return 0, 0

    yy = coords[:, 0]
    xx = coords[:, 1]
    yy_final = []
    xx_final = []
    yy_out = []
    xx_out = []
    snr_list = []

    if mode in ('lpeaks', 'log', 'dog'):
        xx -= pad
        yy -= pad

    # Checking S/N for potential sources
    for i in range(yy.shape[0]):
        y = yy[i]
        x = xx[i]
        if verbose:
            print('')
            print(sep)
            print('X,Y = ({:.1f},{:.1f})'.format(x, y))
        snr_value = snr(array, (x, y), fwhm, False, verbose=False)
        snr_list.append(snr_value)
        if snr_value >= snr_thresh:
            if verbose:
                _ = frame_report(array, fwhm, (x, y), verbose=verbose)
            yy_final.append(y)
            xx_final.append(x)
        else:
            yy_out.append(y)
            xx_out.append(x)
            if verbose:
                msg = 'S/N constraint NOT fulfilled (S/N = {:.3f})'
                print(msg.format(snr_value))
            if debug:
                _ = frame_report(array, fwhm, (x, y), verbose=verbose)
    if verbose:
        print(sep)

    if debug or full_output:
        table_full = pn.DataFrame({
            'y': yy.tolist(),
            'x': xx.tolist(),
            'px_snr': snr_list
        })
        table_full = table_full.sort_values('px_snr')

    yy_final = np.array(yy_final)
    xx_final = np.array(xx_final)
    yy_out = np.array(yy_out)
    xx_out = np.array(xx_out)
    table = pn.DataFrame({'y': yy_final.tolist(), 'x': xx_final.tolist()})

    if plot:
        coords = tuple(
            zip(xx_out.tolist() + xx_final.tolist(),
                yy_out.tolist() + yy_final.tolist()))
        circlealpha = [0.3] * len(xx_out)
        circlealpha += [1] * len(xx_final)
        plot_frames(array,
                    dpi=120,
                    circle=coords,
                    circle_alpha=circlealpha,
                    circle_label=True,
                    circle_radius=fwhm,
                    **kwargs)

    if debug:
        print(table_full)

    if full_output:
        return table_full
    else:
        return table
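The vetting applied inside check_blobs can be read as a single acceptance test on the fitted Gaussian. Below is a hypothetical standalone helper that mirrors those three conditions (it is not part of the original module):

import numpy as np
from astropy.stats import gaussian_sigma_to_fwhm

def passes_gaussian_vetting(fit, cy, cx, fwhm):
    # fit: a fitted astropy models.Gaussian2D; (cy, cx): center of the sub-image
    fwhm_y = np.abs(fit.y_stddev.value) * gaussian_sigma_to_fwhm
    fwhm_x = np.abs(fit.x_stddev.value) * gaussian_sigma_to_fwhm
    mean_fwhm_fit = np.mean([fwhm_x, fwhm_y])
    return (fit.amplitude.value > 0
            and np.allclose(fit.y_mean.value, cy, atol=2)
            and np.allclose(fit.x_mean.value, cx, atol=2)
            and np.allclose(mean_fwhm_fit, fwhm, atol=3))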
示例#22
0
文件: fit_2d.py 项目: carlgogo/VIP
def fit_2dgaussian(array, crop=False, cent=None, cropsize=15, fwhmx=4, fwhmy=4,
                   theta=0, threshold=False, sigfactor=6, full_output=True,
                   debug=True):
    """ Fitting a 2D Gaussian to the 2D distribution of the data.

    Parameters
    ----------
    array : numpy ndarray
        Input frame with a single PSF.
    crop : bool, optional
        If True a square sub-image will be cropped.
    cent : tuple of int, optional
        X,Y integer position of source in the array for extracting the subimage.
        If None the center of the frame is used for cropping the subframe (the
        PSF is assumed to be ~ at the center of the frame).
    cropsize : int, optional
        Size of the subimage.
    fwhmx, fwhmy : float, optional
        Initial values for the FWHM of the fitted 2d Gaussian, in px.
    theta : float, optional
        Angle of inclination of the 2d Gaussian counting from the positive X
        axis.
    threshold : bool, optional
        If True the background pixels (estimated using sigma clipped statistics)
        will be replaced by small random Gaussian noise.
    sigfactor : int, optional
        The background pixels will be thresholded before fitting a 2d Gaussian
        to the data using sigma clipped statistics. All values smaller than
        (MEDIAN + sigfactor*STDDEV) will be replaced by small random Gaussian
        noise.
    full_output : bool, optional
        If False it returns just the centroid, if True also returns the
        FWHM in X and Y (in pixels), the amplitude and the rotation angle.
    debug : bool, optional
        If True, the function prints out parameters of the fit and plots the
        data, model and residuals.

    Returns
    -------
    mean_y : float
        Source centroid y position on input array from fitting.
    mean_x : float
        Source centroid x position on input array from fitting.

    If ``full_output`` is True it returns a Pandas dataframe containing the
    following columns:
    'amplitude' : Float value. Amplitude of the Gaussian.
    'centroid_x' : Float value. X coordinate of the centroid.
    'centroid_y' : Float value. Y coordinate of the centroid.
    'fwhm_x' : Float value. FWHM in X [px].
    'fwhm_y' : Float value. FWHM in Y [px].
    'theta' : Float value. Rotation angle.

    """
    check_array(array, dim=2, msg='array')

    if crop:
        if cent is None:
            ceny, cenx = frame_center(array)
        else:
            cenx, ceny = cent

        imside = array.shape[0]
        psf_subimage, suby, subx = get_square(array, min(cropsize, imside),
                                              ceny, cenx, position=True)
    else:
        psf_subimage = array.copy()

    if threshold:
        _, clipmed, clipstd = sigma_clipped_stats(psf_subimage, sigma=2)
        indi = np.where(psf_subimage <= clipmed + sigfactor * clipstd)
        subimnoise = np.random.randn(psf_subimage.shape[0],
                                     psf_subimage.shape[1]) * clipstd
        psf_subimage[indi] = subimnoise[indi]

    # Creating the 2D Gaussian model
    init_amplitude = np.ptp(psf_subimage)
    xcom, ycom = photutils.centroid_com(psf_subimage)
    gauss = models.Gaussian2D(amplitude=init_amplitude, theta=theta,
                              x_mean=xcom, y_mean=ycom,
                              x_stddev=fwhmx * gaussian_fwhm_to_sigma,
                              y_stddev=fwhmy * gaussian_fwhm_to_sigma)
    # Levenberg-Marquardt algorithm
    fitter = fitting.LevMarLSQFitter()
    y, x = np.indices(psf_subimage.shape)
    fit = fitter(gauss, x, y, psf_subimage)

    if crop:
        mean_y = fit.y_mean.value + suby
        mean_x = fit.x_mean.value + subx
    else:
        mean_y = fit.y_mean.value
        mean_x = fit.x_mean.value
    fwhm_y = fit.y_stddev.value*gaussian_sigma_to_fwhm
    fwhm_x = fit.x_stddev.value*gaussian_sigma_to_fwhm
    amplitude = fit.amplitude.value
    theta = np.rad2deg(fit.theta.value)

    if debug:
        if threshold:
            label = ('Subimage thresholded', 'Model', 'Residuals')
        else:
            label = ('Subimage', 'Model', 'Residuals')
        plot_frames((psf_subimage, fit(x, y), psf_subimage-fit(x, y)),
                    grid=True, grid_spacing=1, label=label)
        print('FWHM_y =', fwhm_y)
        print('FWHM_x =', fwhm_x, '\n')
        print('centroid y =', mean_y)
        print('centroid x =', mean_x)
        print('centroid y subim =', fit.y_mean.value)
        print('centroid x subim =', fit.x_mean.value, '\n')
        print('amplitude =', amplitude)
        print('theta =', theta)

    if full_output:
        return pd.DataFrame({'centroid_y': mean_y, 'centroid_x': mean_x,
                             'fwhm_y': fwhm_y, 'fwhm_x': fwhm_x,
                             'amplitude': amplitude, 'theta': theta}, index=[0])
    else:
        return mean_y, mean_x
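A quick self-check sketch for fit_2dgaussian: generate a synthetic Gaussian and recover its centroid (assumes the function above is in scope; the grid size and injected position are arbitrary):

import numpy as np
from astropy.modeling import models

y, x = np.indices((31, 31))
synth = models.Gaussian2D(amplitude=1, x_mean=15.3, y_mean=14.8,
                          x_stddev=2., y_stddev=2.)(x, y)
df = fit_2dgaussian(synth, crop=False, fwhmx=4, fwhmy=4,
                    full_output=True, debug=False)
print(df['centroid_x'].iloc[0], df['centroid_y'].iloc[0])  # should be ~15.3, ~14.8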
示例#23
0
def snrmap(array, fwhm, approximated=False, plot=False, known_sources=None,
           nproc=None, array2=None, use2alone=False, verbose=True, **kwargs):
    """Parallel implementation of the S/N map generation function. Applies the
    S/N function (small samples penalty) at each pixel.

    Parameters
    ----------
    array : numpy ndarray
        Input frame (2d array).
    fwhm : float
        Size in pixels of the FWHM.
    approximated : bool, optional
        If True, an approximated S/N map is generated.
    plot : bool, optional
        If True plots the S/N map. False by default.
    known_sources : None, tuple or tuple of tuples, optional
        To take into account existing sources. It should be a tuple of float/int
        or a tuple of tuples (of float/int) with the coordinate(s) of the known
        sources.
    nproc : int or None
        Number of processes for parallel computing.
    array2 : numpy ndarray, optional
        Additional image (e.g. processed image with negative derotation angles)
        allowing for more noise samples. Should have the same dimensions as
        array.
    use2alone: bool, optional
        Whether to use array2 alone to estimate the noise (might be useful to 
        estimate the snr of extended disk features).
    verbose: bool, optional
        Whether to print timing or not.
    **kwargs : dictionary, optional
        Arguments to be passed to ``plot_frames`` to customize the plot (and to
        save it to disk).

    Returns
    -------
    snrmap : 2d numpy ndarray
        Frame of the same size as the input frame, with the S/N value computed
        at each pixel.
    """
    if verbose:
        start_time = time_ini()

    check_array(array, dim=2, msg='array')
    sizey, sizex = array.shape
    snrmap_array = np.zeros_like(array)
    width = min(sizey, sizex) / 2 - 1.5 * fwhm
    mask = get_annulus_segments(array, (fwhm / 2) + 2, width, mode="mask")[0]
    mask = np.ma.make_mask(mask)
    # by making a bool mask *after* applying the mask to the array, we also mask
    # out zero values from the array. This logic cannot be simplified by using
    # mode="ind"!
    yy, xx = np.where(mask)
    coords = zip(xx, yy)

    if nproc is None:
        nproc = cpu_count() // 2        # Hyper-threading doubles the # of cores

    if known_sources is None:

        # proxy to S/N calculation
        if approximated:
            cy, cx = frame_center(array)
            tophat_kernel = Tophat2DKernel(fwhm / 2)
            array = convolve(array, tophat_kernel)
            width = min(sizey, sizex) / 2 - 1.5 * fwhm
            mask = get_annulus_segments(array, (fwhm / 2) + 1, width - 1,
                                        mode="mask")[0]
            mask = np.ma.make_mask(mask)
            yy, xx = np.where(mask)
            coords = [(int(x), int(y)) for (x, y) in zip(xx, yy)]
            res = pool_map(nproc, _snr_approx, array, iterable(coords), fwhm,
                           cy, cx)
            res = np.array(res)
            yy = res[:, 0]
            xx = res[:, 1]
            snr_value = res[:, 2]
            snrmap_array[yy.astype(int), xx.astype(int)] = snr_value

        # computing s/n map with Mawet+14 definition
        else:
            res = pool_map(nproc, snr, array, iterable(coords), fwhm, True,
                           array2, use2alone)
            res = np.array(res)
            yy = res[:, 0]
            xx = res[:, 1]
            snr_value = res[:, -1]
            snrmap_array[yy.astype('int'), xx.astype('int')] = snr_value

    # masking known sources
    else:
        if not isinstance(known_sources, tuple):
            raise TypeError("`known_sources` must be a tuple or tuple of "
                            "tuples")
        else:
            source_mask = np.zeros_like(array)
            if isinstance(known_sources[0], tuple):
                for coor in known_sources:
                    source_mask[coor[::-1]] = 1
            elif isinstance(known_sources[0], int):
                source_mask[known_sources[1], known_sources[0]] = 1
            else:
                raise TypeError("`known_sources` seems to have wrong type. It "
                                "must be a tuple of ints or tuple of tuples "
                                "(of ints)")

        # checking the mask with the sources
        if source_mask[source_mask == 1].shape[0] > 50:
            msg = 'Input source mask is too crowded (check its validity)'
            raise RuntimeError(msg)

        soury, sourx = np.where(source_mask == 1)
        sources = []
        coor_ann = []
        arr_masked_sources = array.copy()
        centery, centerx = frame_center(array)
        for y, x in zip(soury, sourx):
            radd = dist(centery, centerx, int(y), int(x))
            if int(radd) < centery - np.ceil(fwhm):
                sources.append((y, x))

        for source in sources:
            y, x = source
            radd = dist(centery, centerx, int(y), int(x))
            anny, annx = get_annulus_segments(array, int(radd-fwhm),
                                              int(np.round(3 * fwhm)))[0]

            ciry, cirx = draw.circle(y, x, int(np.ceil(fwhm)))
            # masking the sources positions (using the MAD of pixels in annulus)
            arr_masked_sources[ciry, cirx] = mad(array[anny, annx])

            # S/Ns of annulus without the sources
            coor_ann = [(x, y) for (x, y) in zip(annx, anny) if (x, y) not in
                        zip(cirx, ciry)]
            res = pool_map(nproc, snr, arr_masked_sources, iterable(coor_ann),
                           fwhm, True, array2, use2alone)
            res = np.array(res)
            yy_res = res[:, 0]
            xx_res = res[:, 1]
            snr_value = res[:, 4]
            snrmap_array[yy_res.astype('int'), xx_res.astype('int')] = snr_value
            coor_ann += coor_ann

        # S/Ns of the rest of the frame without the annulus
        coor_rest = [(x, y) for (x, y) in zip(xx, yy) if (x, y) not in coor_ann]
        res = pool_map(nproc, snr, array, iterable(coor_rest), fwhm, True,
                       array2, use2alone)
        res = np.array(res)
        yy_res = res[:, 0]
        xx_res = res[:, 1]
        snr_value = res[:, 4]
        snrmap_array[yy_res.astype('int'), xx_res.astype('int')] = snr_value

    if plot:
        plot_frames(snrmap_array, colorbar=True, title='S/N map', **kwargs)

    if verbose:
        print("S/N map created using {} processes".format(nproc))
        timing(start_time)
    return snrmap_array
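Usage sketches for snrmap (final_frame is a hypothetical post-processed frame):

snr_map = snrmap(final_frame, fwhm=4.5, approximated=False, plot=True, nproc=2)

# with a known companion at (x, y) = (63, 58), masked before noise estimation:
snr_map_masked = snrmap(final_frame, fwhm=4.5, known_sources=(63, 58), nproc=2)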
示例#24
0
    def correct_bad_pixels(self, verbose=True, debug=False):
        """ Corrects bad pixels in the SCI, SKY and UNSAT cubes using a bad
        pixel map built from the master flat field, then writes the corrected
        cubes and median SCI/SKY frames for inspection. """

        sci_list = []
        with open(self.outpath + "sci_list.txt", "r") as f:
            tmp = f.readlines()
            for line in tmp:
                sci_list.append(line.split('\n')[0])

        sky_list = []
        with open(self.outpath + "sky_list.txt", "r") as f:
            tmp = f.readlines()
            for line in tmp:
                sky_list.append(line.split('\n')[0])

        unsat_list = []
        with open(self.outpath + "unsat_list.txt", "r") as f:
            tmp = f.readlines()
            for line in tmp:
                unsat_list.append(line.split('\n')[0])

        n_sci = len(sci_list)
        ndit_sci = fits_info.ndit_sci
        n_sky = len(sky_list)
        ndit_sky = fits_info.ndit_sky

        tmp = open_fits(self.outpath + '1_crop_unsat_' + unsat_list[-1],
                        header=False)
        nx_unsat_crop = tmp.shape[2]

        master_flat_frame = open_fits(self.outpath + 'master_flat_field.fits')
        # Create bpix map
        bpix = np.where(np.abs(master_flat_frame - 1.09) >
                        0.41)  # i.e. for QE < 0.68 and QE > 1.5
        bpix_map = np.zeros([self.com_sz, self.com_sz])
        bpix_map[bpix] = 1
        if nx_unsat_crop < bpix_map.shape[1]:
            bpix_map_unsat = frame_crop(bpix_map, nx_unsat_crop)
        else:
            bpix_map_unsat = bpix_map

        #number of bad pixels
        nbpix = int(np.sum(bpix_map))
        ntotpix = self.com_sz**2

        print("total number of bpix: ", nbpix)
        print("total number of pixels: ", ntotpix)
        print("=> {}% of bad pixels.".format(100 * nbpix / ntotpix))

        write_fits(self.outpath + 'master_bpix_map.fits', bpix_map)
        write_fits(self.outpath + 'master_bpix_map_unsat.fits', bpix_map_unsat)
        plot_frames(bpix_map, bpix_map_unsat)

        #update final crop size
        final_sz = self.get_final_sz()

        #crop frames to that size
        for sc, fits_name in enumerate(sci_list):
            tmp = open_fits(self.outpath + '2_nan_corr_' + fits_name,
                            verbose=False)
            tmp_tmp = cube_crop_frames(tmp, final_sz, xy=self.agpm_pos)
            write_fits(self.outpath + '2_crop_' + fits_name, tmp_tmp)
            if not debug:
                os.system("rm " + self.outpath + '2_nan_corr_' + fits_name)

        for sk, fits_name in enumerate(sky_list):
            tmp = open_fits(self.outpath + '2_nan_corr_' + fits_name,
                            verbose=False)
            tmp_tmp = cube_crop_frames(tmp, final_sz, xy=self.agpm_pos)
            write_fits(self.outpath + '2_crop_' + fits_name, tmp_tmp)
            if not debug:
                os.system("rm " + self.outpath + '2_nan_corr_' + fits_name)

        if not debug:
            tmp = open_fits(self.outpath + '2_crop_' + sci_list[0])[-1]
            tmp_tmp = open_fits(self.outpath + '2_crop_' + sci_list[-1])[-1]
            plot_frames(tmp, tmp_tmp)
        else:
            # COMPARE BEFORE AND AFTER NAN_CORR + CROP
            old_tmp = open_fits(self.outpath + '2_ff_' + sci_list[0])[-1]
            old_tmp_tmp = open_fits(self.outpath + '2_ff_' + sci_list[-1])[-1]
            tmp = open_fits(self.outpath + '2_crop_' + sci_list[0])[-1]
            tmp_tmp = open_fits(self.outpath + '2_crop_' + sci_list[-1])[-1]
            plot_frames(old_tmp, tmp, old_tmp_tmp, tmp_tmp)

        # Crop the bpix map in a same way
        bpix_map = open_fits(self.outpath + 'master_bpix_map.fits')
        bpix_map_2ndcrop = frame_crop(bpix_map, final_sz, cenxy=self.agpm_pos)
        write_fits(self.outpath + 'master_bpix_map_2ndcrop.fits',
                   bpix_map_2ndcrop)

        bpix_map = open_fits(self.outpath + 'master_bpix_map_2ndcrop.fits')
        t0 = time_ini()
        for sc, fits_name in enumerate(sci_list):
            tmp = open_fits(self.outpath + '2_crop_' + fits_name,
                            verbose=False)
            # first with the bp max defined from the flat field (without protecting radius)
            tmp_tmp = cube_fix_badpix_isolated(tmp,
                                               bpm_mask=bpix_map,
                                               sigma_clip=7,
                                               num_neig=5,
                                               size=5,
                                               protect_mask=True,
                                               radius=9,
                                               verbose=False,
                                               debug=False)
            write_fits(self.outpath + '2_bpix_corr_' + fits_name,
                       tmp_tmp,
                       verbose=False)
            timing(t0)
            # second, residual hot pixels
            tmp_tmp, bpm = cube_fix_badpix_isolated(tmp_tmp,
                                                    bpm_mask=None,
                                                    sigma_clip=8,
                                                    num_neig=5,
                                                    size=5,
                                                    protect_mask=True,
                                                    radius=10,
                                                    verbose=False,
                                                    debug=False,
                                                    full_output=True)
            write_fits(self.outpath + '2_bpix_corr2_' + fits_name, tmp_tmp)
            write_fits(self.outpath + '2_bpix_corr2_map_' + fits_name, bpm)
            timing(t0)
            if not debug:
                os.system("rm " + self.outpath + '2_crop_' + fits_name)

        bpix_map = open_fits(self.outpath + 'master_bpix_map_2ndcrop.fits')
        t0 = time_ini()
        for sk, fits_name in enumerate(sky_list):
            tmp = open_fits(self.outpath + '2_crop_' + fits_name,
                            verbose=False)
            # first with the bp max defined from the flat field (without protecting radius)
            tmp_tmp = cube_fix_badpix_isolated(tmp,
                                               bpm_mask=bpix_map,
                                               sigma_clip=7,
                                               num_neig=5,
                                               size=5,
                                               protect_mask=True,
                                               radius=9,
                                               verbose=False,
                                               debug=False)
            write_fits(self.outpath + '2_bpix_corr_' + fits_name,
                       tmp_tmp,
                       verbose=False)
            timing(t0)
            # second, residual hot pixels
            tmp_tmp, bpm = cube_fix_badpix_isolated(tmp_tmp,
                                                    bpm_mask=None,
                                                    sigma_clip=8,
                                                    num_neig=5,
                                                    size=5,
                                                    protect_mask=True,
                                                    radius=10,
                                                    verbose=False,
                                                    debug=False,
                                                    full_output=True)
            write_fits(self.outpath + '2_bpix_corr2_' + fits_name, tmp_tmp)
            write_fits(self.outpath + '2_bpix_corr2_map_' + fits_name, bpm)
            timing(t0)
            if not debug:
                os.system("rm " + self.outpath + '2_crop_' + fits_name)

        bpix_map_unsat = open_fits(self.outpath + 'master_bpix_map_unsat.fits')
        t0 = time_ini()
        for un, fits_name in enumerate(unsat_list):
            tmp = open_fits(self.outpath + '2_nan_corr_unsat_' + fits_name,
                            verbose=False)
            # first with the bp max defined from the flat field (without protecting radius)
            tmp_tmp = cube_fix_badpix_isolated(tmp,
                                               bpm_mask=bpix_map_unsat,
                                               sigma_clip=7,
                                               num_neig=5,
                                               size=5,
                                               protect_mask=True,
                                               radius=9,
                                               verbose=False,
                                               debug=False)
            write_fits(self.outpath + '2_bpix_corr_unsat_' + fits_name,
                       tmp_tmp)
            timing(t0)
            # second, residual hot pixels
            tmp_tmp, bpm = cube_fix_badpix_isolated(tmp_tmp,
                                                    bpm_mask=None,
                                                    sigma_clip=8,
                                                    num_neig=5,
                                                    size=5,
                                                    protect_mask=True,
                                                    radius=10,
                                                    verbose=False,
                                                    debug=False,
                                                    full_output=True)
            write_fits(self.outpath + '2_bpix_corr2_unsat_' + fits_name,
                       tmp_tmp)
            write_fits(self.outpath + '2_bpix_corr2_map_unsat_' + fits_name,
                       bpm)
            timing(t0)
            if not debug:
                os.system("rm " + self.outpath + '2_nan_corr_unsat_' +
                          fits_name)

        # FIRST CREATE MASTER CUBE FOR SCI
        tmp_tmp_tmp = open_fits(self.outpath + '2_bpix_corr2_' + sci_list[0],
                                verbose=False)
        n_y = tmp_tmp_tmp.shape[1]
        n_x = tmp_tmp_tmp.shape[2]
        tmp_tmp_tmp = np.zeros([n_sci, n_y, n_x])
        for sc, fits_name in enumerate(sci_list):
            tmp_tmp_tmp[sc] = open_fits(
                self.outpath + '2_bpix_corr2_' + fits_name,
                verbose=False)[int(random.randrange(min(ndit_sci)))]
        tmp_tmp_tmp = np.median(tmp_tmp_tmp, axis=0)
        write_fits(self.outpath + 'TMP_2_master_median_SCI.fits', tmp_tmp_tmp)

        # THEN CREATE MASTER CUBE FOR SKY
        tmp_tmp_tmp = open_fits(self.outpath + '2_bpix_corr2_' + sky_list[0],
                                verbose=False)
        n_y = tmp_tmp_tmp.shape[1]
        n_x = tmp_tmp_tmp.shape[2]
        tmp_tmp_tmp = np.zeros([n_sky, n_y, n_x])
        for sk, fits_name in enumerate(sky_list):
            tmp_tmp_tmp[sk] = open_fits(
                self.outpath + '2_bpix_corr2_' + fits_name,
                verbose=False)[int(random.randrange(min(ndit_sky)))]
        tmp_tmp_tmp = np.median(tmp_tmp_tmp, axis=0)
        write_fits(self.outpath + 'TMP_2_master_median_SKY.fits', tmp_tmp_tmp)

        bpix_map_ori = open_fits(self.outpath + 'master_bpix_map_2ndcrop.fits')
        bpix_map_sci_0 = open_fits(self.outpath + '2_bpix_corr2_map_' +
                                   sci_list[0])
        bpix_map_sci_1 = open_fits(self.outpath + '2_bpix_corr2_map_' +
                                   sci_list[-1])
        bpix_map_sky_0 = open_fits(self.outpath + '2_bpix_corr2_map_' +
                                   sky_list[0])
        bpix_map_sky_1 = open_fits(self.outpath + '2_bpix_corr2_map_' +
                                   sky_list[-1])
        bpix_map_unsat_0 = open_fits(self.outpath + '2_bpix_corr2_map_unsat_' +
                                     unsat_list[0])
        bpix_map_unsat_1 = open_fits(self.outpath + '2_bpix_corr2_map_unsat_' +
                                     unsat_list[-1])
        plot_frames(bpix_map_ori, bpix_map_sci_0, bpix_map_sci_1,
                    bpix_map_sky_0, bpix_map_sky_1,
                    bpix_map_unsat_0, bpix_map_unsat_1)

        tmpSCI = open_fits(self.outpath + 'TMP_2_master_median_SCI.fits')
        tmpSKY = open_fits(self.outpath + 'TMP_2_master_median_SKY.fits')
        if not debug:
            tmp_tmp = open_fits(self.outpath + '2_bpix_corr2_' +
                                sci_list[1])[-1]
            tmp_tmp2 = open_fits(self.outpath + '2_bpix_corr2_' +
                                 sci_list[-1])[-1]
            plot_frames(tmp_tmp, tmp_tmp - tmpSKY,
                        tmp_tmp2, tmp_tmp2 - tmpSKY)
        else:
            # COMPARE BEFORE AND AFTER BPIX CORR (without sky subtr)
            tmp = open_fits(self.outpath + '2_crop_' + sci_list[-1])[-1]
            tmp_tmp = open_fits(self.outpath + '2_bpix_corr2_' +
                                sci_list[-1])[-1]
            tmp2 = open_fits(self.outpath + '2_crop_' + sky_list[-1])[-1]
            tmp_tmp2 = open_fits(self.outpath + '2_bpix_corr2_' +
                                 sky_list[-1])[-1]
            plot_frames(tmp, tmp - tmpSKY,
                        tmp_tmp, tmp_tmp - tmpSKY,
                        tmp2, tmp2 - tmpSKY,
                        tmp_tmp2, tmp_tmp2 - tmpSKY)
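The flat-field bad-pixel criterion used above (np.abs(master_flat_frame - 1.09) > 0.41) flags pixels whose response falls below 0.68 or above 1.5. A hypothetical standalone helper making that rule explicit:

import numpy as np

def bpix_map_from_flat(master_flat, qe_min=0.68, qe_max=1.5):
    # 1 marks a bad pixel, 0 a good one
    bpix_map = np.zeros_like(master_flat)
    bpix_map[(master_flat < qe_min) | (master_flat > qe_max)] = 1
    return bpix_map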
示例#25
0
def fit_2dairydisk(array,
                   crop=False,
                   cent=None,
                   cropsize=15,
                   fwhm=4,
                   threshold=False,
                   sigfactor=6,
                   full_output=True,
                   debug=True):
    """ Fitting a 2D Airy to the 2D distribution of the data.

    Parameters
    ----------
    array : numpy ndarray
        Input frame with a single PSF.
    crop : bool, optional
        If True a square sub-image of size ``cropsize`` will be cropped.
    cent : tuple of int, optional
        X,Y integer position of source in the array for extracting the subimage.
        If None the center of the frame is used for cropping the subframe (the
        PSF is assumed to be ~ at the center of the frame).
    cropsize : int, optional
        Size of the subimage.
    fwhm : float, optional
        Initial values for the FWHM of the fitted 2d Airy disk, in px.
    threshold : bool, optional
        If True the background pixels (estimated using sigma clipped statistics)
        will be replaced by small random Gaussian noise.
    sigfactor : int, optional
        The background pixels will be thresholded before fitting a 2d Airy
        disk to the data using sigma clipped statistics. All values smaller
        than (MEDIAN + sigfactor*STDDEV) will be replaced by small random
        Gaussian noise.
    full_output : bool, optional
        If False it returns just the centroid, if True it also returns the
        FWHM [px], the amplitude, the first-zero radius and the uncertainty
        on each fitted parameter.
    debug : bool, optional
        If True, the function prints out parameters of the fit and plots the
        data, model and residuals.

    Returns
    -------
    mean_y : float
        Source centroid y position on input array from fitting.
    mean_x : float
        Source centroid x position on input array from fitting.

    If ``full_output`` is True it returns a Pandas dataframe containing the
    following columns:
    'amplitude' : Float value. Airy disk amplitude.
    'centroid_x' : Float value. X coordinate of the centroid.
    'centroid_y' : Float value. Y coordinate of the centroid.
    'fwhm' : Float value. FWHM [px].
    'radius' : Float value. First-zero radius [px].
    plus the corresponding '*_err' uncertainty columns.

    """
    check_array(array, dim=2, msg='array')

    if crop:
        if cent is None:
            ceny, cenx = frame_center(array)
        else:
            cenx, ceny = cent

        imside = array.shape[0]
        psf_subimage, suby, subx = get_square(array,
                                              min(cropsize, imside),
                                              ceny,
                                              cenx,
                                              position=True)
    else:
        psf_subimage = array.copy()

    if threshold:
        _, clipmed, clipstd = sigma_clipped_stats(psf_subimage, sigma=2)
        indi = np.where(psf_subimage <= clipmed + sigfactor * clipstd)
        subimnoise = np.random.randn(psf_subimage.shape[0],
                                     psf_subimage.shape[1]) * clipstd
        psf_subimage[indi] = subimnoise[indi]

    # Creating the 2d Airy disk model
    init_amplitude = np.ptp(psf_subimage)
    xcom, ycom = cen_com(psf_subimage)
    diam_1st_zero = (fwhm * 2.44) / 1.028
    airy = models.AiryDisk2D(amplitude=init_amplitude,
                             x_0=xcom,
                             y_0=ycom,
                             radius=diam_1st_zero / 2.)
    # Levenberg-Marquardt algorithm
    fitter = fitting.LevMarLSQFitter()
    y, x = np.indices(psf_subimage.shape)
    fit = fitter(airy, x, y, psf_subimage)

    if crop:
        mean_y = fit.y_0.value + suby
        mean_x = fit.x_0.value + subx
    else:
        mean_y = fit.y_0.value
        mean_x = fit.x_0.value

    amplitude = fit.amplitude.value
    radius = fit.radius.value
    fwhm = ((radius * 1.028) / 2.44) * 2

    # compute uncertainties
    if fitter.fit_info['param_cov'] is not None:
        perr = np.sqrt(np.diag(fitter.fit_info['param_cov']))
        amplitude_err, mean_x_err, mean_y_err, radius_err = perr
        fwhm_err = ((radius_err * 1.028) / 2.44) * 2
    else:
        amplitude_err, mean_x_err, mean_y_err = None, None, None
        radius_err, fwhm_err = None, None

    if debug:
        if threshold:
            label = ('Subimage thresholded', 'Model', 'Residuals')
        else:
            label = ('Subimage', 'Model', 'Residuals')
        plot_frames((psf_subimage, fit(x, y), psf_subimage - fit(x, y)),
                    grid=True,
                    grid_spacing=1,
                    label=label)
        print('FWHM =', fwhm)
        print('centroid y =', mean_y)
        print('centroid x =', mean_x)
        print('centroid y subim =', fit.y_0.value)
        print('centroid x subim =', fit.x_0.value, '\n')
        print('amplitude =', amplitude)
        print('radius =', radius)

    if full_output:
        return pd.DataFrame(
            {
                'centroid_y': mean_y,
                'centroid_x': mean_x,
                'fwhm': fwhm,
                'radius': radius,
                'amplitude': amplitude,
                'centroid_y_err': mean_y_err,
                'centroid_x_err': mean_x_err,
                'fwhm_err': fwhm_err,
                'radius_err': radius_err,
                'amplitude_err': amplitude_err
            },
            index=[0])
    else:
        return mean_y, mean_x
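A minimal usage sketch (not part of the original example), assuming the
module-level imports used by ``fit_2dairydisk`` above (numpy, pandas, astropy
and VIP's ``get_square``/``frame_center``/``cen_com``) are available; it fits a
synthetic Airy pattern with a known centre and reads back the fitted values:

import numpy as np
from astropy.modeling import models

# Noiseless Airy pattern with a known centre on a 31 x 31 grid.
yy, xx = np.mgrid[:31, :31]
true_fwhm = 4.0
first_zero_radius = (true_fwhm * 2.44) / 1.028 / 2.0
frame = models.AiryDisk2D(amplitude=1.0, x_0=16.3, y_0=15.7,
                          radius=first_zero_radius)(xx, yy)

df = fit_2dairydisk(frame, crop=True, cropsize=21, fwhm=4,
                    full_output=True, debug=False)
print(df[['centroid_x', 'centroid_y', 'fwhm']])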
Example #26
def chisquare(modelParameters,
              cube,
              angs,
              plsc,
              psfs_norm,
              fwhm,
              annulus_width,
              aperture_radius,
              initialState,
              ncomp,
              cube_ref=None,
              svd_mode='lapack',
              scaling=None,
              fmerit='sum',
              collapse='median',
              algo=pca_annulus,
              delta_rot=1,
              imlib='opencv',
              interpolation='lanczos4',
              algo_options={},
              transmission=None,
              mu_sigma=None,
              weights=None,
              force_rPA=False,
              debug=False):
    """
    Calculate the reduced chi2:
    \chi^2_r = \frac{1}{N-3}\sum_{j=1}^{N} |I_j|,
    where N is the number of pixels within a circular aperture centered on the 
    first estimate of the planet position, and I_j the j-th pixel intensity.
    
    Parameters
    ----------    
    modelParameters: tuple
        The model parameters, typically (r, theta, flux).
    cube: numpy.array
        The cube of fits images expressed as a numpy.array.
    angs: numpy.array
        The parallactic angles associated with each frame, as a numpy.array.
    plsc: float
        The platescale, in arcsec per pixel.
    psfs_norm: numpy.array
        The scaled psf expressed as a numpy.array.    
    fwhm : float
        The FWHM in pixels.
    annulus_width: int, optional
        The width in pixels of the annulus on which the PCA is done.       
    aperture_radius: int, optional
        The radius of the circular aperture in terms of the FWHM.
    initialState: numpy.array
        Position (r, theta) of the circular aperture center.
    ncomp: int or None
        The number of principal components for PCA-based algorithms.
    cube_ref : numpy ndarray, 3d, optional
        Reference library cube. For Reference Star Differential Imaging.
    svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
        Switch for different ways of computing the SVD and selected PCs.         
    scaling : {'temp-mean', 'temp-standard'} or None, optional
        With None, no scaling is performed on the input data before SVD. With 
        "temp-mean" then temporal px-wise mean subtraction is done and with 
        "temp-standard" temporal mean centering plus scaling to unit variance 
        is done. 
    fmerit : {'sum', 'stddev'}, string optional
        Chooses the figure of merit to be used. 'stddev' works better for
        close-in companions sitting on top of speckle noise.
    collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
        Sets the way of collapsing the frames for producing a final image. If
        None then the cube of residuals is used when measuring the function of
        merit (instead of a single final frame).
    algo: python routine, opt {pca_annulus, pca_annular, pca, custom}
        Routine to be used to model and subtract the stellar PSF. From an input
        cube, derotation angles, and optional arguments, it should return a 
        post-processed frame.
    delta_rot: float, optional
        If algo is set to pca_annular, delta_rot is the angular threshold used
        to select frames in the PCA library (see description of pca_annular).
    imlib : str, optional
        See the documentation of the ``vip_hci.preproc.frame_shift`` function.
    interpolation : str, optional
        See the documentation of the ``vip_hci.preproc.frame_shift`` function.
    algo_options: dict, opt
        Dictionary with additional parameters related to the algorithm 
        (e.g. tol, min_frames_lib, max_frames_lib). If 'algo' is not a vip
        routine, this dict should contain all necessary arguments apart from
        the cube and derotation angles. Note: arguments such as ncomp, svd_mode,
        scaling, imlib, interpolation or collapse can also be included in this
        dict (the latter are also kept as function arguments for consistency
        with older versions of vip). 
    transmission: numpy array, optional
        Array with 2 columns. First column is the radial separation in pixels. 
        Second column is the off-axis transmission (between 0 and 1) at the 
        radial separation given in column 1.
    mu_sigma: tuple of 2 floats or None, opt
        If set to None: not used, and falls back to original version of the 
        algorithm, using fmerit.
        If set to anything but None: will compute the mean and standard 
        deviation of pixel intensities in an annulus centered on the location 
        of the companion, excluding the area directly adjacent to the companion.
    weights : 1d array, optional
        If provided, the negative fake companion fluxes will be scaled according
        to these weights before injection in the cube. Can reflect changes in 
        the observing conditions throughout the sequence.
    force_rPA: bool, optional
        Whether to only search for optimal flux, provided (r,PA).
    debug: bool, opt
        Whether to debug and plot the post-processed frame after injection of 
        the negative fake companion.
        
    Returns
    -------
    out: float
        The reduced chi squared.
        
    """
    if force_rPA:
        r, theta = initialState
        flux_tmp = modelParameters[0]
    else:
        try:
            r, theta, flux_tmp = modelParameters
        except TypeError:
            msg = 'modelParameters must be a tuple, {} was given'
            raise TypeError(msg.format(type(modelParameters)))

    if weights is None:
        flux = -flux_tmp
        norm_weights = weights
    else:
        flux = -flux_tmp * weights
        norm_weights = weights / np.sum(weights)

    # Create the cube with the negative fake companion injected
    cube_negfc = cube_inject_companions(cube,
                                        psfs_norm,
                                        angs,
                                        flevel=flux,
                                        plsc=plsc,
                                        rad_dists=[r],
                                        n_branches=1,
                                        theta=theta,
                                        imlib=imlib,
                                        verbose=False,
                                        interpolation=interpolation,
                                        transmission=transmission)

    # Perform PCA and extract the zone of interest
    res = get_values_optimize(cube_negfc,
                              angs,
                              ncomp,
                              annulus_width,
                              aperture_radius,
                              fwhm,
                              initialState[0],
                              initialState[1],
                              cube_ref=cube_ref,
                              svd_mode=svd_mode,
                              scaling=scaling,
                              algo=algo,
                              delta_rot=delta_rot,
                              collapse=collapse,
                              algo_options=algo_options,
                              weights=norm_weights,
                              debug=debug)

    if debug and collapse is not None:
        values, frpca = res
        plot_frames(frpca)
    else:
        values = res

    # Function of merit
    if mu_sigma is None:
        # old version - delete?
        if fmerit == 'sum':
            chi = np.sum(np.abs(values))
        elif fmerit == 'stddev':
            values = values[values != 0]
            chi = np.std(values) * values.size
        else:
            raise RuntimeError('fmerit choice not recognized.')
    else:
        # true expression of a gaussian log probability
        mu = mu_sigma[0]
        sigma = mu_sigma[1]
        chi = np.sum(np.power(mu - values, 2) / sigma**2)

    return chi
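A toy sketch (values invented for illustration) of the two figure-of-merit
branches at the end of ``chisquare``: the 'sum' merit versus the
Gaussian-likelihood merit used when ``mu_sigma`` is provided:

import numpy as np

# Residual intensities extracted in the aperture around the candidate.
values = np.array([1.2, -0.4, 0.9, 0.1])

# fmerit='sum': sum of absolute residual intensities.
chi_sum = np.sum(np.abs(values))

# mu_sigma branch: chi squared against noise statistics (mu, sigma) measured
# in an annulus at the companion's separation.
mu, sigma = 0.0, 0.5
chi_gauss = np.sum((mu - values) ** 2 / sigma ** 2)
print(chi_sum, chi_gauss)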
Example #27
def _pairwise_ann(ann,
                  n_annuli,
                  fwhm,
                  angles,
                  delta_rot,
                  metric,
                  dist_threshold,
                  n_similar,
                  radius_int,
                  asize,
                  ncomp,
                  imlib,
                  interpolation,
                  verbose,
                  debug=False):
    """
    Helper function performing pair-wise subtraction for a single annulus.
    """
    start_time = time_ini(False)

    n_frames = array.shape[0]

    pa_threshold, in_rad, ann_center = _define_annuli(angles, ann, n_annuli,
                                                      fwhm, radius_int, asize,
                                                      delta_rot, 1, verbose)
    if ncomp is not None:
        arrayin = pca_annulus(array,
                              None,
                              ncomp,
                              asize,
                              ann_center,
                              svd_mode='lapack',
                              scaling=None,
                              collapse=None)
    else:
        arrayin = array

    yy, xx = get_annulus_segments(array[0],
                                  inner_radius=in_rad,
                                  width=asize,
                                  nsegm=1)[0]
    values = arrayin[:, yy, xx]  # n_frames x n_pxs_annulus

    if debug:
        print('Done taking pixel intensities from annulus.')
        timing(start_time)

    mat_dists_ann_full = pairwise_distances(values, metric=metric)

    if pa_threshold > 0:
        mat_dists_ann = np.zeros_like(mat_dists_ann_full)
        for i in range(n_frames):
            ind_fr_i = _find_indices_adi(angles, i, pa_threshold, None, False)
            mat_dists_ann[i][ind_fr_i] = mat_dists_ann_full[i][ind_fr_i]
    else:
        mat_dists_ann = mat_dists_ann_full

    if debug:
        msg = 'Done calculating the {} distance for annulus {}'
        print(msg.format(metric, ann + 1))
        timing(start_time)

    threshold = np.percentile(mat_dists_ann[mat_dists_ann != 0],
                              dist_threshold)
    mat_dists_ann[mat_dists_ann > threshold] = np.nan
    mat_dists_ann[mat_dists_ann == 0] = np.nan
    if not mat_dists_ann[~np.isnan(mat_dists_ann)].size > 0:
        raise RuntimeError('No pairs left. Decrease thresholds')

    if debug:
        plot_frames(mat_dists_ann)
        print('Done thresholding/checking distances.')
        timing(start_time)

    # median of n ``n_similar`` most similar patches
    cube_res = []
    if n_similar is not None:
        if n_similar < 3:
            raise ValueError("n_similar must be >= 3 or None")
        for i in range(n_frames):
            vector = pn.DataFrame(mat_dists_ann[i])
            if vector.sum().values == 0:
                continue
            else:
                vector_sorted = vector[0].sort_values()[:n_similar]
                ind_n_similar = vector_sorted.index.values
                # median subtraction
                res = values[i] - np.median((values[ind_n_similar]), axis=0)
                cube_res.append(res)

        cube_res = np.array(cube_res)

    # taking just the most similar frame
    else:
        ind = []
        for i in range(n_frames):
            vector = pn.DataFrame(mat_dists_ann[i])
            if vector.sum().values == 0:
                continue
            else:
                ind.append((i, vector.idxmin().tolist()[0]))
                ind.append((vector.idxmin().tolist()[0], i))

        if debug:
            print('Done finding pairs. Total found: ', len(ind) / 2)
            timing(start_time)

        df = pn.DataFrame(ind)  # sorting using pandas dataframe
        df.columns = ['i', 'j']
        df = df.sort_values('i')

        indices = df.values
        indices = indices.astype(int)  # back to a ndarray int type

        size = indices.shape[0]
        angles_list = np.zeros((size))
        for i in range(size):
            angles_list[i] = angles[indices[i][0]]  # filter of the angles vector

        cube_res = np.zeros((size, yy.shape[0]))
        # pair-wise subtraction
        for i in range(size):
            res = values[indices[i][0]] - values[indices[i][1]]
            cube_res[i] = res

    cube_out = np.zeros((array.shape[0], array.shape[1], array.shape[2]))
    for i in range(n_frames):
        cube_out[i, yy, xx] = cube_res[i]

    return cube_out
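A small stand-alone sketch (random placeholder data, ``dist_threshold=30``
chosen arbitrarily) of the distance-thresholding step used in
``_pairwise_ann`` above:

import numpy as np
from sklearn.metrics import pairwise_distances

values = np.random.rand(10, 60)          # 10 frames x 60 annulus pixels
dists = pairwise_distances(values, metric='manhattan')

# Keep only the most similar pairs: distances above the given percentile,
# and the zero diagonal, are discarded.
thr = np.percentile(dists[dists != 0], 30)
dists[(dists > thr) | (dists == 0)] = np.nan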
Example #28
File: fit_2d.py  Project: carlgogo/VIP
def fit_2dairydisk(array, crop=False, cent=None, cropsize=15, fwhm=4,
                   threshold=False, sigfactor=6, full_output=True,
                   debug=True):
    """ Fitting a 2D Moffat to the 2D distribution of the data.

    Parameters
    ----------
    array : numpy ndarray
        Input frame with a single PSF.
    crop : bool, optional
        If True, a square subimage of side ``cropsize`` will be cropped.
    cent : tuple of int, optional
        X,Y integer position of source in the array for extracting the subimage.
        If None the center of the frame is used for cropping the subframe (the
        PSF is assumed to be ~ at the center of the frame).
    cropsize : int, optional
        Size of the subimage.
    fwhm : float, optional
        Initial value for the FWHM of the fitted 2d Airy disk, in px.
    threshold : bool, optional
        If True the background pixels (estimated using sigma clipped statistics)
        will be replaced by small random Gaussian noise.
    sigfactor : int, optional
        The background pixels will be thresholded before fitting a 2d Airy
        disk to the data using sigma clipped statistics. All values smaller
        than (MEDIAN + sigfactor*STDDEV) will be replaced by small random
        Gaussian noise.
    full_output : bool, optional
        If False it returns just the centroid, if True it also returns the
        FWHM [px], the first-zero radius and the amplitude.
    debug : bool, optional
        If True, the function prints out parameters of the fit and plots the
        data, model and residuals.

    Returns
    -------
    mean_y : float
        Source centroid y position on input array from fitting.
    mean_x : float
        Source centroid x position on input array from fitting.

    If ``full_output`` is True it returns a Pandas dataframe containing the
    following columns:
    'amplitude' : Float value. Airy disk amplitude.
    'centroid_x' : Float value. X coordinate of the centroid.
    'centroid_y' : Float value. Y coordinate of the centroid.
    'fwhm' : Float value. FWHM [px].
    'radius' : Float value. First-zero radius [px].

    """
    check_array(array, dim=2, msg='array')

    if crop:
        if cent is None:
            ceny, cenx = frame_center(array)
        else:
            cenx, ceny = cent

        imside = array.shape[0]
        psf_subimage, suby, subx = get_square(array, min(cropsize, imside),
                                              ceny, cenx, position=True)
    else:
        psf_subimage = array.copy()

    if threshold:
        _, clipmed, clipstd = sigma_clipped_stats(psf_subimage, sigma=2)
        indi = np.where(psf_subimage <= clipmed + sigfactor * clipstd)
        subimnoise = np.random.randn(psf_subimage.shape[0],
                                     psf_subimage.shape[1]) * clipstd
        psf_subimage[indi] = subimnoise[indi]

    # Creating the 2d Airy disk model
    init_amplitude = np.ptp(psf_subimage)
    xcom, ycom = photutils.centroid_com(psf_subimage)
    diam_1st_zero = (fwhm * 2.44) / 1.028
    airy = models.AiryDisk2D(amplitude=init_amplitude, x_0=xcom, y_0=ycom,
                             radius=diam_1st_zero/2.)
    # Levenberg-Marquardt algorithm
    fitter = fitting.LevMarLSQFitter()
    y, x = np.indices(psf_subimage.shape)
    fit = fitter(airy, x, y, psf_subimage)

    if crop:
        mean_y = fit.y_0.value + suby
        mean_x = fit.x_0.value + subx
    else:
        mean_y = fit.y_0.value
        mean_x = fit.x_0.value

    amplitude = fit.amplitude.value
    radius = fit.radius.value
    fwhm = ((radius * 1.028) / 2.44) * 2

    if debug:
        if threshold:
            label = ('Subimage thresholded', 'Model', 'Residuals')
        else:
            label = ('Subimage', 'Model', 'Residuals')
        plot_frames((psf_subimage, fit(x, y), psf_subimage - fit(x, y)),
                    grid=True, grid_spacing=1, label=label)
        print('FWHM =', fwhm)
        print('centroid y =', mean_y)
        print('centroid x =', mean_x)
        print('centroid y subim =', fit.y_0.value)
        print('centroid x subim =', fit.x_0.value, '\n')
        print('amplitude =', amplitude)
        print('radius =', radius)

    if full_output:
        return pd.DataFrame({'centroid_y': mean_y, 'centroid_x': mean_x,
                             'fwhm': fwhm, 'radius': radius,
                             'amplitude': amplitude}, index=[0])
    else:
        return mean_y, mean_x
Example #29
def CenteringLoop(cube_orig, angs, psf):

    #---------------------------------------------------------------
    print("Recentering Cube")
    print(" There are two ways to recenter the cube")
    print(" 1. Use a 2D Gaussian Fit")
    print(
        " 2. Recenter manually. (Use if Gaussian fit doesn't work. Works if all images are already aligned but not in the center of the image)"
    )

    while True:
        print(" Choose 1 (Gaussian fit) or 2 (manual fit): ")
        fit_method = input()
        if fit_method == '1' or fit_method == '2':
            break
        else:
            print(" ->Option not recognised, please input '1' or '2'. ")

    # Initialise variable
    star_xy = [0.1, 0.1]

    print("Using DS9, open any image in Newcube.fits")
    star_xy[0] = input(
        "Input the x coordinate of the central position of the star: ")
    star_xy[0] = int(star_xy[0])

    star_xy[1] = input(
        "Input the y coordinate of the central position of the star: ")
    star_xy[1] = int(star_xy[1])

    # At this point cube_orig (HDU 0), the parallactic angles and the PSF have
    # already been loaded with VIP.

    cube1 = cube_orig

    # Gaussian Fit
    if fit_method == '1':
        print(" --2D Gaussian Fit")
        print("Fitting a 2D gaussian to centre the images...")
        #Uses VIP's 2D gaussian fitting algorithm to centre the cube.
        cube1, shy1, shx1, fwhm = Gaussian_2d_Fit(psf, cube_orig, star_xy)

    # Manual Fit
    elif fit_method == '2':
        print(" --Manual Fit")
        # Calculate shifts here
        image_centre = [512, 512]
        print(" Image centre is at", image_centre)
        shift_x = image_centre[0] - star_xy[0]
        shift_y = image_centre[1] - star_xy[1]
        cube1 = vip.preproc.recentering.cube_shift(cube_orig, shift_y, shift_x)
        fwhm = Calculate_fwhm(psf)

    #Writes the values of the centered cube into a fits file.
    vip.fits.write_fits('centeredcube_{name}.fits'.format(name=name_input),
                        cube1,
                        verbose=True)

    cube = cube1
    # Use the centred cube from here on.
    # Plot the first frame of the original cube against the recentred one.

    im1 = vip.preproc.cosmetics.frame_crop(cube_orig[0], 1000, verbose=False)
    im2 = vip.preproc.cosmetics.frame_crop(cube[0], 1000, verbose=False)

    hciplot.plot_frames(
        (im1, im2),
        label=('Original first frame', 'First frame after recentering'),
        grid=True,
        size_factor=4)

    print(
        "Open DS9 and look through the centred data cube at each frame making sure it is centred."
    )
    print("If you're not happy with it, redo centring")
    print("Redo Centring?")
    cent_loop = Checkfunction()

    return cube, cent_loop
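A stand-alone sketch of the manual-recentering branch above (the cube and the
star position read off in DS9 are placeholders), relying only on
``vip.preproc.recentering.cube_shift`` as called in the example:

import numpy as np
import vip_hci as vip

cube_orig = np.zeros((5, 1024, 1024))    # placeholder cube
star_xy = (498, 505)                     # hypothetical star position from DS9
image_centre = (512, 512)

shift_x = image_centre[0] - star_xy[0]
shift_y = image_centre[1] - star_xy[1]
cube_centred = vip.preproc.recentering.cube_shift(cube_orig, shift_y, shift_x)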
Example #30
    def plot_detmaps(self, i=None, thr=9, dpi=100,
                     axis=True, grid=False, vmin=-10, vmax='max',
                     plot_type="horiz"):
        """
        Plot the detection maps for one injection.

        Parameters
        ----------
        i : int or None, optional
            Index of the injection, between 0 and self.n_injections. If None,
            takes the 30th injection, or, if there are fewer injections, the
            middle one.
        thr : int, optional
            Index of the threshold.
        dpi, axis, grid, vmin, vmax
            Passed to ``plot_frames``.
        plot_type : {"horiz" or "vert"}, optional
            Plot type.

            ``horiz``
                One row per algorithm (frame, probmap, binmap)
            ``vert``
                1 row for final frames, 1 row for probmaps and 1 row for binmaps

        """
        # input parameters
        if i is None:
            if len(self.list_xy) > 30:
                i = 30
            else:
                i = len(self.list_xy) // 2

        if vmax == 'max':
            # TODO: document this feature.
            vmax = np.concatenate([m.frames[i] for m in self.methods if
                                   hasattr(m, "frames") and
                                   len(m.frames) >= i]).max()/2

        # print information
        print('X,Y: {}'.format(self.list_xy[i]))
        print('dist: {:.3f}, flux: {:.3f}'.format(self.dists[i],
                                                  self.fluxes[i]))
        print()

        if plot_type in [1, "horiz"]:
            for m in self.methods:
                print('detection state: {} | false positives: {}'.format(
                    m.detections[i][thr], m.fps[i][thr]))
                labels = ('{} frame'.format(m.name), '{} S/Nmap'.format(m.name),
                          'Thresholded at {:.1f}'.format(m.thresholds[thr]))
                plot_frames((m.frames[i] if len(m.frames) >= i else
                            np.zeros((2, 2)), m.probmaps[i], m.bmaps[i][thr]),
                            label=labels, dpi=dpi, horsp=0.2, axis=axis,
                            grid=grid, cmap=['viridis', 'viridis', 'gray'])

        elif plot_type in [2, "vert"]:
            labels = tuple('{} frame'.format(m.name) for m in self.methods if
                           hasattr(m, "frames") and len(m.frames) >= i)
            plot_frames(tuple(m.frames[i] for m in self.methods if
                        hasattr(m, "frames") and len(m.frames) >= i),
                        dpi=dpi, label=labels, vmax=vmax, vmin=vmin, axis=axis,
                        grid=grid)

            plot_frames(tuple(m.probmaps[i] for m in self.methods), dpi=dpi,
                        label=tuple(['{} S/Nmap'.format(m.name) for m in
                                     self.methods]), axis=axis, grid=grid)

            for m in self.methods:
                msg = '{} detection: {}, FPs: {}'
                print(msg.format(m.name, m.detections[i][thr], m.fps[i][thr]))

            labels = tuple('Thresholded at {:.1f}'.format(m.thresholds[thr])
                           for m in self.methods)
            plot_frames(tuple(m.bmaps[i][thr] for m in self.methods),
                        dpi=dpi, label=labels, axis=axis, grid=grid,
                        colorbar=False, cmap='bone')
        else:
            raise ValueError("`plot_type` unknown")
Example #31
def _get_adi_snrs(psf,
                  angle_list,
                  fwhm,
                  plsc,
                  flux_dist_theta_all,
                  wavelengths=None,
                  spectrum=None,
                  mode='pca',
                  n_ks=3,
                  scaling='temp-standard',
                  svd_mode='randsvd',
                  debug=False):
    """ Get the mean S/N (at 3 equidistant positions) for a given flux and
    distance, on a residual frame.
    """
    theta = flux_dist_theta_all[2]
    flux = flux_dist_theta_all[0]
    dist = flux_dist_theta_all[1]

    if GARRAY.ndim == 3:
        spectrum = 1
    elif GARRAY.ndim == 4:
        # grey spectrum (same flux in all wls)
        if spectrum is None:
            spectrum = np.ones((GARRAY.shape[0]))

    snrs = []
    # 3 equidistant azimuthal positions, 1 or several K values
    for ang in [theta, theta + 120, theta + 240]:
        cube_fc, pos = cube_inject_companions(GARRAY,
                                              psf,
                                              angle_list,
                                              flevel=flux * spectrum,
                                              plsc=plsc,
                                              rad_dists=[dist],
                                              theta=ang,
                                              verbose=False,
                                              full_output=True)
        posy, posx = pos[0]
        fr_temp = _compute_residual_frame(cube_fc, angle_list, dist, fwhm,
                                          wavelengths, mode, n_ks, svd_mode,
                                          scaling, 'median', 'opencv',
                                          'bilinear')
        # handling the case of mode='median'
        if isinstance(fr_temp, np.ndarray):
            fr_temp = [fr_temp]
        snrs_ks = []
        for i in range(len(fr_temp)):
            res = snr(fr_temp[i],
                      source_xy=(posx, posy),
                      fwhm=fwhm,
                      exclude_negative_lobes=True)
            snrs_ks.append(res)

        maxsnr_ks = max(snrs_ks)
        if np.isinf(maxsnr_ks) or np.isnan(maxsnr_ks) or maxsnr_ks < 0:
            maxsnr_ks = 0.01

        snrs.append(maxsnr_ks)

        if debug:
            print(' ')
            cy, cx = frame_center(GARRAY[0])
            label = 'Flux: {:.1f}, Max S/N: {:.2f}'.format(flux, maxsnr_ks)
            hp.plot_frames(tuple(np.array(fr_temp)),
                           axis=False,
                           horsp=0.05,
                           colorbar=False,
                           circle=((posx, posy), (cx, cy)),
                           circle_radius=(5, dist),
                           label=label,
                           dpi=60)

    # max S/N over the 3 equidistant azimuthal positions
    snr_value = np.max(snrs)

    return flux, snr_value
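A toy sketch of the S/N measurement step above, on a synthetic residual frame
with a fake companion; it assumes ``snr`` is importable from
``vip_hci.metrics`` (as in recent VIP releases) and follows the call pattern
used in the code:

import numpy as np
from vip_hci.metrics import snr

res_frame = np.random.normal(0, 1.0, (101, 101))
res_frame[50, 70] += 20.0                # fake companion at (x=70, y=50)

sn = snr(res_frame, source_xy=(70, 50), fwhm=4,
         exclude_negative_lobes=True)
print('S/N at the injected position: {:.1f}'.format(sn))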
Example #32
def compute_binary_map(frame, thresholds, injections, fwhm, npix=1,
                       overlap_threshold=0.7, max_blob_fact=2, plot=False,
                       debug=False):
    """
    Take a list of ``thresholds``, create binary maps and count detections and
    false positives.
    A blob which is "too big" is split into apertures, and every aperture adds
    one 'false positive'.

    Parameters
    ----------
    frame : numpy ndarray
        Detection map.
    thresholds : list or numpy ndarray
        List of thresholds (detection criteria).
    injections : tuple, list of tuples
        Coordinates (x,y) of the injected companions. Also accepts 1d/2d
        ndarrays.
    fwhm : float
        FWHM, used for obtaining the size of the circular aperture centered at
        the injection position (and measuring the overlapping with found blobs).
        The circular aperture has 2 * FWHM in diameter.
    npix : int, optional
        The number of connected pixels, each greater than the given threshold,
        that an object must have to be detected. ``npix`` must be a positive
        integer. Passed to ``detect_sources`` function from ``photutils``.
    overlap_threshold : float
        Minimum fraction of overlap (between 0 and 1) a blob must have with the
        aperture around an injection to count as a detection.
    max_blob_fact : float
        Maximum size of a blob (in multiples of the resolution element) before
        it is considered as "too big" (= non-detection).
    plot : bool, optional
        If True, a final resulting plot summarizing the results will be shown.
    debug : bool, optional
        For showing optional information.

    Returns
    -------
    list_detections : list of int
        List of detection count for each threshold.
    list_fps : list of int
        List of false positives count for each threshold.
    list_binmaps : list of 2d ndarray
        List of binary maps: detection maps thresholded for each threshold
        value.

    """
    def _overlap_injection_blob(injection, fwhm, blob_mask):
        """
        Parameters
        ----------
        injection: tuple (y,x)
        fwhm : float
        blob_mask : 2d bool ndarray

        Returns
        -------
        overlap_fact : float between 0 and 1
            Percentage of the area overlap. If the blob is smaller than the
            resolution element, this is ``intersection_area / blob_area``,
            otherwise ``intersection_area / resolution_element``.

        """
        injection_mask = get_circle(np.ones_like(blob_mask), radius=fwhm,
                                    cy=injection[1], cx=injection[0],
                                    mode="mask")
        intersection = injection_mask & blob_mask
        smallest_area = min(blob_mask.sum(), injection_mask.sum())
        return intersection.sum() / smallest_area
    # --------------------------------------------------------------------------
    list_detections = []
    list_fps = []
    list_binmaps = []
    sizey, sizex = frame.shape
    cy, cx = frame_center(frame)
    reselem_mask = get_circle(frame, radius=fwhm, cy=cy, cx=cx, mode="val")
    npix_circ_aperture = reselem_mask.shape[0]

    # normalize injections: accepts combinations of 1d/2d and tuple/list/array.
    injections = np.asarray(injections)
    if injections.ndim == 1:
        injections = np.array([injections])

    for ithr, threshold in enumerate(thresholds):
        if debug:
            print("\nprocessing threshold #{}: {}".format(ithr + 1, threshold))

        segments = detect_sources(frame, threshold, npix, connectivity=4)
        binmap = (segments.data != 0)

        if debug:
            plot_frames((segments.data, binmap), cmap=('tab20b', 'binary'),
                        circle=tuple(tuple(xy) for xy in injections),
                        circle_radius=fwhm, circle_alpha=0.6,
                        label=("segmentation map", "binary map"))

        detections = 0
        fps = 0

        for segment in segments.segments:
            label = segment.label
            blob_mask = (segments.data == label)
            blob_area = segment.area

            if debug:
                lab = "blob #{}, area={}px**2".format(label, blob_area)
                plot_frames(blob_mask, circle_radius=fwhm, circle_alpha=0.6,
                            circle=tuple(tuple(xy) for xy in injections),
                            cmap='binary', label_size=8, label=lab,
                            size_factor=3)

            for iinj, injection in enumerate(injections):
                if injection[0] > sizex or injection[1] > sizey:
                    raise ValueError("Wrong coordinates in `injections`")

                if debug:
                    print("\ttesting injection #{} at {}".format(iinj + 1,
                                                                 injection))

                if blob_area > max_blob_fact * npix_circ_aperture:
                    number_of_apertures_in_blob = blob_area / npix_circ_aperture
                    fps += number_of_apertures_in_blob  # float, rounded at end
                    if debug:
                        print("\tblob is too big (+{:.0f} fps)"
                              "".format(number_of_apertures_in_blob))
                        print("\tskipping all other injections")
                    # continue with next blob, do not check other injections
                    break

                overlap = _overlap_injection_blob(injection, fwhm, blob_mask)
                if overlap > overlap_threshold:
                    if debug:
                        print("\toverlap of {}! (+1 detection)"
                              "".format(overlap))

                    detections += 1
                    # continue with next blob, do not check other injections
                    break

                if debug:
                    print("\toverlap of {} -> do nothing".format(overlap))

            else:
                if debug:
                    print("\tdid not find a matching injection for this "
                          "blob (+1 fps)")
                fps += 1

        if debug:
            print("done with threshold #{}".format(ithr))
            print("result: {} detections, {} fps".format(detections, fps))

        fps = np.round(fps).astype(int).item()  # -> python `int`

        list_detections.append(detections)
        list_binmaps.append(binmap)
        list_fps.append(fps)

    if plot:
        labs = tuple(str(det) + ' detections' + '\n' + str(fps) +
                     ' false positives' for det, fps in zip(list_detections,
                                                            list_fps))
        plot_frames(tuple(list_binmaps), title='Final binary maps', label=labs,
                    label_size=8, cmap='binary', circle_alpha=0.8,
                    circle=tuple(tuple(xy) for xy in injections),
                    circle_radius=fwhm, circle_color='deepskyblue', axis=False)

    return list_detections, list_fps, list_binmaps
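A hypothetical usage sketch of ``compute_binary_map`` as defined above, on a
toy detection map; it assumes the module-level imports used by the function
(e.g. ``detect_sources``, ``get_circle``, ``frame_center``, ``plot_frames``)
are available:

import numpy as np

detmap = np.zeros((100, 100))
detmap[58:62, 38:42] = 12.0              # bright blob at the injection site
injections = [(40, 60)]                  # (x, y) of the injected companion
thresholds = [3, 5, 9]

dets, fps, binmaps = compute_binary_map(detmap, thresholds, injections,
                                        fwhm=4, plot=True)
print(dets, fps)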
Example #33
def fit_2dgaussian(array,
                   crop=False,
                   cent=None,
                   cropsize=15,
                   fwhmx=4,
                   fwhmy=4,
                   theta=0,
                   threshold=False,
                   sigfactor=6,
                   full_output=True,
                   debug=True):
    """ Fitting a 2D Gaussian to the 2D distribution of the data.

    Parameters
    ----------
    array : numpy ndarray
        Input frame with a single PSF.
    crop : bool, optional
        If True a square sub image will be cropped equal to cropsize.
    cent : tuple of int, optional
        X,Y integer position of source in the array for extracting the subimage.
        If None the center of the frame is used for cropping the subframe (the
        PSF is assumed to be ~ at the center of the frame).
    cropsize : int, optional
        Size of the subimage.
    fwhmx, fwhmy : float, optional
        Initial values for the FWHM of the fitted 2d Gaussian, in px.
    theta : float, optional
        Angle of inclination of the 2d Gaussian counting from the positive X
        axis.
    threshold : bool, optional
        If True the background pixels (estimated using sigma clipped statistics)
        will be replaced by small random Gaussian noise.
    sigfactor : int, optional
        The background pixels will be thresholded before fitting a 2d Gaussian
        to the data using sigma clipped statistics. All values smaller than
        (MEDIAN + sigfactor*STDDEV) will be replaced by small random Gaussian
        noise.
    full_output : bool, optional
        If False it returns just the centroid, if True also returns the
        FWHM in X and Y (in pixels), the amplitude and the rotation angle,
        and the uncertainties on each parameter.
    debug : bool, optional
        If True, the function prints out parameters of the fit and plots the
        data, model and residuals.

    Returns
    -------
    mean_y : float
        Source centroid y position on input array from fitting.
    mean_x : float
        Source centroid x position on input array from fitting.

    If ``full_output`` is True it returns a Pandas dataframe containing the
    following columns:
        'centroid_y': Y coordinate of the centroid.
        'centroid_x': X coordinate of the centroid.
        'fwhm_y': Float value. FWHM in Y [px].
        'fwhm_x': Float value. FWHM in X [px].
        'amplitude': Amplitude of the Gaussian.
        'theta': Float value. Rotation angle. 
        # and fit uncertainties on the above values: 
        'centroid_y_err'
        'centroid_x_err'
        'fwhm_y_err'
        'fwhm_x_err'
        'amplitude_err' 
        'theta_err'

    """
    check_array(array, dim=2, msg='array')

    if crop:
        if cent is None:
            ceny, cenx = frame_center(array)
        else:
            cenx, ceny = cent

        imside = array.shape[0]
        psf_subimage, suby, subx = get_square(array,
                                              min(cropsize, imside),
                                              ceny,
                                              cenx,
                                              position=True)
    else:
        psf_subimage = array.copy()

    if threshold:
        _, clipmed, clipstd = sigma_clipped_stats(psf_subimage, sigma=2)
        indi = np.where(psf_subimage <= clipmed + sigfactor * clipstd)
        subimnoise = np.random.randn(psf_subimage.shape[0],
                                     psf_subimage.shape[1]) * clipstd
        psf_subimage[indi] = subimnoise[indi]

    # Creating the 2D Gaussian model
    init_amplitude = np.ptp(psf_subimage)
    xcom, ycom = cen_com(psf_subimage)
    gauss = models.Gaussian2D(amplitude=init_amplitude,
                              theta=theta,
                              x_mean=xcom,
                              y_mean=ycom,
                              x_stddev=fwhmx * gaussian_fwhm_to_sigma,
                              y_stddev=fwhmy * gaussian_fwhm_to_sigma)
    # Levenberg-Marquardt algorithm
    fitter = fitting.LevMarLSQFitter()
    y, x = np.indices(psf_subimage.shape)
    fit = fitter(gauss, x, y, psf_subimage)

    if crop:
        mean_y = fit.y_mean.value + suby
        mean_x = fit.x_mean.value + subx
    else:
        mean_y = fit.y_mean.value
        mean_x = fit.x_mean.value
    fwhm_y = fit.y_stddev.value * gaussian_sigma_to_fwhm
    fwhm_x = fit.x_stddev.value * gaussian_sigma_to_fwhm
    amplitude = fit.amplitude.value
    theta = np.rad2deg(fit.theta.value)

    # compute uncertainties
    if fitter.fit_info['param_cov'] is not None:
        perr = np.sqrt(np.diag(fitter.fit_info['param_cov']))
        amplitude_e, mean_x_e, mean_y_e, fwhm_x_e, fwhm_y_e, theta_e = perr
        fwhm_x_e /= gaussian_fwhm_to_sigma
        fwhm_y_e /= gaussian_fwhm_to_sigma
    else:
        amplitude_e, theta_e, mean_x_e = None, None, None
        mean_y_e, fwhm_x_e, fwhm_y_e = None, None, None

    if debug:
        if threshold:
            label = ('Subimage thresholded', 'Model', 'Residuals')
        else:
            label = ('Subimage', 'Model', 'Residuals')
        plot_frames((psf_subimage, fit(x, y), psf_subimage - fit(x, y)),
                    grid=True,
                    grid_spacing=1,
                    label=label)
        print('FWHM_y =', fwhm_y)
        print('FWHM_x =', fwhm_x, '\n')
        print('centroid y =', mean_y)
        print('centroid x =', mean_x)
        print('centroid y subim =', fit.y_mean.value)
        print('centroid x subim =', fit.x_mean.value, '\n')
        print('amplitude =', amplitude)
        print('theta =', theta)

    if full_output:
        return pd.DataFrame(
            {
                'centroid_y': mean_y,
                'centroid_x': mean_x,
                'fwhm_y': fwhm_y,
                'fwhm_x': fwhm_x,
                'amplitude': amplitude,
                'theta': theta,
                'centroid_y_err': mean_y_e,
                'centroid_x_err': mean_x_e,
                'fwhm_y_err': fwhm_y_e,
                'fwhm_x_err': fwhm_x_e,
                'amplitude_err': amplitude_e,
                'theta_err': theta_e
            },
            index=[0],
            dtype=np.float64)
    else:
        return mean_y, mean_x
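A minimal usage sketch (not from the original source) for ``fit_2dgaussian``
above, fitting a synthetic Gaussian PSF with known FWHMs; it assumes the
module-level imports used by the function (numpy, pandas, astropy and the VIP
utilities) are available:

import numpy as np
from astropy.modeling import models
from astropy.stats import gaussian_fwhm_to_sigma

yy, xx = np.mgrid[:31, :31]
psf = models.Gaussian2D(amplitude=1.0, x_mean=15.4, y_mean=15.8,
                        x_stddev=4 * gaussian_fwhm_to_sigma,
                        y_stddev=5 * gaussian_fwhm_to_sigma)(xx, yy)

df = fit_2dgaussian(psf, crop=True, cropsize=21, fwhmx=4, fwhmy=5,
                    full_output=True, debug=False)
print(df[['centroid_x', 'centroid_y', 'fwhm_x', 'fwhm_y']])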
Example #34
def snrmap(array,
           fwhm,
           approximated=False,
           plot=False,
           known_sources=None,
           nproc=None,
           array2=None,
           use2alone=False,
           exclude_negative_lobes=False,
           verbose=True,
           **kwargs):
    """Parallel implementation of the S/N map generation function. Applies the
    S/N function (small samples penalty) at each pixel.
    
    The S/N is computed as in Mawet et al. (2014) for each radial separation.    
    https://ui.adsabs.harvard.edu/abs/2014ApJ...792...97M/abstract
    
    *** DISCLAIMER ***
    Signal-to-noise ratio is not significance! For a conversion from snr to 
    n-sigma (i.e. the equivalent confidence level of a Gaussian n-sigma), use 
    the significance() function.    
    
    
    Parameters
    ----------
    array : numpy ndarray
        Input frame (2d array).
    fwhm : float
        Size in pixels of the FWHM.
    approximated : bool, optional
        If True, an approximated S/N map is generated.
    plot : bool, optional
        If True plots the S/N map. False by default.
    known_sources : None, tuple or tuple of tuples, optional
        To take into account existing sources. It should be a tuple of float/int
        or a tuple of tuples (of float/int) with the coordinate(s) of the known
        sources.
    nproc : int or None
        Number of processes for parallel computing.
    array2 : numpy ndarray, optional
        Additional image (e.g. processed image with negative derotation angles) 
        enabling to have more noise samples. Should have the 
        same dimensions as array.
    use2alone: bool, optional
        Whether to use array2 alone to estimate the noise (might be useful to 
        estimate the snr of extended disk features).
    verbose: bool, optional
        Whether to print timing or not.
    **kwargs : dictionary, optional
        Arguments to be passed to ``plot_frames`` to customize the plot (and to
        save it to disk).

    Returns
    -------
    snrmap : 2d numpy ndarray
        Frame with the same size as the input frame, with the S/N value
        computed at each pixel.
    """
    if verbose:
        start_time = time_ini()

    check_array(array, dim=2, msg='array')
    sizey, sizex = array.shape
    snrmap_array = np.zeros_like(array)
    width = min(sizey, sizex) / 2 - 1.5 * fwhm
    mask = get_annulus_segments(array, (fwhm / 2) + 2, width, mode="mask")[0]
    mask = np.ma.make_mask(mask)
    # by making a bool mask *after* applying the mask to the array, we also mask
    # out zero values from the array. This logic cannot be simplified by using
    # mode="ind"!
    yy, xx = np.where(mask)
    coords = zip(xx, yy)

    if nproc is None:
        nproc = cpu_count() // 2  # Hyper-threading doubles the # of cores

    if known_sources is None:

        # proxy to S/N calculation
        if approximated:
            cy, cx = frame_center(array)
            tophat_kernel = Tophat2DKernel(fwhm / 2)
            array = convolve(array, tophat_kernel)
            width = min(sizey, sizex) / 2 - 1.5 * fwhm
            mask = get_annulus_segments(array, (fwhm / 2) + 1,
                                        width - 1,
                                        mode="mask")[0]
            mask = np.ma.make_mask(mask)
            yy, xx = np.where(mask)
            coords = [(int(x), int(y)) for (x, y) in zip(xx, yy)]
            res = pool_map(nproc, _snr_approx, array, iterable(coords), fwhm,
                           cy, cx)
            res = np.array(res)
            yy = res[:, 0]
            xx = res[:, 1]
            snr_value = res[:, 2]
            snrmap_array[yy.astype(int), xx.astype(int)] = snr_value

        # computing s/n map with Mawet+14 definition
        else:
            res = pool_map(nproc, snr, array, iterable(coords), fwhm, True,
                           array2, use2alone, exclude_negative_lobes)
            res = np.array(res)
            yy = res[:, 0]
            xx = res[:, 1]
            snr_value = res[:, -1]
            snrmap_array[yy.astype('int'), xx.astype('int')] = snr_value

    # masking known sources
    else:
        if not isinstance(known_sources, tuple):
            raise TypeError("`known_sources` must be a tuple or tuple of "
                            "tuples")
        else:
            source_mask = np.zeros_like(array)
            if isinstance(known_sources[0], tuple):
                for coor in known_sources:
                    source_mask[coor[::-1]] = 1
            elif isinstance(known_sources[0], int):
                source_mask[known_sources[1], known_sources[0]] = 1
            else:
                raise TypeError("`known_sources` seems to have wrong type. It "
                                "must be a tuple of ints or tuple of tuples "
                                "(of ints)")

        # checking the mask with the sources
        if source_mask[source_mask == 1].shape[0] > 50:
            msg = 'Input source mask is too crowded (check its validity)'
            raise RuntimeError(msg)

        soury, sourx = np.where(source_mask == 1)
        sources = []
        coor_ann = []
        arr_masked_sources = array.copy()
        centery, centerx = frame_center(array)
        for y, x in zip(soury, sourx):
            radd = dist(centery, centerx, int(y), int(x))
            if int(radd) < centery - np.ceil(fwhm):
                sources.append((y, x))

        for source in sources:
            y, x = source
            radd = dist(centery, centerx, int(y), int(x))
            anny, annx = get_annulus_segments(array, int(radd - fwhm),
                                              int(np.round(3 * fwhm)))[0]

            ciry, cirx = disk((y, x), int(np.ceil(fwhm)))
            # masking the sources positions (using the MAD of pixels in annulus)
            arr_masked_sources[ciry, cirx] = mad(array[anny, annx])

            # S/Ns of annulus without the sources
            coor_ann = [(x, y) for (x, y) in zip(annx, anny)
                        if (x, y) not in zip(cirx, ciry)]
            res = pool_map(nproc, snr, arr_masked_sources, iterable(coor_ann),
                           fwhm, True, array2, use2alone,
                           exclude_negative_lobes)
            res = np.array(res)
            yy_res = res[:, 0]
            xx_res = res[:, 1]
            snr_value = res[:, 4]
            snrmap_array[yy_res.astype('int'),
                         xx_res.astype('int')] = snr_value
            coor_ann += coor_ann

        # S/Ns of the rest of the frame without the annulus
        coor_rest = [(x, y) for (x, y) in zip(xx, yy)
                     if (x, y) not in coor_ann]
        res = pool_map(nproc, snr, array, iterable(coor_rest), fwhm, True,
                       array2, use2alone, exclude_negative_lobes)
        res = np.array(res)
        yy_res = res[:, 0]
        xx_res = res[:, 1]
        snr_value = res[:, 4]
        snrmap_array[yy_res.astype('int'), xx_res.astype('int')] = snr_value

    if plot:
        plot_frames(snrmap_array, colorbar=True, title='S/N map', **kwargs)

    if verbose:
        print("S/N map created using {} processes".format(nproc))
        timing(start_time)
    return snrmap_array
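A toy usage sketch for ``snrmap`` as defined above (it assumes the module-level
helpers it calls, e.g. ``get_annulus_segments``, ``pool_map`` and ``snr``, are
importable), applied to a synthetic residual frame with one bright source:

import numpy as np

frame = np.random.normal(0, 1.0, (101, 101))
frame[50, 70] += 15.0                    # bright blob / candidate companion

sn_map = snrmap(frame, fwhm=4, plot=True, nproc=2)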
Example #35
def fit_2d2gaussian(array,
                    crop=False,
                    cent=None,
                    cropsize=15,
                    fwhm_neg=4,
                    fwhm_pos=4,
                    theta_neg=0,
                    theta_pos=0,
                    neg_amp=1,
                    fix_neg=True,
                    threshold=False,
                    sigfactor=2,
                    full_output=False,
                    debug=True):
    """ Fitting a 2D superimposed double Gaussian (negative and positive) to 
    the 2D distribution of the data (reproduce e.g. the effect of a coronagraph)

    Parameters
    ----------
    array : numpy ndarray
        Input frame with a single PSF.
    crop : bool, optional
        If True a square sub image will be cropped equal to cropsize.
    cent : tuple of float, optional
        X,Y position of the source in the array for extracting the 
        subimage. If None the center of the frame is used for cropping the 
        subframe. If fix_neg is set to True, this will also be used as the 
        fixed position of the negative gaussian.
    cropsize : int, optional
        Size of the subimage.
    fwhm_neg, fwhm_pos : float or tuple of floats, optional
        Initial values for the FWHM of the fitted negative and positive 
        Gaussians, in px. If a tuple, should be the FWHM value along x and y.
    theta_neg, theta_pos: float, optional
        Angle of inclination of the 2d Gaussian counting from the positive X
        axis (only matters if a tuple was provided for fwhm_neg or fwhm_pos).
    neg_amp: float, optional
        First guess on the amplitude of the negative gaussian, relative to the
        amplitude of the positive gaussian (i.e. 1 means the negative gaussian
        has the same amplitude as the positive gaussian)
    fix_neg: bool, optional
        Whether to fix the position and FWHM of the negative gaussian, for a
        fit with fewer free parameters. In that case, the center of the
        negative gaussian is assumed to be ``cent``.
    threshold : bool, optional
        If True the background pixels (estimated using sigma clipped statistics)
        will be replaced by small random Gaussian noise.
    sigfactor : int, optional
        The background pixels will be thresholded before fitting a 2d Gaussian
        to the data using sigma clipped statistics. All values smaller than
        (MEDIAN + sigfactor*STDDEV) will be replaced by small random Gaussian
        noise.
    full_output : bool, optional
        If False it returns just the centroid, if True also returns the
        FWHM in X and Y (in pixels), the amplitude and the rotation angle,
        and the uncertainties on each parameter.
    debug : bool, optional
        If True, the function prints out parameters of the fit and plots the
        data, model and residuals.

    Returns
    -------
    mean_y : float
        Source centroid y position on input array from fitting.
    mean_x : float
        Source centroid x position on input array from fitting.

    If ``full_output`` is True it returns a Pandas dataframe containing the
    following columns:
    - for the positive gaussian:
    'amplitude' : Float value. Amplitude of the Gaussian.
    'centroid_x' : Float value. X coordinate of the centroid.
    'centroid_y' : Float value. Y coordinate of the centroid.
    'fwhm_x' : Float value. FWHM in X [px].
    'fwhm_y' : Float value. FWHM in Y [px].
    'theta' : Float value. Rotation angle of the x axis [deg].
    - for the negative gaussian:
    'amplitude_neg' : Float value. Amplitude of the Gaussian.
    'centroid_x_neg' : Float value. X coordinate of the centroid.
    'centroid_y_neg' : Float value. Y coordinate of the centroid.
    'fwhm_x_neg' : Float value. FWHM in X [px].
    'fwhm_y_neg' : Float value. FWHM in Y [px].
    'theta_neg' : Float value. Rotation angle of the x axis [deg].
    """
    if not array.ndim == 2:
        raise TypeError('Input array is not a frame or 2d array')

    if cent is None:
        ceny, cenx = frame_center(array)
    else:
        cenx, ceny = cent

    if crop:
        x_sub_px = cenx % 1
        y_sub_px = ceny % 1

        imside = array.shape[0]
        psf_subimage, suby, subx = get_square(array,
                                              min(cropsize, imside),
                                              int(ceny),
                                              int(cenx),
                                              position=True)
        ceny, cenx = frame_center(psf_subimage)
        ceny += y_sub_px
        cenx += x_sub_px
    else:
        psf_subimage = array.copy()

    if threshold:
        _, clipmed, clipstd = sigma_clipped_stats(psf_subimage, sigma=2)
        indi = np.where(psf_subimage <= clipmed + sigfactor * clipstd)
        subimnoise = np.random.randn(psf_subimage.shape[0],
                                     psf_subimage.shape[1]) * clipstd
        psf_subimage[indi] = subimnoise[indi]

    if isinstance(fwhm_neg, tuple):
        fwhm_neg_x, fwhm_neg_y = fwhm_neg
    else:
        fwhm_neg_x = fwhm_neg
        fwhm_neg_y = fwhm_neg

    if isinstance(fwhm_pos, tuple):
        fwhm_pos_x, fwhm_pos_y = fwhm_pos
    else:
        fwhm_pos_x = fwhm_pos
        fwhm_pos_y = fwhm_pos

    # Creating the 2D Gaussian model
    init_amplitude = np.ptp(psf_subimage)
    #xcom, ycom = cen_com(psf_subimage)
    ycom, xcom = frame_center(psf_subimage)
    fix_dico_pos = {'theta': True}
    bounds_dico_pos = {
        'amplitude': [0.8 * init_amplitude, 1.2 * init_amplitude],
        'x_mean': [xcom - 3, xcom + 3],
        'y_mean': [ycom - 3, ycom + 3],
        'x_stddev': [
            0.5 * fwhm_pos_x * gaussian_fwhm_to_sigma,
            2 * fwhm_pos_x * gaussian_fwhm_to_sigma
        ],
        'y_stddev': [
            0.5 * fwhm_pos_y * gaussian_fwhm_to_sigma,
            2 * fwhm_pos_y * gaussian_fwhm_to_sigma
        ]
    }

    gauss_pos = models.Gaussian2D(amplitude=init_amplitude,
                                  x_mean=xcom,
                                  y_mean=ycom,
                                  x_stddev=fwhm_pos_x * gaussian_fwhm_to_sigma,
                                  y_stddev=fwhm_pos_y * gaussian_fwhm_to_sigma,
                                  theta=np.deg2rad(theta_pos) % (np.pi),
                                  fixed=fix_dico_pos,
                                  bounds=bounds_dico_pos)
    if fix_neg:
        fix_dico_neg = {
            'x_mean': True,
            'y_mean': True,
            'x_stddev': True,
            'y_stddev': True,
            'theta': True
        }
        bounds_dico_neg = {
            'amplitude':
            [neg_amp * 0.5 * init_amplitude, neg_amp * 2 * init_amplitude]
        }
    else:
        fix_dico_neg = {}  #{'theta':True}
        bounds_dico_neg = {
            'amplitude':
            [neg_amp * 0.5 * init_amplitude, neg_amp * 2 * init_amplitude],
            'x_mean': [xcom - 3, xcom + 3],
            'y_mean': [ycom - 3, ycom + 3],
            'x_stddev': [
                0.5 * fwhm_neg_x * gaussian_fwhm_to_sigma,
                2 * fwhm_neg_x * gaussian_fwhm_to_sigma
            ],
            'y_stddev': [
                0.5 * fwhm_neg_y * gaussian_fwhm_to_sigma,
                2 * fwhm_neg_y * gaussian_fwhm_to_sigma
            ],
            'theta': [0, np.pi]
        }

    gauss_neg = models.Gaussian2D(amplitude=init_amplitude * neg_amp,
                                  x_mean=cenx,
                                  y_mean=ceny,
                                  x_stddev=fwhm_neg_x * gaussian_fwhm_to_sigma,
                                  y_stddev=fwhm_neg_y * gaussian_fwhm_to_sigma,
                                  theta=np.deg2rad(theta_neg) % (np.pi),
                                  fixed=fix_dico_neg,
                                  bounds=bounds_dico_neg)

    double_gauss = gauss_pos - gauss_neg

    fitter = fitting.LevMarLSQFitter()  # alternative: fitting.SLSQPLSQFitter()
    y, x = np.indices(psf_subimage.shape)
    fit = fitter(double_gauss, x, y, psf_subimage, maxiter=100000, acc=1e-08)

    # positive gaussian
    if crop:
        mean_y = fit.y_mean_0.value + suby
        mean_x = fit.x_mean_0.value + subx
    else:
        mean_y = fit.y_mean_0.value
        mean_x = fit.x_mean_0.value
    fwhm_y = fit.y_stddev_0.value * gaussian_sigma_to_fwhm
    fwhm_x = fit.x_stddev_0.value * gaussian_sigma_to_fwhm
    amplitude = fit.amplitude_0.value
    theta = np.rad2deg(fit.theta_0.value)

    # negative gaussian
    if crop:
        mean_y_neg = fit.y_mean_1.value + suby
        mean_x_neg = fit.x_mean_1.value + subx
    else:
        mean_y_neg = fit.y_mean_1.value
        mean_x_neg = fit.x_mean_1.value
    fwhm_y_neg = fit.y_stddev_1.value * gaussian_sigma_to_fwhm
    fwhm_x_neg = fit.x_stddev_1.value * gaussian_sigma_to_fwhm
    amplitude_neg = fit.amplitude_1.value
    theta_neg = np.rad2deg(fit.theta_1.value)

    if debug:
        if threshold:
            label = ('Subimage thresholded', 'Model', 'Residuals')
        else:
            label = ('Subimage', 'Model', 'Residuals')
        plot_frames((psf_subimage, fit(x, y), psf_subimage - fit(x, y)),
                    grid=True,
                    grid_spacing=1,
                    label=label)
        print('FWHM_y =', fwhm_y)
        print('FWHM_x =', fwhm_x, '\n')
        print('centroid y =', mean_y)
        print('centroid x =', mean_x)
        print('centroid y subim =', fit.y_mean_0.value)
        print('centroid x subim =', fit.x_mean_0.value, '\n')
        print('amplitude =', amplitude)
        print('theta =', theta)
        print('FWHM_y (neg) =', fwhm_y_neg)
        print('FWHM_x (neg) =', fwhm_x_neg, '\n')
        print('centroid y (neg) =', mean_y_neg)
        print('centroid x (neg) =', mean_x_neg)
        print('centroid y subim (neg) =', fit.y_mean_1.value)
        print('centroid x subim (neg) =', fit.x_mean_1.value, '\n')
        print('amplitude (neg) =', amplitude_neg)
        print('theta (neg) =', theta_neg)

    if full_output:
        return pd.DataFrame(
            {
                'centroid_y': mean_y,
                'centroid_x': mean_x,
                'fwhm_y': fwhm_y,
                'fwhm_x': fwhm_x,
                'amplitude': amplitude,
                'theta': theta,
                'centroid_y_neg': mean_y_neg,
                'centroid_x_neg': mean_x_neg,
                'fwhm_y_neg': fwhm_y_neg,
                'fwhm_x_neg': fwhm_x_neg,
                'amplitude_neg': amplitude_neg,
                'theta_neg': theta_neg
            },
            index=[0],
            dtype=np.float64)
    else:
        return mean_y, mean_x
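
# A minimal, self-contained sketch (not part of the original source) of the core
# technique used above: fitting a compound "positive minus negative" Gaussian2D model
# with astropy on a synthetic frame. All sizes, positions and amplitudes below are
# arbitrary placeholders chosen for illustration.
import numpy as np
from astropy.modeling import models, fitting
from astropy.stats import gaussian_fwhm_to_sigma

ny, nx = 41, 41
y, x = np.indices((ny, nx))
# synthetic data: a bright PSF with a fainter, narrower dip superimposed
truth = (models.Gaussian2D(100, 20, 20, 4 * gaussian_fwhm_to_sigma,
                           4 * gaussian_fwhm_to_sigma)
         - models.Gaussian2D(30, 20, 20, 2 * gaussian_fwhm_to_sigma,
                             2 * gaussian_fwhm_to_sigma))
data = truth(x, y) + np.random.normal(0, 1, (ny, nx))

# compound model: the second component is subtracted from the first, and the fitted
# parameters carry the suffixes _0 (positive) and _1 (negative)
gauss_pos = models.Gaussian2D(amplitude=np.ptp(data), x_mean=20, y_mean=20,
                              x_stddev=4 * gaussian_fwhm_to_sigma,
                              y_stddev=4 * gaussian_fwhm_to_sigma)
gauss_neg = models.Gaussian2D(amplitude=0.5 * np.ptp(data), x_mean=20, y_mean=20,
                              x_stddev=2 * gaussian_fwhm_to_sigma,
                              y_stddev=2 * gaussian_fwhm_to_sigma)
fit = fitting.LevMarLSQFitter()(gauss_pos - gauss_neg, x, y, data, maxiter=10000)
print(fit.x_mean_0.value, fit.y_mean_0.value)  # centroid of the positive component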
示例#36
0
def snrmap_fast(array, fwhm, nproc=None, plot=False, verbose=True, **kwargs):
    """ Approximated S/N map generation. To be used as a quick proxy of the
    S/N map generated using the small samples statistics definition.

    Parameters
    ----------
    array : 2d array_like
        Input frame.
    fwhm : float
        Size in pixels of the FWHM.
    nproc : int or None
        Number of processes for parallel computing.
    plot : bool, optional
        If True plots the S/N map.
    verbose: bool, optional
        Whether to print timing or not.
    **kwargs : dictionary, optional
        Arguments to be passed to ``plot_frames`` to customize the plot (and to
        save it to disk).

    Returns
    -------
    snrmap : array_like
        Frame of the same size as the input frame, with the approximated S/N value
        computed in each pixel of the annular region of interest.
    """
    if verbose:
        start_time = time_ini()
    if array.ndim != 2:
        raise TypeError('Input array is not a 2d array or image.')

    cy, cx = frame_center(array)
    tophat_kernel = Tophat2DKernel(fwhm / 2)
    array = convolve(array, tophat_kernel)

    sizey, sizex = array.shape
    snrmap = np.zeros_like(array)
    width = min(sizey, sizex) / 2 - 1.5 * fwhm
    mask = get_annulus_segments(array, (fwhm / 2) + 1, width - 1,
                                mode="mask")[0]
    mask = np.ma.make_mask(mask)
    yy, xx = np.where(mask)
    coords = [(x, y) for (x, y) in zip(xx, yy)]

    if nproc is None:
        nproc = cpu_count() // 2  # Hyper-threading doubles the # of cores

    if nproc == 1:
        for (y, x) in zip(yy, xx):
            snrmap[y, x] = _snr_approx(array, (x, y), fwhm, cy, cx)[2]
    elif nproc > 1:
        res = pool_map(nproc, _snr_approx, array, iterable(coords), fwhm, cy,
                       cx)
        res = np.array(res)
        yy = res[:, 0]
        xx = res[:, 1]
        snr = res[:, 2]
        snrmap[yy.astype(int), xx.astype(int)] = snr

    if plot:
        plot_frames(snrmap, colorbar=True, title='S/N map', **kwargs)

    if verbose:
        print("S/N map created using {} processes.".format(nproc))
        timing(start_time)
    return snrmap
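
# Hypothetical usage sketch (not from the source): the frame and FWHM below are
# synthetic placeholders. With real data, `frame` would be a post-processed
# (e.g. PCA-reduced) image and `fwhm` the measured PSF FWHM in pixels.
import numpy as np
frame = np.random.normal(size=(101, 101))   # placeholder reduced frame
snr = snrmap_fast(frame, fwhm=4.8, nproc=1, plot=False)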
示例#37
0
def chisquare(modelParameters, cube, angs, plsc, psfs_norm, fwhm, annulus_width,  
              aperture_radius, initialState, ncomp, cube_ref=None, 
              svd_mode='lapack', scaling=None, fmerit='sum', collapse='median',
              imlib='opencv', interpolation='lanczos4', debug=False):
    """
    Calculate the reduced chi2:
    \chi^2_r = \frac{1}{N-3}\sum_{j=1}^{N} |I_j|,
    where N is the number of pixels within a circular aperture centered on the 
    first estimate of the planet position, and I_j the j-th pixel intensity.
    
    Parameters
    ----------    
    modelParameters: tuple
        The model parameters, typically (r, theta, flux).
    cube: numpy.array
        The cube of fits images expressed as a numpy.array.
    angs: numpy.array
        The parallactic angles associated with the cube, expressed as a numpy.array.
    plsc: float
        The platescale, in arcsec per pixel.
    psfs_norm: numpy.array
        The scaled psf expressed as a numpy.array.    
    fwhm : float
        The FWHM in pixels.
    annulus_width: int, optional
        The width in terms of the FWHM of the annulus on which the PCA is done.       
    aperture_radius: int, optional
        The radius of the circular aperture in terms of the FWHM.
    initialState: numpy.array
        Position (r, theta) of the circular aperture center.
    ncomp: int
        The number of principal components.
    cube_ref : numpy ndarray, 3d, optional
        Reference library cube. For Reference Star Differential Imaging.
    svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
        Switch for different ways of computing the SVD and selected PCs.         
    scaling : {'temp-mean', 'temp-standard'} or None, optional
        With None, no scaling is performed on the input data before SVD. With 
        "temp-mean" then temporal px-wise mean subtraction is done and with 
        "temp-standard" temporal mean centering plus scaling to unit variance 
        is done. 
    fmerit : {'sum', 'stddev'}, string optional
        Chooses the figure of merit to be used. stddev works better for close-in
        companions sitting on top of speckle noise.
    collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
        Sets the way of collapsing the frames for producing a final image. If
        None then the cube of residuals is used when measuring the function of
        merit (instead of a single final frame).
    imlib : str, optional
        See the documentation of the ``vip_hci.preproc.frame_shift`` function.
    interpolation : str, optional
        See the documentation of the ``vip_hci.preproc.frame_shift`` function.
        
    Returns
    -------
    out: float
        The reduced chi squared.
        
    """    
    try:
        r, theta, flux = modelParameters
    except TypeError:
        msg = 'modelParameters must be a tuple, {} was given'
        raise TypeError(msg.format(type(modelParameters)))

    # Create the cube with the negative fake companion injected
    cube_negfc = cube_inject_companions(cube, psfs_norm, angs, flevel=-flux,
                                        plsc=plsc, rad_dists=[r], n_branches=1,
                                        theta=theta, imlib=imlib, verbose=False,
                                        interpolation=interpolation)
                                      
    # Perform PCA and extract the zone of interest
    res = get_values_optimize(cube_negfc, angs, ncomp, annulus_width*fwhm,
                              aperture_radius*fwhm, initialState[0],
                              initialState[1], cube_ref=cube_ref,
                              svd_mode=svd_mode, scaling=scaling,
                              collapse=collapse, debug=debug)
    if debug and collapse is not None:
        values, frpca = res
        plot_frames(frpca)
    else:
        values = res
    
    # Function of merit
    if fmerit == 'sum':
        values = np.abs(values)
        chi2 = np.sum(values[values > 0])
        N = len(values[values > 0])
        return chi2 / (N-3)
    elif fmerit == 'stddev':
        return np.std(values[values != 0])
    else:
        raise RuntimeError('`fmerit` choice not recognized')
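
# Hypothetical sketch (not part of the source): minimizing the figure of merit above
# over (r, theta, flux) with a Nelder-Mead simplex, as typically done for the negative
# fake companion technique. The cube, angles, PSF and platescale below are synthetic
# placeholders; with real data they come from the calibrated observation.
import numpy as np
from scipy.optimize import minimize

n, sz = 20, 101
cube = np.random.normal(size=(n, sz, sz))   # placeholder ADI cube
angs = np.linspace(0, 40, n)                # placeholder parallactic angles
yy, xx = np.mgrid[:15, :15]
psfn = np.exp(-((xx - 7)**2 + (yy - 7)**2) / (2 * 1.7**2))  # placeholder PSF, FWHM ~ 4 px

p0 = (30.0, 120.0, 400.0)   # initial (r [px], theta [deg], flux) guess
# args follow the signature above: (cube, angs, plsc, psfs_norm, fwhm, annulus_width,
# aperture_radius, initialState, ncomp); the platescale 0.0272''/px is a placeholder
res = minimize(chisquare, p0,
               args=(cube, angs, 0.0272, psfn, 4.0, 3, 2, p0, 5),
               method='Nelder-Mead', options={'xatol': 0.01, 'maxiter': 100})
r_opt, theta_opt, flux_opt = res.x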
示例#38
0
    def dark_subtract(self, verbose=True, debug=True):
        def _read_list(fname):
            # helper: read a list of FITS file names (one per line) from the output path
            with open(self.outpath + fname, "r") as f:
                return [line.split('\n')[0] for line in f.readlines()]

        sci_list = _read_list("sci_list.txt")
        sky_list = _read_list("sky_list.txt")
        unsat_list = _read_list("unsat_list.txt")
        unsat_dark_list = _read_list("unsat_dark_list.txt")
        flat_list = _read_list("flat_list.txt")
        flat_dark_list = _read_list("flat_dark_list.txt")
        sci_dark_list = _read_list("sci_dark_list.txt")
        pixel_scale = fits_info.pixel_scale

        tmp = np.zeros([len(flat_dark_list), self.com_sz, self.com_sz])
        # creating master dark cubes
        for fd, fd_name in enumerate(flat_dark_list):
            tmp_tmp = open_fits(self.inpath + fd_name,
                                header=False,
                                verbose=debug)
            tmp[fd] = frame_crop(tmp_tmp,
                                 self.com_sz,
                                 force=True,
                                 verbose=debug)
        write_fits(self.outpath + 'flat_dark_cube.fits', tmp)
        if verbose:
            print('Flat dark cubes have been cropped and saved')

        for sd, sd_name in enumerate(sci_dark_list):
            tmp_tmp = open_fits(self.inpath + sd_name,
                                header=False,
                                verbose=debug)
            n_dim = tmp_tmp.ndim
            if sd == 0:
                if n_dim == 2:
                    tmp = np.array([
                        frame_crop(tmp_tmp,
                                   self.com_sz,
                                   force=True,
                                   verbose=debug)
                    ])
                else:
                    tmp = cube_crop_frames(tmp_tmp,
                                           self.com_sz,
                                           force=True,
                                           verbose=debug)
            else:
                if n_dim == 2:
                    tmp = np.append(tmp, [
                        frame_crop(
                            tmp_tmp, self.com_sz, force=True, verbose=debug)
                    ],
                                    axis=0)
                else:
                    tmp = np.append(tmp,
                                    cube_crop_frames(tmp_tmp,
                                                     self.com_sz,
                                                     force=True,
                                                     verbose=debug),
                                    axis=0)
        write_fits(self.outpath + 'sci_dark_cube.fits', tmp)
        if verbose:
            print('Sci dark cubes have been cropped and saved')

        # if the unsat dark frames are not larger than the common size, crop them to nx - 1 instead
        for sd, sd_name in enumerate(unsat_dark_list):
            tmp_tmp = open_fits(self.inpath + sd_name,
                                header=False,
                                verbose=debug)
            n_dim = tmp_tmp.ndim
            if sd == 0:
                if n_dim == 2:
                    ny, nx = tmp_tmp.shape
                    if nx <= self.com_sz:
                        tmp = np.array([
                            frame_crop(tmp_tmp,
                                       nx - 1,
                                       force=True,
                                       verbose=debug)
                        ])
                    else:
                        tmp = np.array(
                            [frame_crop(tmp_tmp, self.com_sz, verbose=debug)])
                else:
                    nz, ny, nx = tmp_tmp.shape
                    if nx <= self.com_sz:
                        tmp = cube_crop_frames(tmp_tmp,
                                               nx - 1,
                                               force=True,
                                               verbose=debug)
                    else:
                        tmp = cube_crop_frames(tmp_tmp,
                                               self.com_sz,
                                               force=True,
                                               verbose=debug)
            else:
                if n_dim == 2:
                    ny, nx = tmp_tmp.shape
                    if nx <= self.com_sz:
                        tmp = np.append(tmp, [
                            frame_crop(
                                tmp_tmp, nx - 1, force=True, verbose=debug)
                        ],
                                        axis=0)
                    else:
                        tmp = np.append(tmp, [
                            frame_crop(tmp_tmp,
                                       self.com_sz,
                                       force=True,
                                       verbose=debug)
                        ],
                                        axis=0)
                else:
                    nz, ny, nx = tmp_tmp.shape
                    if nx <= self.com_sz:
                        tmp = cube_crop_frames(tmp,
                                               nx - 1,
                                               force=True,
                                               verbose=debug)
                        tmp = np.append(tmp,
                                        cube_crop_frames(tmp_tmp,
                                                         nx - 1,
                                                         force=True,
                                                         verbose=debug),
                                        axis=0)
                    else:
                        tmp = np.append(tmp,
                                        cube_crop_frames(tmp_tmp,
                                                         self.com_sz,
                                                         force=True,
                                                         verbose=debug),
                                        axis=0)
        write_fits(self.outpath + 'unsat_dark_cube.fits', tmp)
        if verbose:
            print('Unsat dark cubes have been cropped and saved')

        # defining the annulus (this is where we avoid correcting around the star)
        cy, cx = find_agpm_list(self, sci_list)
        self.agpm_pos = (cx, cy)
        if verbose:
            print(' The location of the AGPM has been calculated', 'cy = ', cy,
                  'cx = ', cx)

        agpm_dedge = min(self.agpm_pos[0], self.agpm_pos[1],
                         self.com_sz - self.agpm_pos[0],
                         self.com_sz - self.agpm_pos[1])
        mask_arr = np.ones([self.com_sz, self.com_sz])
        cy, cx = frame_center(mask_arr)
        mask_inner_rad = int(3.0 /
                             pixel_scale)  # 3arcsec min to avoid star emission
        mask_width = agpm_dedge - mask_inner_rad - 1
        mask_AGPM_com = get_annulus_segments(mask_arr,
                                             mask_inner_rad,
                                             mask_width,
                                             mode='mask')[0]
        mask_AGPM_com = frame_shift(
            mask_AGPM_com, self.agpm_pos[1] - cy, self.agpm_pos[0] -
            cx)  # agpm is not centered in the frame so shift the mask
        if verbose:
            print('AGPM mask has been defined')
        if debug:
            tmp = open_fits(self.outpath + sci_list[0])
            #plot_frames(tmp[-1], circle = self.agpm_pos)

        # now begin the dark subtraction using PCA
        npc_dark = 1  # 1 principal component was found to give the best result
        tmp_tmp = np.zeros([len(flat_list), self.com_sz, self.com_sz])
        tmp_tmp_tmp = open_fits(self.outpath + 'flat_dark_cube.fits')
        for fl, flat_name in enumerate(flat_list):
            tmp = open_fits(self.inpath + flat_name,
                            header=False,
                            verbose=debug)
            tmp_tmp[fl] = frame_crop(tmp,
                                     self.com_sz,
                                     force=True,
                                     verbose=debug)
        tmp_tmp = cube_subtract_sky_pca(tmp_tmp,
                                        tmp_tmp_tmp,
                                        mask_AGPM_com,
                                        ref_cube=None,
                                        ncomp=npc_dark)
        write_fits(self.outpath + '1_crop_flat_cube.fits', tmp_tmp)
        if verbose:
            print('Dark has been subtracted from Flats')
        if debug:
            # plot the median flat dark, the last flat before subtraction, and the
            # median flat after dark subtraction
            tmp_tmp_tmp = np.median(tmp_tmp_tmp, axis=0)  # median of the flat dark cube
            # tmp already holds the last flat frame read before subtraction
            tmp_tmp = np.median(tmp_tmp, axis=0)  # median flat after dark subtraction
            plot_frames((tmp_tmp_tmp, tmp, tmp_tmp))

        tmp_tmp_tmp = open_fits(self.outpath + 'sci_dark_cube.fits')
        for sc, fits_name in enumerate(sci_list):
            tmp = open_fits(self.inpath + fits_name,
                            header=False,
                            verbose=debug)
            tmp = cube_crop_frames(tmp, self.com_sz, force=True, verbose=debug)
            tmp_tmp = cube_subtract_sky_pca(tmp,
                                            tmp_tmp_tmp,
                                            mask_AGPM_com,
                                            ref_cube=None,
                                            ncomp=npc_dark)
            write_fits(self.outpath + '1_crop_' + fits_name, tmp_tmp)
        if verbose:
            print('Dark has been subtracted from Sci')
        if debug:
            # plot the median dark, the median sci cube before subtraction, and the
            # median after subtraction
            tmp_tmp_tmp = np.median(tmp_tmp_tmp, axis=0)
            tmp = np.median(tmp, axis=0)
            tmp_tmp = np.median(tmp_tmp, axis=0)
            plot_frames((tmp_tmp_tmp, tmp, tmp_tmp))

        tmp_tmp_tmp = open_fits(self.outpath + 'sci_dark_cube.fits')
        for sc, fits_name in enumerate(sky_list):
            tmp = open_fits(self.inpath + fits_name,
                            header=False,
                            verbose=debug)
            tmp = cube_crop_frames(tmp, self.com_sz, force=True, verbose=debug)
            tmp_tmp = cube_subtract_sky_pca(tmp,
                                            tmp_tmp_tmp,
                                            mask_AGPM_com,
                                            ref_cube=None,
                                            ncomp=npc_dark)
            write_fits(self.outpath + '1_crop_' + fits_name, tmp_tmp)
        if verbose:
            print('Dark has been subtracted from Sky')
        if debug:
            # plot the median dark, the median sky cube before subtraction, and the
            # median after subtraction
            tmp_tmp_tmp = np.median(tmp_tmp_tmp, axis=0)
            tmp = np.median(tmp, axis=0)
            tmp_tmp = np.median(tmp_tmp, axis=0)
            plot_frames((tmp_tmp_tmp, tmp, tmp_tmp))

        tmp_tmp_tmp = open_fits(self.outpath + 'master_unsat_dark.fits')
        # no need to crop the unsat frames to the same size as the sci images if they are smaller
        for un, fits_name in enumerate(unsat_list):
            tmp = open_fits(self.inpath + fits_name, header=False)
            #tmp = cube_crop_frames(tmp,nx_unsat_crop)
            if tmp.shape[2] > self.com_sz:
                nx_unsat_crop = self.com_sz
                tmp_tmp = cube_crop_frames(tmp - tmp_tmp_tmp,
                                           nx_unsat_crop,
                                           force=True,
                                           verbose=debug)
            elif tmp.shape[2] % 2 == 0:
                nx_unsat_crop = tmp.shape[2] - 1
                tmp_tmp = cube_crop_frames(tmp - tmp_tmp_tmp,
                                           nx_unsat_crop,
                                           force=True,
                                           verbose=debug)
            else:
                nx_unsat_crop = tmp.shape[2]
                tmp_tmp = tmp - tmp_tmp_tmp
            write_fits(self.outpath + '1_crop_unsat_' + fits_name, tmp_tmp)
        if verbose:
            print('Unsat frames have been cropped and dark subtracted')
        if debug:
            # plot the median dark, the median unsat cube before subtraction, and the
            # median after subtraction
            tmp_tmp_tmp = np.median(tmp_tmp_tmp, axis=0)
            tmp = np.median(tmp, axis=0)
            tmp_tmp = np.median(tmp_tmp, axis=0)
            plot_frames((tmp_tmp_tmp, tmp, tmp_tmp))
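
# Condensed sketch (not from the source) of the PCA dark-subtraction step used above,
# isolated from the pipeline bookkeeping. All sizes, positions and cubes are
# placeholders, and the module paths are assumed to follow the usual VIP layout
# (vip_hci.var / vip_hci.preproc) for the functions already used in the method above.
import numpy as np
from vip_hci.var import frame_center, get_annulus_segments
from vip_hci.preproc import frame_shift, cube_subtract_sky_pca

com_sz = 400                                  # placeholder common frame size
agpm_y, agpm_x = 207.0, 195.0                 # placeholder AGPM position
sci_cube = np.random.normal(size=(5, com_sz, com_sz))   # placeholder science cube
dark_cube = np.random.normal(size=(3, com_sz, com_sz))  # placeholder dark cube

# annular mask excluding the star: the PCA dark model is fitted only on these pixels
mask = np.ones([com_sz, com_sz])
cy, cx = frame_center(mask)
mask = get_annulus_segments(mask, 110, 80, mode='mask')[0]
mask = frame_shift(mask, agpm_y - cy, agpm_x - cx)  # recenter the annulus on the AGPM
dark_subtracted = cube_subtract_sky_pca(sci_cube, dark_cube, mask, ref_cube=None,
                                        ncomp=1)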