Example #1
0
def save_mosaic(stack, nfiles, patch_size, name, diff_std, threshold):
    # Tile the stamps into a grid with 1-pixel separators, and outline any
    # stamp whose difference-image scatter exceeds the threshold.
    stamps_per_row = int(np.sqrt(nfiles))
    nrows = (nfiles - 1) // stamps_per_row + 1
    mx = stamps_per_row * (patch_size + 1) + 1
    my = nrows * (patch_size + 1) + 1
    mosaic = np.ones((my, mx)) * 1000.0
    for i in range(nfiles):
        row = i // stamps_per_row
        col = i % stamps_per_row
        y0 = row * (patch_size + 1) + 1
        y1 = (row + 1) * (patch_size + 1)
        x0 = col * (patch_size + 1) + 1
        x1 = (col + 1) * (patch_size + 1)
        mosaic[y0:y1, x0:x1] = stack[i, :, :]
        if diff_std[i] > threshold:
            mosaic[y0:y1, x0] = -1000.0
            mosaic[y0:y1, x1 - 1] = -1000.0
            mosaic[y0, x0:x1] = -1000.0
            mosaic[y1 - 1, x0:x1] = -1000.0
    IO.write_image(mosaic, name)
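The tiling arithmetic above places stamp i at row i // stamps_per_row and column i % stamps_per_row, with grid cells patch_size + 1 pixels apart so a 1-pixel separator remains. Below is a minimal, self-contained sketch of just that layout on random data (no pyDIA dependencies; tile_stamps is a hypothetical name):

import numpy as np

def tile_stamps(stack, patch_size):
    # stack has shape (nfiles, patch_size, patch_size)
    nfiles = stack.shape[0]
    stamps_per_row = int(np.sqrt(nfiles))
    nrows = (nfiles - 1) // stamps_per_row + 1
    mosaic = np.full((nrows * (patch_size + 1) + 1,
                      stamps_per_row * (patch_size + 1) + 1), 1000.0)
    for i in range(nfiles):
        y0 = (i // stamps_per_row) * (patch_size + 1) + 1
        x0 = (i % stamps_per_row) * (patch_size + 1) + 1
        mosaic[y0:y0 + patch_size, x0:x0 + patch_size] = stack[i]
    return mosaic

mosaic = tile_stamps(np.random.rand(10, 31, 31), 31)
print(mosaic.shape)   # (129, 97): 4 rows x 3 columns of 31-pixel stamps plus separators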
Example #2
0
def register_images(files, params):
    """Register the images in the given files list.
    Registered images are placed in the loc_output directory with the prefix 'r_'.
    """
    # Have we specified a registration template?
    if params.registration_image:
        reg = DS.Observation(params.registration_image, params)
    else:
        reg = DS.EmptyBase()
        reg.fw = 999.0
        for f in files:
            if (f.fw < reg.fw) and (f.fw > 1.2):
                reg = f
    print('Registration image:', reg.name)

    # Register images
    for f in files:
        if f == reg:
            f.image = f.data
            rf = params.loc_output + os.path.sep + 'r_' + f.name
            IO.write_image(f.image, rf)
        else:
            f.register(reg, params)
            # delete image arrays to save memory
            del f.image
            del f.mask
            del f.inv_variance
        del reg.data
        del reg.image
        del reg.mask
        del reg.inv_variance
    return(files)
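When no registration template is specified, the loop above simply keeps the sharpest frame whose FWHM exceeds 1.2 pixels. The same selection written compactly over stand-in observation objects (illustrative only; fw and name mirror the attributes used above):

class Obs(object):
    def __init__(self, name, fw):
        self.name, self.fw = name, fw

files = [Obs('a.fits', 3.1), Obs('b.fits', 1.1), Obs('c.fits', 2.4)]
candidates = [f for f in files if f.fw > 1.2]
reg = min(candidates, key=lambda f: f.fw) if candidates else None
print('Registration image:', reg.name if reg else None)   # -> c.fits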
Example #3
0
def process_image(f, args):
    ref, params, stamp_positions, star_positions, star_group_boundaries, star_unsort_index, detector_mean_positions_x, detector_mean_positions_y = args
    dtarget = params.loc_output + os.path.sep + 'd_' + f.name
    if not (os.path.exists(dtarget)):

        #
        # Compute difference image
        #
        result = difference_image(
            ref,
            f,
            params,
            stamp_positions=stamp_positions,
            psf_image=params.loc_output + os.path.sep + 'psf.fits',
            star_positions=star_positions,
            star_group_boundaries=star_group_boundaries,
            detector_mean_positions_x=detector_mean_positions_x,
            detector_mean_positions_y=detector_mean_positions_y)
        del f.image
        del f.mask
        del f.inv_variance

        #
        # Save photometry to a file
        #
        if isinstance(result.flux, np.ndarray):
            #if not(params.use_GPU):
            print 'ungrouping fluxes'
            result.flux = result.flux[star_unsort_index].copy()
            result.dflux = result.dflux[star_unsort_index].copy()
            np.savetxt(params.loc_output + os.path.sep + f.name + '.flux',
                       np.vstack((result.flux, result.dflux)).T)
            f.flux = result.flux.copy()
            f.dflux = result.dflux.copy()

        #
        # Save output images to files
        #
        if isinstance(result.diff, np.ndarray):
            IO.write_image(result.diff,
                           params.loc_output + os.path.sep + 'd_' + f.name)
            IO.write_image(result.model,
                           params.loc_output + os.path.sep + 'm_' + f.name)
            IO.write_image(result.norm,
                           params.loc_output + os.path.sep + 'n_' + f.name)
            IO.write_image(result.mask,
                           params.loc_output + os.path.sep + 'z_' + f.name)
    return 0
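The (file, shared-arguments) signature lets process_image be mapped over a worker pool, the same pattern make_reference uses below with pool.map and itertools.repeat. A minimal sketch of that calling convention with a hypothetical worker (the real shared tuple would carry ref, params, star positions, and so on):

from multiprocessing import Pool

def worker(arg):
    f, shared = arg                    # unpack exactly as process_image(f, args) does
    ref_name, params = shared
    return f + ' differenced against ' + ref_name   # stand-in for the real work

if __name__ == '__main__':
    files = ['im1.fits', 'im2.fits', 'im3.fits']
    shared = ('ref.fits', {'gain': 1.0})
    pool = Pool(2)
    results = pool.map(worker, [(f, shared) for f in files])
    pool.close()
    pool.join()
    print(results)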
Example #4
0
 def register(self, reg, params):
     print(self.name)
     self._image, self._mask, self._inv_variance = IM.register(
         reg, self, params)
     rf = os.path.join(self.output_dir, 'r_' + self.name)
     IO.write_image(self._image, rf)
     rf = os.path.join(self.output_dir, 'sm_' + self.name)
     IO.write_image(self._mask, rf)
     rf = os.path.join(self.output_dir, 'iv_' + self.name)
     IO.write_image(self._inv_variance, rf)
     del self.mask
     del self.data
     del self.inv_variance
Example #5
0
    def register(self, reg, params):
        print self.name
        self._image, self._mask, self._inv_variance = IM.register(
            reg, self, params)
        if self._image is None:
            return False

        print 'registered', self.name

        rf = os.path.join(self.output_dir, 'r_' + self.name)
        IO.write_image(self._image, rf)
        rf = os.path.join(self.output_dir, 'sm_' + self.name)
        IO.write_image(self._mask, rf)
        rf = os.path.join(self.output_dir, 'iv_' + self.name)
        IO.write_image(self._inv_variance, rf)
        del self.mask
        del self.data
        del self.inv_variance

        return True
Example #6
0
def difference_image(ref,
                     target,
                     params,
                     stamp_positions=None,
                     psf_image=None,
                     star_positions=None,
                     star_group_boundaries=None,
                     detector_mean_positions_x=None,
                     detector_mean_positions_y=None,
                     star_sky=None,
                     kernelRadius=None,
                     kernel_inner_rad=7):

    from scipy.linalg import lu_solve, lu_factor, LinAlgError

    start = time.time()
    print 'difference_image', ref.name, target.name

    #
    # Set the kernel size based on the difference in seeing from the reference
    #
    #kernelRadius = min(params.kernel_maximum_radius,
    #                   max(params.kernel_minimum_radius,
    #                       np.abs(target.fw-ref.fw)*params.fwhm_mult))
    if kernelRadius is None:
        kernelRadius = min(
            params.kernel_maximum_radius,
            max(params.kernel_minimum_radius,
                np.sqrt(np.abs(target.fw**2 - ref.fw**2)) * params.fwhm_mult))

    #
    # Mask saturated pixels
    #
    #print 'Masking ',target.name,time.time()-start
    #smask = compute_saturated_pixel_mask(target.image,kernelRadius,params)

    #
    # Define the kernel basis functions
    #
    print 'Defining kernel pixels', time.time() - start
    if params.use_fft_kernel_pixels:
        kernelIndex, extendedBasis = IM.define_kernel_pixels_fft(
            ref,
            target,
            kernelRadius + 2,
            INNER_RADIUS=20,
            threshold=params.fft_kernel_threshold)
    else:
        kernelIndex, extendedBasis = IM.define_kernel_pixels(
            kernelRadius, INNER_RADIUS=kernel_inner_rad)
    nKernel = kernelIndex.shape[0]

    #
    # We dont want to use bad pixels in either the target or reference image
    #
    smask = target.mask * ref.mask
    bmask = np.ones(smask.shape, dtype=bool)

    g = DS.EmptyBase()

    for iteration in range(params.iterations):

        print 'Computing matrix', time.time() - start

        tmask = bmask * smask

        #
        # Compute the matrix and vector
        #
        H, V, texref = CI.compute_matrix_and_vector_cuda(
            ref.image,
            ref.blur,
            target.image,
            target.inv_variance,
            tmask,
            kernelIndex,
            extendedBasis,
            kernelRadius,
            params,
            stamp_positions=stamp_positions)

        #
        # Solve the matrix equation to find the kernel coefficients
        #
        print 'Solving matrix equation', time.time() - start
        try:
            lu, piv = lu_factor(H)
            c = lu_solve((lu, piv), V).astype(np.float32).copy()
        except (LinAlgError, ValueError):
            print 'LU decomposition failed'
            g.model = None
            g.flux = None
            g.diff = None
            print 'H'
            print H
            sys.stdout.flush()
            return g

        #
        # Compute the model image
        #
        print 'Computing model', time.time() - start
        g.model = CI.compute_model_cuda(ref.image.shape, texref, c,
                                        kernelIndex, extendedBasis, params)
        edges = np.where(ref.image < 1.0)
        g.model[edges] = 0.0

        #
        # Compute the difference image
        #
        difference = (target.image - g.model)
        g.norm = difference * np.sqrt(target.inv_variance)

        #
        # Recompute the variance image from the model
        #
        #target.inv_variance = 1.0/(g.model/params.gain +
        #						   (params.readnoise/params.gain)**2) + (1-smask)
        mp = np.where(tmask == 0)
        if len(mp[0]) > 0:
            target.inv_variance[mp] = 1.e-12

        #
        # Mask pixels that disagree with the model
        #
        if iteration > 2:
            bmask = IM.kappa_clip(smask, g.norm,
                                  params.pixel_rejection_threshold)

        print 'Iteration', iteration, 'completed', time.time() - start

    #
    # Delete the target image array to save memory
    #
    del target.image

    #
    # Save the kernel coefficients to a file
    #
    if params.do_photometry and psf_image:
        kf = params.loc_output + os.path.sep + 'k_' + os.path.basename(
            target.name)
        IO.write_kernel_table(kf, kernelIndex, extendedBasis, c, params)

        print 'coeffs', c

    g.norm = difference * np.sqrt(target.inv_variance)
    g.variance = 1.0 / target.inv_variance
    g.mask = tmask

    #
    # Do the photometry if requested
    #
    g.flux = None
    if params.do_photometry and psf_image:
        print 'star_positions', star_positions.shape
        print 'star_group_boundaries', star_group_boundaries
        if ref.name == target.name:
            sky_image, _ = IO.read_fits_file(params.loc_output + os.path.sep +
                                             'temp.sub2.fits')
            phot_target = ref.image - sky_image
            IO.write_image(
                phot_target,
                params.loc_output + os.path.sep + 'clean_' + ref.name)
            g.flux, g.dflux = CIF.photom_all_stars_simultaneous(
                phot_target, target.inv_variance, star_positions, psf_image, c,
                kernelIndex, extendedBasis, kernelRadius, params,
                star_group_boundaries, detector_mean_positions_x,
                detector_mean_positions_y)
        else:
            phot_target = difference
            g.flux, g.dflux = CI.photom_all_stars(
                phot_target, target.inv_variance, star_positions, psf_image, c,
                kernelIndex, extendedBasis, kernelRadius, params,
                star_group_boundaries, detector_mean_positions_x,
                detector_mean_positions_y)

        print 'Photometry completed', time.time() - start

    #
    # Apply the photometric scale factor to the difference image.
    # We don't do this prior to the photometry because the PSF is
    # being convolved by the kernel, which already includes the
    # photometric scale factor.
    #
    g.diff = IM.apply_photometric_scale(difference, c, params.pdeg)
    sys.stdout.flush()
    return g
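The kernel radius heuristic near the top scales with the quadrature difference in seeing between target and reference, clamped to the configured minimum and maximum. A worked example with made-up stand-ins for params.kernel_minimum_radius, params.kernel_maximum_radius and params.fwhm_mult:

import numpy as np

def kernel_radius(target_fw, ref_fw, r_min=3.0, r_max=12.0, fwhm_mult=1.5):
    # For Gaussian PSFs, sqrt(|fw_t^2 - fw_r^2|) is roughly the width of the
    # blur that takes the sharper image to the blurrier one; scale and clamp it.
    return min(r_max, max(r_min, np.sqrt(abs(target_fw**2 - ref_fw**2)) * fwhm_mult))

print(kernel_radius(4.0, 2.5))   # ~4.68
print(kernel_radius(2.6, 2.5))   # clamps up to r_min = 3.0
print(kernel_radius(9.0, 2.5))   # clamps down to r_max = 12.0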
Example #7
0
def make_reference(files, reg, params, reference_image='ref.fits'):
    seeing = {}
    sky = {}
    ref_seeing = 1000

    #
    # Have we specified the files to make the reference with?
    #
    if params.ref_include_file:

        ref_list = []
        for line in open(params.ref_include_file, 'r'):
            for f in files:
                if f.name == line.split()[0]:
                    ref_list.append(f)
                    print f.name, f.fw, f.signal
                    if f.fw < ref_seeing:
                        ref_sky = f.sky
                        ref_seeing = f.fw
                        best_seeing_ref = f

    else:

        #
        # We try to choose the best images
        #
        reference_exclude = []
        if params.ref_exclude_file:
            for line in open(params.ref_exclude_file, 'r'):
                reference_exclude.append(line.split()[0])

        sig = []
        for f in files:
            sig.append(f.signal)

        sig = np.asarray(sig)
        sigcut = np.mean(sig) - 2.0 * np.std(sig)
        print 'signal: mean, std, cut = ', np.mean(sig), np.std(sig), sigcut

        print 'Searching for lowest-background image'
        ref_sky = 1.e6
        for f in files:
            print f.name, f.fw, f.sky, f.signal
            if (f.sky < ref_sky) and (f.fw > params.reference_min_seeing) and \
               (f.roundness < params.reference_max_roundness) and (f.signal > sigcut) and not(f.name in reference_exclude):
                ref_sky = f.sky
        ref_sky = np.max([ref_sky, params.pixel_max / 1.e5])

        print 'Searching for best-seeing image'
        ref_seeing = 100.0
        for f in files:
            print f.name, f.fw, f.sky, f.signal
            if (f.fw < ref_seeing) and (f.fw > params.reference_min_seeing) and \
               (f.roundness < params.reference_max_roundness) and (f.signal > sigcut) and not(f.name in reference_exclude):
                ref_seeing = f.fw
                best_seeing_ref = f

        ref_list = []
        while len(ref_list) < params.min_ref_images:
            ref_list = []
            print 'Reference FWHM = ', ref_seeing
            print 'Cutoff FWHM for reference = ', params.reference_seeing_factor * ref_seeing
            print 'Cutoff background for reference = ', params.reference_sky_factor * ref_sky
            print 'Combining for reference:'
            for f in files:
                if (f.fw < params.reference_seeing_factor*ref_seeing) and \
                    (f.roundness < params.reference_max_roundness) and (f.sky < params.reference_sky_factor*ref_sky) and \
                    (f.fw > params.reference_min_seeing) and (f.signal > sigcut) and not(f.name in reference_exclude):
                    ref_list.append(f)
                    print f.name, f.fw, f.sky, f.signal
            params.reference_seeing_factor *= 1.02
            params.reference_sky_factor *= 1.02

        if len(ref_list) > params.max_ref_images:
            ref_list = ref_list[:params.max_ref_images]

        sig = []
        for f in ref_list:
            sig.append(f.signal)
        sig = np.asarray(sig)
        sigcut = np.mean(sig) - 2 * np.std(sig)
        print 'signal: mean, std, cut = ', np.mean(sig), np.std(sig), sigcut

        ref_seeing = 1000
        ref_roundness = 2.0
        for f in ref_list:
            if (f.fw < ref_seeing) and (f.signal > sigcut):
                ref_sky = f.sky
                ref_seeing = f.fw
                ref_roundness = f.roundness
                best_seeing_ref = f

    #
    # Which ref image has the worst seeing?
    #
    worst_seeing = 0.0
    for f in ref_list:
        if f.fw > worst_seeing:
            worst_seeing = f.fw
            worst_seeing_ref = f

    if params.ref_image_list:
        with open(params.loc_output + os.path.sep + params.ref_image_list,
                  'w') as fid:
            for f in ref_list:
                fid.write(f.name + '  ' + str(f.fw) + '  ' + str(f.sky) +
                          '  ' + str(f.signal) + '\n')

    #
    # Find the locations of the brightest stars to use as stamp positions
    # if required
    #
    stamp_positions = None
    if params.use_stamps:
        stars = PH.choose_stamps(best_seeing_ref, params)
        stamp_positions = stars[:, 0:2]

    #
    #  Construct the reference image.
    #
    ref = np.zeros([1, 1])
    sum1 = 0
    sum2 = 0

    good_ref_list = []

    for f in ref_list:
        f.blur = IM.boxcar_blur(f.image)
        good_ref_list.append(f)
        print 'difference_image:', f.name, best_seeing_ref.name

    if not (params.use_GPU) and (params.n_parallel > 1):

        #
        # Use ParallelProcessing to process images in the reference list
        #

        pool = Pool(params.n_parallel)
        results = pool.map(
            process_reference_image_helper,
            itertools.izip(
                ref_list,
                itertools.repeat((best_seeing_ref, params, stamp_positions))))

        for i, f in enumerate(ref_list):
            f.result = results[i]

    else:

        for f in ref_list:
            f.result = process_reference_image(
                f, (best_seeing_ref, params, stamp_positions))

    #
    # Remove bad reference models
    #

    rlist = [g for g in good_ref_list]
    for g in rlist:
        if not (isinstance(g.result.diff, np.ndarray)):
            print 'removing', g.name
            good_ref_list.remove(g)

    print 'good reference list:'
    for g in good_ref_list:
        print g.name

    print 'kappa-clipping reference list'
    for iterations in range(5):
        if len(good_ref_list) < 4:
            break
        sd = np.zeros(len(good_ref_list))
        for i, g in enumerate(good_ref_list):
            print g.name, g.result.diff
            sd[i] = np.std(g.result.diff)
        sds = sd.std()
        sdm = sd.mean()
        rlist = [g for g in good_ref_list]
        for g in rlist:
            if np.std(g.result.diff) > (sdm + 2.5 * sds):
                print 'removing', g.name
                good_ref_list.remove(g)

    #
    # Combine the good reference models
    #
    g = good_ref_list[0]
    gstack = np.zeros(
        [len(good_ref_list), g.result.model.shape[0], g.result.model.shape[1]])
    var_ref = np.zeros_like(g.result.model)
    mask = np.ones_like(g.result.model)
    print 'final reference list'
    for i, g in enumerate(good_ref_list):
        if isinstance(g.result.model, np.ndarray):
            print g.name, np.std(g.result.diff), np.median(g.result.model)
            IO.write_image(g.result.model,
                           params.loc_output + os.path.sep + 'mr_' + g.name)
            gstack[i, :, :] = g.result.model
            var_ref += g.result.model / params.gain + (params.readnoise /
                                                       params.gain)**2
            mask *= g.mask
    rr = np.median(gstack, axis=0)
    IO.write_image(rr, params.loc_output + os.path.sep + reference_image)
    IO.write_image(mask,
                   params.loc_output + os.path.sep + 'mask_' + reference_image)
    var_ref /= np.float(len(good_ref_list))
    IO.write_image(var_ref,
                   params.loc_output + os.path.sep + 'var_' + reference_image)
    if params.error_image_prefix is not None:
        IO.write_image(
            np.sqrt(var_ref), params.loc_output + os.path.sep +
            params.error_image_prefix + reference_image)

    for f in ref_list:
        f.result = None

    return stamp_positions
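The "kappa-clipping reference list" loop above repeatedly drops frames whose difference-image scatter exceeds mean + 2.5 sigma of the surviving list, stopping once fewer than four frames remain. The same clipping, stripped down to a plain array of scatter values:

import numpy as np

def kappa_clip_values(values, kappa=2.5, iterations=5, min_keep=4):
    # Iteratively reject values above mean + kappa*std of the values still kept.
    keep = np.ones(len(values), dtype=bool)
    for _ in range(iterations):
        if keep.sum() < min_keep:
            break
        kept = values[keep]
        keep &= values <= kept.mean() + kappa * kept.std()
    return keep

sd = np.array([1.0, 1.1, 0.9, 1.2, 1.05, 0.95, 1.15, 1.0, 0.9, 5.0])
print(kappa_clip_values(sd))   # the 5.0 outlier is rejected, the rest survive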
Example #8
0
        headers += [channel + str(n) for n in range(n) for channel in ['R', 'G', 'B', 'P']]

    writer.writerow(headers)

    for filename in images:
        color = io_functions.read_image(os.path.join(input_folder, 'color_' + filename))
        abdomen = io_functions.read_image(os.path.join(input_folder, 'abdomen_' + filename))[:, :, 0]
        wings = io_functions.read_image(os.path.join(input_folder, 'wings_' + filename))[:, :, 0]
        if color is not None and abdomen is not None and wings is not None:
            HSV = rgb2hsv(color)

            segments = [Segment('wings', wings, 10),
                        Segment('abdomen', abdomen, 5)]

            current_row = [os.path.splitext(filename)[0]]

            for segment in segments:
                current_row += [segment.name,
                                np.mean(HSV[:, :, 0][np.where(segment.mask > 128)]),
                                np.mean(HSV[:, :, 1][np.where(segment.mask > 128)])]

                dc = dominant_colors(color.astype('float32'), segment.num_colors, mask=segment.mask)

                output = visualise_colors(dc, 100, 100 * segment.num_colors)
                io_functions.write_image(os.path.join(output_folder,'{}_{}'.format(segment.name, filename)), output)

                for c in dc:
                    current_row += [c.RGB[channel] for channel in range(0, 3)[::-1]] + [c.proportion]

            writer.writerow(current_row)
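Each segment's CSV row starts with the mean hue and saturation taken over the masked pixels only. A compact illustration of that masked-statistics step on synthetic data (assuming rgb2hsv here is skimage.color.rgb2hsv; the mask threshold of 128 matches the fragment above):

import numpy as np
from skimage.color import rgb2hsv

rgb = np.random.rand(64, 64, 3)            # synthetic RGB image in [0, 1]
mask = np.zeros((64, 64), dtype=np.uint8)
mask[16:48, 16:48] = 255                   # hypothetical segment mask

hsv = rgb2hsv(rgb)
sel = mask > 128
print(hsv[:, :, 0][sel].mean(), hsv[:, :, 1][sel].mean())   # mean hue, mean saturation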
Example #9
0
def photom_variable_star(x0,
                         y0,
                         params,
                         patch_half_width=15,
                         converge=True,
                         save_stamps=False,
                         stamp_prefix='mosaic',
                         locate=True,
                         locate_iterations=2,
                         locate_half_width=14,
                         q_sigma_threshold=1.0,
                         locate_date_range=None):

    from astropy.io import fits

    def save_mosaic(stack, nfiles, patch_size, name, diff_std, threshold):
        stamps_per_row = int(np.sqrt(nfiles))
        nrows = (nfiles - 1) / stamps_per_row + 1
        mx = stamps_per_row * (patch_size + 1) + 1
        my = nrows * (patch_size + 1) + 1
        mosaic = np.ones((my, mx)) * 1000.0
        for i in range(nfiles):
            mosaic[(i/stamps_per_row)*(patch_size+1)+1:(i/stamps_per_row+1)*(patch_size+1), \
                    (i%stamps_per_row)*(patch_size+1)+1:(i%stamps_per_row+1)*(patch_size+1)] \
                    = stack[i,:,:]
            if diff_std[i] > threshold:
                mosaic[(i/stamps_per_row)*(patch_size+1)+1:(i/stamps_per_row+1)*(patch_size+1), \
                      (i%stamps_per_row)*(patch_size+1)+1] = -1000.0
                mosaic[(i/stamps_per_row)*(patch_size+1)+1:(i/stamps_per_row+1)*(patch_size+1), \
                      (i%stamps_per_row+1)*(patch_size+1)-1] = -1000.0
                mosaic[(i/stamps_per_row)*(patch_size+1)+1, \
                      (i%stamps_per_row)*(patch_size+1)+1:(i%stamps_per_row+1)*(patch_size+1)] = -1000.0
                mosaic[(i/stamps_per_row+1)*(patch_size+1)-1, \
                      (i%stamps_per_row)*(patch_size+1)+1:(i%stamps_per_row+1)*(patch_size+1)] = -1000.0
        IO.write_image(mosaic, name)

    # Obtain a list of files

    all_files = os.listdir(params.loc_data)
    all_files.sort()
    filenames = []
    nfiles = 0

    print 'Searching in', params.loc_output, 'for', params.name_pattern

    for f in all_files:

        if fnmatch.fnmatch(f, params.name_pattern):

            basename = os.path.basename(f)
            dfile = params.loc_output + os.path.sep + 'd_' + basename
            ktable = params.loc_output + os.path.sep + 'k_' + basename

            if os.path.exists(dfile) and os.path.exists(ktable):

                nfiles += 1
                filenames.append(f)

    # Load the kernel tables
    # Load the difference images into a data cube

    print len(filenames), 'files found'

    dates = np.zeros(nfiles)
    seeing = np.zeros(nfiles)
    roundness = np.zeros(nfiles)
    bgnd = np.zeros(nfiles)
    signal = np.zeros(nfiles)
    norm_std = np.zeros(nfiles, dtype=np.float64)
    diff_std = np.zeros(nfiles, dtype=np.float64)
    n_kernel = np.zeros(nfiles, dtype=np.int32)
    n_coeffs = np.zeros(nfiles, dtype=np.int32)
    kindex_x = np.arange(0, dtype=np.int32)
    kindex_y = np.arange(0, dtype=np.int32)
    kindex_ext = np.arange(0, dtype=np.int32)
    coeffs = np.arange(0, dtype=np.float64)

    filenames.sort()

    if not converge:
        locate_iterations = 1

    threshold = -10
    for iteration in range(np.max([1, locate_iterations])):

        ix0 = np.int32(x0 + 0.5)
        iy0 = np.int32(y0 + 0.5)

        x_patch = x0 - ix0 + patch_half_width
        y_patch = y0 - iy0 + patch_half_width

        patch_size = 2 * patch_half_width + 1
        patch_slice = (ix0 - patch_half_width, ix0 + patch_half_width + 1,
                       iy0 - patch_half_width, iy0 + patch_half_width + 1)

        d_image_stack = np.zeros((nfiles, patch_size, patch_size),
                                 dtype=np.float64)
        inv_var_image_stack = np.zeros((nfiles, patch_size, patch_size),
                                       dtype=np.float64)

        for i, f in enumerate(filenames):

            basename = os.path.basename(f)
            ktable = params.loc_output + os.path.sep + 'k_' + basename
            kernelIndex, extendedBasis, c, params = IO.read_kernel_table(
                ktable, params)
            coeffs = np.hstack((coeffs, c))
            kindex_x = np.hstack((kindex_x, kernelIndex[:, 0].T))
            kindex_y = np.hstack((kindex_y, kernelIndex[:, 1].T))
            kindex_ext = np.hstack((kindex_ext, extendedBasis))
            n_kernel[i] = kernelIndex.shape[0]
            n_coeffs[i] = c.shape[0]
            dates[i] = IO.get_date(params.loc_data + os.path.sep + basename,
                                   key=params.datekey) - 2450000
            seeing[i], roundness[i], bgnd[i], signal[i] = IM.compute_fwhm(
                f, params, width=20, image_name=True)

            dfile = params.loc_output + os.path.sep + 'd_' + basename
            nfile = params.loc_output + os.path.sep + 'n_' + basename
            zfile = params.loc_output + os.path.sep + 'z_' + basename
            diff, _ = IO.read_fits_file(dfile)
            mask, _ = IO.read_fits_file(zfile)
            diff_sc = IM.undo_photometric_scale(diff, c, params.pdeg)
            diff_sc *= mask
            d_image_stack[i, :, :] = diff_sc[patch_slice[2]:patch_slice[3],
                                             patch_slice[0]:patch_slice[1]]
            norm, _ = IO.read_fits_file(nfile, slice=patch_slice)
            inv_var_image_stack[i, :, :] = (norm / d_image_stack[i, :, :])**2
            diff_std[i] = np.std(diff)
            d_image_stack[i, :, :] -= np.median(d_image_stack[i, :, :])

        if save_stamps:
            save_mosaic(
                d_image_stack, nfiles, patch_size,
                params.loc_output + os.path.sep + stamp_prefix + '.fits',
                diff_std, threshold)

        print 'kappa-clipping'
        qd1 = np.arange(len(filenames))
        #qd = np.where(diff_std[qd1]<10)
        #qd1 = qd1[qd]
        for iter in range(10):
            qd = np.where(diff_std[qd1] < np.mean(diff_std[qd1]) +
                          (4.0 - 1.5 * (iter / 9.0)) * np.std(diff_std[qd1]))
            qd1 = qd1[qd]
            print iter, np.mean(diff_std[qd1]), np.std(diff_std[qd1]), np.mean(
                diff_std[qd1]) + (4.0 - 3 *
                                  (iter / 9.0)) * np.std(diff_std[qd1])

        print 'mean(diff) :', np.mean(diff_std[qd1])
        print 'std(diff) :', np.std(diff_std[qd1])
        print '1-sig threshold:', np.mean(
            diff_std[qd1]) + 1 * np.std(diff_std[qd1])
        print '2-sig threshold:', np.mean(
            diff_std[qd1]) + 2 * np.std(diff_std[qd1])
        print '3-sig threshold:', np.mean(
            diff_std[qd1]) + 3 * np.std(diff_std[qd1])

        print '1-sig diff reject:', np.where(
            diff_std > np.mean(diff_std[qd1]) + 1 * np.std(diff_std[qd1]))
        print '2-sig diff reject:', np.where(
            diff_std > np.mean(diff_std[qd1]) + 2 * np.std(diff_std[qd1]))
        print '3-sig diff reject:', np.where(
            diff_std > np.mean(diff_std[qd1]) + 3 * np.std(diff_std[qd1]))

        threshold = np.mean(
            diff_std[qd1]) + q_sigma_threshold * np.std(diff_std[qd1])
        threshold2 = np.mean(diff_std[qd1]) + 2 * np.std(diff_std[qd1])
        threshold3 = np.mean(diff_std[qd1]) + 3 * np.std(diff_std[qd1])

        if locate_date_range is not None:
            diff_std_copy = diff_std.copy()
            diff_std = diff_std * 0.0 + 2 * threshold
            pp = np.where((dates > locate_date_range[0])
                          & (dates < locate_date_range[1]))[0]
            if pp.any():
                diff_std[pp] = diff_std_copy[pp]
            else:
                print 'Error: No images found in date range', locate_date_range
                print 'Reverting to all dates.'
                diff_std = diff_std_copy

        dsum = np.zeros((patch_size, patch_size), dtype=np.float64)
        for i in range(nfiles):
            if diff_std[i] < threshold3:
                dsum += d_image_stack[i, :, :]
        IO.write_image(
            dsum, params.loc_output + os.path.sep + 'dsum%d.fits' % iteration)
        dr = patch_half_width - int(locate_half_width)
        dsum[:dr, :] = 0.0
        dsum[-dr:, :] = 0.0
        dsum[:, :dr] = 0.0
        dsum[:, -dr:] = 0.0
        ind_dsum_max = np.unravel_index(dsum.argmax(), dsum.shape)
        print 'Iteration', iteration, ': dsum maximum located at ', ind_dsum_max

        if locate and converge:
            y0 += ind_dsum_max[0] - patch_half_width
            x0 += ind_dsum_max[1] - patch_half_width

    # Read the PSF

    psf_image = params.loc_output + os.path.sep + 'psf.fits'
    psf, psf_hdr = fits.getdata(psf_image, 0, header='true')
    psf_height = psf_hdr['PSFHEIGH']
    psf_sigma_x = psf_hdr['PAR1'] * 0.8493218
    psf_sigma_y = psf_hdr['PAR2'] * 0.8493218
    psf_x = psf_hdr['PSFX']
    psf_y = psf_hdr['PSFY']
    psf_size = psf.shape[1]
    psf_fit_rad = params.psf_fit_radius
    psf_parameters = np.array([
        psf_size, psf_height, psf_sigma_x, psf_sigma_y, psf_x, psf_y,
        psf_fit_rad, params.gain
    ]).astype(np.float64)

    if params.psf_profile_type == 'gaussian':
        psf_sigma_x = psf_hdr['PAR1'] * 0.8493218
        psf_sigma_y = psf_hdr['PAR2'] * 0.8493218
        psf_parameters = np.array([
            psf_size, psf_height, psf_sigma_x, psf_sigma_y, psf_x, psf_y,
            psf_fit_rad, params.gain
        ]).astype(np.float64)
        profile_type = 0
    elif params.psf_profile_type == 'moffat25':
        print 'params.psf_profile_type moffat25 not working yet. Exiting.'
        sys.exit(0)
        psf_sigma_x = psf_hdr['PAR1']
        psf_sigma_y = psf_hdr['PAR2']
        psf_sigma_xy = psf_hdr['PAR3']
        psf_parameters = np.array([
            psf_size, psf_height, psf_sigma_x, psf_sigma_y, psf_x, psf_y,
            psf_fit_rad, params.gain, psf_sigma_xy
        ]).astype(np.float64)
        profile_type = 1
    else:
        print 'params.psf_profile_type undefined'
        sys.exit(0)

    psf_0 = psf.astype(np.float64).copy()
    psf_xd = psf.astype(np.float64).copy() * 0.0
    psf_yd = psf.astype(np.float64).copy() * 0.0
    flux = np.zeros(nfiles, dtype=np.float64)
    dflux = np.zeros(nfiles, dtype=np.float64)

    x0_arr = np.atleast_1d(np.array([x0], dtype=np.float64))
    y0_arr = np.atleast_1d(np.array([y0], dtype=np.float64))

    cu_photom_converge(
        profile_type, patch_half_width, params.pdeg, params.sdeg, nfiles,
        n_kernel, kindex_x, kindex_y, kindex_ext, n_coeffs,
        coeffs.astype(np.float64), psf_parameters, psf_0, psf_xd, psf_yd,
        np.float64(d_image_stack.ravel()), inv_var_image_stack, diff_std,
        np.float64(threshold), x0_arr, y0_arr, x_patch, y_patch, diff.shape[1],
        diff.shape[0], 16, 16, flux, dflux, np.float64(params.gain),
        np.int32(converge), np.float64(2.5))

    if save_stamps:
        save_mosaic(
            d_image_stack, nfiles, patch_size,
            params.loc_output + os.path.sep + 'p' + stamp_prefix + '.fits',
            diff_std, threshold)

    if locate_date_range is not None:
        diff_std = diff_std_copy

    return dates, seeing, roundness, bgnd, signal, flux, dflux, diff_std / threshold, x0_arr[
        0], y0_arr[0]
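The patch bookkeeping at the top of the loop rounds (x0, y0) to the nearest pixel, cuts a (2*patch_half_width + 1)-pixel square around it, and keeps the sub-pixel remainder in x_patch/y_patch so the PSF fit can still be placed at the true position. A minimal sketch of that step (no edge handling; Example #14 below adds the delta_patch clamping for targets near the image border):

import numpy as np

def extract_patch(image, x0, y0, patch_half_width=15):
    ix0 = np.int32(x0 + 0.5)                  # nearest pixel, as above
    iy0 = np.int32(y0 + 0.5)
    x_patch = x0 - ix0 + patch_half_width     # fractional position inside the patch
    y_patch = y0 - iy0 + patch_half_width
    patch = image[iy0 - patch_half_width:iy0 + patch_half_width + 1,
                  ix0 - patch_half_width:ix0 + patch_half_width + 1]
    return patch, x_patch, y_patch

patch, xp, yp = extract_patch(np.random.rand(200, 200), 101.3, 87.8)
print(patch.shape, xp, yp)                    # (31, 31) ~15.3 ~14.8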
Example #10
0
            segments = [
                Segment('wings', wings, 10),
                Segment('abdomen', abdomen, 5)
            ]

            current_row = [os.path.splitext(filename)[0]]

            for segment in segments:
                current_row += [
                    segment.name,
                    np.mean(HSV[:, :, 0][np.where(segment.mask > 128)]),
                    np.mean(HSV[:, :, 1][np.where(segment.mask > 128)])
                ]

                dc = dominant_colors(color.astype('float32'),
                                     segment.num_colors,
                                     mask=segment.mask)

                output = visualise_colors(dc, 100, 100 * segment.num_colors)
                io_functions.write_image(
                    os.path.join(output_folder,
                                 '{}_{}'.format(segment.name, filename)),
                    output)

                for c in dc:
                    current_row += [
                        c.RGB[channel] for channel in range(0, 3)[::-1]
                    ] + [c.proportion]

            writer.writerow(current_row)
Example #11
0
def make_diff_images(filenamelist, refim, params):
    """ make a diff image for each file in filenamelist: file - refim.

    filenamelist :  list of filenames for 'target' images
    refim : reference image, either a filename or a DIA Observation object
    params : DIA parameters object
    """
    star_group_boundaries = None
    detector_mean_positions_x = None
    detector_mean_positions_y = None
    star_unsort_index = None
    star_positions = None
    stamp_positions = None
    sky = 0.0

    if isinstance(refim, str) and os.path.exists(refim):
        refim = DS.Observation(refim, params)

    # TODO: investigate what is really being done here:
    #  Apply saturation mask and boxcar blurring to reference image
    mask, _ = IO.read_fits_file(
            params.loc_output + os.path.sep + 'mask_' + refim.name)
    refim.mask = mask
    pm = params.pixel_max
    params.pixel_max *= 0.9
    refim.mask *= IM.compute_saturated_pixel_mask(refim.image, 4, params)
    params.pixel_max = pm
    refim.blur = IM.boxcar_blur(refim.image)
    if params.mask_cluster:
        refim.mask *= IM.mask_cluster(refim.image, refim.mask, params)

    # For each given filename, get a pyDIA observation object
    image_list = get_observation_list(filenamelist, params)

    # Register the images, using the ref image as the registration template,
    # unless the user has specified otherwise
    if not params.registration_image:
        params.registration_image = refim.fullname
    registered_image_list = register_images(image_list, params)

    # make diff images: im - ref
    for im in registered_image_list:
        result = DIA.difference_image(
            refim, im, params,
            stamp_positions=stamp_positions,
            psf_image=params.loc_output + os.path.sep + 'psf.fits',
            star_positions=star_positions,
            star_group_boundaries=star_group_boundaries,
            detector_mean_positions_x=detector_mean_positions_x,
            detector_mean_positions_y=detector_mean_positions_y)
        del im.image
        del im.mask
        del im.inv_variance

        hdr = fits.getheader(im.fullname)
        #  TODO : use astropy fits to propagate header with WCS from parent image
        # Save output images to files
        if isinstance(result.diff, np.ndarray):
            IO.write_image(result.diff,
                           params.loc_output + os.path.sep + 'd_' + im.name,
                           header=hdr)
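The header hand-off above is what carries the parent image's WCS into the d_*.fits product: read the header from the original file, pass it to the writer. The same round trip with astropy.io.fits alone (placeholder filenames; the sketch writes its own parent file so it runs standalone):

from astropy.io import fits
import numpy as np

parent_hdr = fits.Header()
parent_hdr['OBJECT'] = 'field1'               # stand-in for real WCS/metadata keywords
fits.writeto('image1.fits', np.zeros((100, 100)), header=parent_hdr, overwrite=True)

hdr = fits.getheader('image1.fits')           # as fits.getheader(im.fullname) above
fits.writeto('d_image1.fits', np.random.rand(100, 100), header=hdr, overwrite=True)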
Example #12
0
def do_photometry(params,
                  extname='newflux',
                  star_file='star_positions',
                  psf_file='psf.fits',
                  star_positions=None,
                  reference_image='ref.fits'):

    #
    # Determine our list of files
    #
    all_files = os.listdir(params.loc_data)
    all_files.sort()
    files = []
    for f in all_files:
        if fnmatch.fnmatch(f, params.name_pattern):
            g = DS.Observation(params.loc_data + os.path.sep + f, params)
            if g.fw > 0.0:
                files.append(g)

    ref = DS.Observation(params.loc_output + os.path.sep + reference_image,
                         params)
    ref.register(ref, params)

    #
    # Detect stars and compute the PSF if necessary
    #

    psf_file = params.loc_output + os.path.sep + psf_file
    star_file = params.loc_output + os.path.sep + star_file

    print psf_file
    print os.path.exists(psf_file)
    print star_file
    print os.path.exists(star_file)

    if not (os.path.exists(psf_file)) or not (os.path.exists(star_file)):
        stars = PH.compute_psf_image(params, ref, psf_image=psf_file)

    if star_positions is None:

        if os.path.exists(star_file):
            star_positions = np.genfromtxt(star_file)[:, :2]
        else:
            star_positions = stars[:, 0:2]

    #
    # Group the stars by location
    #
    star_group_boundaries = None
    detector_mean_positions_x = None
    detector_mean_positions_y = None
    star_sort_index,star_group_boundaries,detector_mean_positions_x,detector_mean_positions_y = \
       PH.group_stars_ccd(params,star_positions,params.loc_output+os.path.sep+reference_image)
    star_positions = star_positions[star_sort_index]
    star_unsort_index = np.argsort(star_sort_index)

    #
    # Process the reference image
    #
    print 'Processing', reference_image
    ref = DS.Observation(params.loc_output + os.path.sep + reference_image,
                         params)
    #reg = Observation(params.loc_data+os.path.sep+
    #                  params.registration_image,params)
    mask, _ = IO.read_fits_file(params.loc_output + os.path.sep + 'mask_' +
                                reference_image)
    variance, _ = IO.read_fits_file(params.loc_output + os.path.sep + 'var_' +
                                    reference_image)
    ref.mask = mask
    ref.inv_variance = 1.0 / variance + (1 - mask)
    ref.register(ref, params)
    smask = IM.compute_saturated_pixel_mask(ref.image, params)
    ref.inv_variance += (1 - (smask * mask)) * 1.e-12
    ktable = params.loc_output + os.path.sep + 'k_' + os.path.basename(
        reference_image)
    kernelIndex, extendedBasis, c, params = IO.read_kernel_table(
        ktable, params)
    kernelRadius = np.max(kernelIndex[:, 0]) + 1
    if np.sum(extendedBasis) > 0:
        kernelRadius += 1
    print 'kernelIndex', kernelIndex
    print 'extendedBasis', extendedBasis
    print 'coeffs', c
    print 'kernelRadius', kernelRadius
    print 'star_positions', star_positions.shape
    phot_target, _ = IO.read_fits_file(params.loc_output + os.path.sep +
                                       'clean_' + reference_image)
    ref.flux, ref.dflux = CIF.photom_all_stars_simultaneous(
        phot_target, ref.inv_variance, star_positions, psf_file, c,
        kernelIndex, extendedBasis, kernelRadius, params,
        star_group_boundaries, detector_mean_positions_x,
        detector_mean_positions_y)

    if isinstance(ref.flux, np.ndarray):
        if not (params.use_GPU):
            print 'ungrouping fluxes'
            ref.flux = ref.flux[star_unsort_index].copy()
            ref.dflux = ref.dflux[star_unsort_index].copy()
            print ref.flux.shape, star_positions.shape
        np.savetxt(
            params.loc_output + os.path.sep + reference_image + '.' + extname,
            np.vstack((ref.flux, ref.dflux)))

    #
    # Process difference images
    #
    for f in files:

        if not (os.path.exists(params.loc_output + os.path.sep + f.name + '.' +
                               extname)):

            print 'Processing', f.name
            target = f.name
            dtarget = params.loc_output + os.path.sep + 'd_' + os.path.basename(
                target)
            ntarget = params.loc_output + os.path.sep + 'n_' + os.path.basename(
                target)
            ztarget = params.loc_output + os.path.sep + 'z_' + os.path.basename(
                target)
            ktable = params.loc_output + os.path.sep + 'k_' + os.path.basename(
                target)

            if os.path.exists(dtarget) and os.path.exists(
                    ntarget) and os.path.exists(ktable):

                norm, h = IO.read_fits_file(ntarget)
                diff, h = IO.read_fits_file(dtarget)
                mask, h = IO.read_fits_file(ztarget)
                inv_var = (norm / diff)**2 + (1 - mask)

                kernelIndex, extendedBasis, c, params = IO.read_kernel_table(
                    ktable, params)
                kernelRadius = np.max(kernelIndex[:, 0]) + 1
                if np.sum(extendedBasis) > 0:
                    kernelRadius += 1

                print 'kernelIndex', kernelIndex
                print 'extendedBasis', extendedBasis
                print 'coeffs', c
                print 'kernelRadius', kernelRadius

                IO.write_image(diff,
                               params.loc_output + os.path.sep + 'diff1.fits')
                diff = IM.undo_photometric_scale(diff, c, params.pdeg)
                IO.write_image(diff,
                               params.loc_output + os.path.sep + 'diff2.fits')
                IO.write_image(
                    inv_var, params.loc_output + os.path.sep + 'inv_var.fits')
                IO.write_kernel_table(
                    params.loc_output + os.path.sep + 'ktable.fits',
                    kernelIndex, extendedBasis, c, params)

                flux, dflux = CI.photom_all_stars(
                    diff, inv_var, star_positions, psf_file, c, kernelIndex,
                    extendedBasis, kernelRadius, params, star_group_boundaries,
                    detector_mean_positions_x, detector_mean_positions_y)

                print 'flux[100:110]:'
                print flux[100:110]
                if isinstance(flux, np.ndarray):
                    if not (params.use_GPU):
                        print 'ungrouping fluxes'
                        flux = flux[star_unsort_index].copy()
                        dflux = dflux[star_unsort_index].copy()
                        print 'unsort flux[100:110]:'
                        print flux[100:110]
                    np.savetxt(
                        params.loc_output + os.path.sep + f.name + '.' +
                        extname,
                        np.vstack((flux, dflux)).T)

                sys.exit(0)
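The "ungrouping fluxes" step relies on an inverse permutation: group_stars_ccd returns star_sort_index, the star list is reordered with it, and np.argsort(star_sort_index) maps the group-ordered fluxes back to the original catalogue order. A short check of that identity:

import numpy as np

values = np.array([10.0, 20.0, 30.0, 40.0, 50.0])
sort_index = np.array([2, 0, 4, 1, 3])            # hypothetical grouping order
grouped = values[sort_index]                       # [30, 10, 50, 20, 40]
unsort_index = np.argsort(sort_index)              # inverse permutation
print(np.allclose(grouped[unsort_index], values))  # True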
Example #13
0
 def set_image(self, value):
     self._image = value
     image_name = os.path.join(self.output_dir, 'r_' + self.name)
     IO.write_image(self._image, image_name)
Example #14
0
def photom_variable_star(x0,y0,params,patch_half_width=15,converge=True,save_stamps=False,stamp_prefix='mosaic',locate=True,locate_iterations=2,
						  locate_half_width=14,q_sigma_threshold=1.0,locate_date_range=None):

	from astropy.io import fits
	from scipy.ndimage.filters import median_filter

	outer_radius = 15
	inner_radius = 12
	diameter = 2*outer_radius + 1
	x = np.arange(diameter)-outer_radius
	xx,yy = np.meshgrid(x,x)
	filter_kernel = np.zeros((diameter,diameter))
	filter_kernel[xx**2+yy**2<=outer_radius**2] = 1
	filter_kernel[xx**2+yy**2<=inner_radius**2] = 0


	def save_mosaic(stack,nfiles,patch_size,name,diff_std,threshold):
		stamps_per_row = int(np.sqrt(nfiles))
		nrows = (nfiles-1)/stamps_per_row+1;
		mx = stamps_per_row*(patch_size+1)+1
		my = nrows*(patch_size+1)+1
		mosaic = np.ones((my,mx))*1000.0
		for i in range(nfiles):
		  mosaic[(i/stamps_per_row)*(patch_size+1)+1:(i/stamps_per_row+1)*(patch_size+1), \
				  (i%stamps_per_row)*(patch_size+1)+1:(i%stamps_per_row+1)*(patch_size+1)] \
				  = stack[i,:,:]
		  if diff_std[i] > threshold:
			mosaic[(i/stamps_per_row)*(patch_size+1)+1:(i/stamps_per_row+1)*(patch_size+1), \
				  (i%stamps_per_row)*(patch_size+1)+1] = -1000.0
			mosaic[(i/stamps_per_row)*(patch_size+1)+1:(i/stamps_per_row+1)*(patch_size+1), \
				  (i%stamps_per_row+1)*(patch_size+1)-1] = -1000.0
			mosaic[(i/stamps_per_row)*(patch_size+1)+1, \
				  (i%stamps_per_row)*(patch_size+1)+1:(i%stamps_per_row+1)*(patch_size+1)] = -1000.0                  
			mosaic[(i/stamps_per_row+1)*(patch_size+1)-1, \
				  (i%stamps_per_row)*(patch_size+1)+1:(i%stamps_per_row+1)*(patch_size+1)] = -1000.0                  
		IO.write_image(mosaic,name)

	# Obtain a list of files

	all_files = os.listdir(params.loc_data)
	all_files.sort()
	filenames = []
	nfiles = 0

	print 'Searching in', params.loc_output, 'for', params.name_pattern

	for f in all_files:

		if fnmatch.fnmatch(f,params.name_pattern):

			basename = os.path.basename(f)
			dfile = params.loc_output+os.path.sep+'d_'+basename
			ktable = params.loc_output+os.path.sep+'k_'+basename

			if os.path.exists(dfile) and os.path.exists(ktable):

				nfiles += 1
				filenames.append(f)

	# Load the kernel tables
	# Load the difference images into a data cube

	print len(filenames), 'files found'


	dates = np.zeros(nfiles)
	seeing = np.zeros(nfiles)
	roundness = np.zeros(nfiles)
	bgnd = np.zeros(nfiles)
	signal = np.zeros(nfiles)
	norm_std = np.zeros(nfiles,dtype=np.float64)
	diff_std = np.zeros(nfiles,dtype=np.float64)
	n_kernel = np.zeros(nfiles,dtype=np.int32)
	n_coeffs = np.zeros(nfiles,dtype=np.int32)
	kindex_x = np.arange(0,dtype=np.int32)
	kindex_y = np.arange(0,dtype=np.int32)
	kindex_ext = np.arange(0,dtype=np.int32)
	coeffs = np.arange(0,dtype=np.float64)

	filenames.sort()

	if not converge:
	 	locate_iterations = 1

	threshold = -10
	for iteration in range(np.max([1,locate_iterations])):

	 	#ix0 = np.int32(x0+0.5)
	 	#iy0 = np.int32(y0+0.5)
		ix0 = np.int32(x0)
		iy0 = np.int32(y0)

		x_patch = x0 - ix0 + patch_half_width
		y_patch = y0 - iy0 + patch_half_width

		patch_size = 2*patch_half_width+1
		patch_slice = np.array([ix0-patch_half_width, ix0+patch_half_width+1, iy0-patch_half_width, iy0+patch_half_width+1])
		print 'patch_slice:', patch_slice

		# check that patch doesn't overlap the edge of the image
		f = filenames[0]
		diff, _ = IO.read_fits_file(params.loc_output+os.path.sep+'d_'+os.path.basename(f))
		nx = diff.shape[1]
		ny = diff.shape[0]
		delta_patch_x = 0
		delta_patch_y = 0
		if patch_slice[0] < 0:
			delta_patch_x = -patch_slice[0]
		elif patch_slice[1] >= nx:
			delta_patch_x = nx - patch_slice[1] - 1
		if patch_slice[2] < 0:
			delta_patch_y = -patch_slice[2]
		elif patch_slice[3] >= ny:
			delta_patch_y = ny - patch_slice[3] - 1

		print 'delta_patch_x, delta_patch_y:', delta_patch_x, delta_patch_y

		patch_slice += np.array([delta_patch_x,delta_patch_x,delta_patch_y,delta_patch_y])
		print 'patch_slice:', patch_slice

		x_patch -= delta_patch_x
		y_patch -= delta_patch_y

		d_image_stack = np.zeros((nfiles,patch_size,patch_size),dtype=np.float64)
		inv_var_image_stack = np.zeros((nfiles,patch_size,patch_size),dtype=np.float64)

		dmask = np.ones([patch_size,patch_size],dtype=np.bool)
		dmask_rad = 8.0
		dmix = np.linspace(-patch_half_width,patch_half_width,patch_size) - delta_patch_x
		dmiy = np.linspace(-patch_half_width,patch_half_width,patch_size) - delta_patch_y
		dmx, dmy = np.meshgrid(dmix,dmiy,indexing='ij')
		dmask[dmx**2 + dmy**2 < dmask_rad**2] = False

		for i, f in enumerate(filenames):

			basename = os.path.basename(f)
			ktable = params.loc_output+os.path.sep+'k_'+basename
			kernelIndex, extendedBasis, c, params = IO.read_kernel_table(ktable,params)
			coeffs = np.hstack((coeffs,c))
			kindex_x = np.hstack((kindex_x,kernelIndex[:,0].T))
			kindex_y = np.hstack((kindex_y,kernelIndex[:,1].T))
			kindex_ext = np.hstack((kindex_ext,extendedBasis))
			n_kernel[i] = kernelIndex.shape[0]
			n_coeffs[i] = c.shape[0]
			dates[i] = IO.get_date(params.loc_data+os.path.sep+basename,key=params.datekey)
			if dates[i] > 2450000:
				dates[i] -= 2450000
			seeing[i], roundness[i], bgnd[i], signal[i] = IM.compute_fwhm(f,params,width=20,image_name=True)

			dfile = params.loc_output+os.path.sep+'d_'+basename
			nfile = params.loc_output+os.path.sep+'n_'+basename
			zfile = params.loc_output+os.path.sep+'sm_'+basename
			ivfile = params.loc_output+os.path.sep+'iv_'+basename
			diff, _ = IO.read_fits_file(dfile)
			mask, _ = IO.read_fits_file(zfile)
			iv, _ = IO.read_fits_file(ivfile)
			diff_sc = IM.undo_photometric_scale(diff,c,params.pdeg)
			#diff_sc = diff
			#diff_sc -= median_filter(diff_sc,footprint=filter_kernel)
			diff_sc *= mask
			d_image_stack[i,:,:] = diff_sc[patch_slice[2]:patch_slice[3],patch_slice[0]:patch_slice[1]]
			inv_var_image_stack[i,:,:], _ = IO.read_fits_file(ivfile,slice=patch_slice)
			#inv_var_image_stack[i,:,:] = (norm / d_image_stack[i,:,:])**2
			#diff_std[i] = np.std(diff)
			diff_std[i] = np.std(d_image_stack[i,:,:][dmask])
			d_image_stack[i,:,:] -= np.median(d_image_stack[i,:,:])

		print 'kappa-clipping'
		qd = np.arange(len(filenames))
		qd1 = np.where(np.isfinite(diff_std))[0]
		for iter in range(10):
			qd = np.where(diff_std[qd1]<np.mean(diff_std[qd1])+(4.0-1.5*(iter/9.0))*np.std(diff_std[qd1]))[0]
			qd1 = qd1[qd]
			print iter, np.mean(diff_std[qd1]), np.std(diff_std[qd1]), np.mean(diff_std[qd1])+(4.0-3*(iter/9.0))*np.std(diff_std[qd1])

		print 'mean(diff) :',np.mean(diff_std[qd1])
		print 'std(diff) :',np.std(diff_std[qd1])
		print '1-sig threshold:', np.mean(diff_std[qd1])+1*np.std(diff_std[qd1])
		print '2-sig threshold:', np.mean(diff_std[qd1])+2*np.std(diff_std[qd1])
		print '3-sig threshold:', np.mean(diff_std[qd1])+3*np.std(diff_std[qd1])

		print '1-sig diff reject:',np.where(diff_std>np.mean(diff_std[qd1])+1*np.std(diff_std[qd1]))
		print '2-sig diff reject:',np.where(diff_std>np.mean(diff_std[qd1])+2*np.std(diff_std[qd1]))
		print '3-sig diff reject:',np.where(diff_std>np.mean(diff_std[qd1])+3*np.std(diff_std[qd1]))

		threshold = np.mean(diff_std[qd1])+q_sigma_threshold*np.std(diff_std[qd1])
		threshold2 = np.mean(diff_std[qd1])+2*np.std(diff_std[qd1])
		threshold3 = np.mean(diff_std[qd1])+3*np.std(diff_std[qd1])

		if locate_date_range is not None:
			diff_std_copy = diff_std.copy()
			diff_std = diff_std*0.0 + 100.0*threshold
			pp = np.where((dates>locate_date_range[0]) & (dates<locate_date_range[1]))[0]
			if pp.any():
				print 'Using images ',pp
				diff_std[pp] = diff_std_copy[pp]
			else:
				print 'Error: No images found in date range',locate_date_range
				print 'Reverting to all dates.'
				diff_std = diff_std_copy

		print 'zeros:'
		for i in range(nfiles):
			print i, np.sum(np.abs(d_image_stack[i,:,:]) < 1.e-6)
			if np.isnan(inv_var_image_stack[i,:,:]).any() or np.sum(np.abs(d_image_stack[i,:,:]) < 1.e-6) > 5:
				diff_std[i] = 100.0*threshold
				inv_var_image_stack[i,:,:] = inv_var_image_stack[i,:,:]*0.0


		if save_stamps:
			save_mosaic(d_image_stack,nfiles,patch_size,params.loc_output+os.path.sep+stamp_prefix+'.fits',diff_std,threshold)

		dsum = np.zeros((patch_size,patch_size),dtype=np.float64)
		for i in range(nfiles):
			if diff_std[i] < threshold3:
				dsum += d_image_stack[i,:,:]
		IO.write_image(dsum,params.loc_output+os.path.sep+'dsum%d.fits'%iteration)
		dr = patch_half_width-int(locate_half_width)
		print 'dr:', dr
		dsum[:dr-delta_patch_y,:] = 0.0
		dsum[-dr-delta_patch_y:,:] = 0.0
		dsum[:,:dr-delta_patch_x] = 0.0
		dsum[:,-dr-delta_patch_x:] = 0.0
		IO.write_image(dsum,params.loc_output+os.path.sep+'dsum_m%d.fits'%iteration)
		ind_dsum_max = np.unravel_index(dsum.argmax(),dsum.shape)
		print 'Iteration',iteration,': dsum maximum located at ',ind_dsum_max

		if locate and converge:
			y0 += ind_dsum_max[0] - patch_half_width + delta_patch_y
			x0 += ind_dsum_max[1] - patch_half_width + delta_patch_x


	# Read the PSF

	psf_image = params.loc_output+os.path.sep+'psf.fits'
	psf,psf_hdr = fits.getdata(psf_image,0,header='true')
	psf_height = psf_hdr['PSFHEIGH']
	psf_sigma_x = psf_hdr['PAR1']*0.8493218
	psf_sigma_y = psf_hdr['PAR2']*0.8493218
	psf_x = psf_hdr['PSFX']
	psf_y = psf_hdr['PSFY']
	psf_size = psf.shape[1]
	psf_fit_rad = params.psf_fit_radius
	psf_parameters = np.array([psf_size,psf_height,psf_sigma_x,psf_sigma_y,psf_x,
							   psf_y,psf_fit_rad,params.gain]).astype(np.float64)

	if params.psf_profile_type == 'gaussian':
		psf_sigma_x = psf_hdr['PAR1']*0.8493218
		psf_sigma_y = psf_hdr['PAR2']*0.8493218
		psf_parameters = np.array([psf_size,psf_height,psf_sigma_x,psf_sigma_y,psf_x,
								   psf_y,psf_fit_rad,params.gain]).astype(np.float64)
		profile_type = 0
	elif params.psf_profile_type == 'moffat25':
		print 'params.psf_profile_type moffat25 not working yet. Exiting.'
		sys.exit(0)
		psf_sigma_x = psf_hdr['PAR1']
		psf_sigma_y = psf_hdr['PAR2']
		psf_sigma_xy = psf_hdr['PAR3']
		psf_parameters = np.array([psf_size,psf_height,psf_sigma_x,psf_sigma_y,psf_x,
								   psf_y,
								   psf_fit_rad,params.gain,psf_sigma_xy]).astype(np.float64)
		profile_type = 1
	else:
		print 'params.psf_profile_type undefined'
		sys.exit(0)

	psf_0 = psf.astype(np.float64).copy()
	psf_xd = psf.astype(np.float64).copy()*0.0
	psf_yd = psf.astype(np.float64).copy()*0.0
	flux = np.zeros(nfiles,dtype=np.float64)
	dflux = np.zeros(nfiles,dtype=np.float64)

	x0_arr = np.atleast_1d(np.array([x0],dtype=np.float64))
	y0_arr = np.atleast_1d(np.array([y0],dtype=np.float64))

	print 'Converging photometry'
	print 'x0, y0:', x0, y0
	print 'x_patch, y_patch:', x_patch, y_patch
	good_images = np.where(diff_std < threshold)[0]
	print 'using images', good_images
	print 'threshold', threshold
	for i, f in enumerate(filenames):
		if i in good_images:
			print i, 'd_'+f, diff_std[i] 

	cu_photom_converge(profile_type, patch_half_width, params.pdeg, params.sdeg, nfiles, 
						n_kernel, kindex_x, kindex_y, kindex_ext, n_coeffs, coeffs.astype(np.float64),
						psf_parameters, psf_0, psf_xd, psf_yd,
						np.float64(d_image_stack.ravel()), np.float64(inv_var_image_stack.ravel()), diff_std, np.float64(threshold),
						x0_arr, y0_arr, x_patch, y_patch, diff.shape[1], diff.shape[0], 16, 16, flux, dflux, 
						np.float64(params.gain),np.int32(converge),np.float64(2.5))

	if save_stamps:
		save_mosaic(d_image_stack,nfiles,patch_size,params.loc_output+os.path.sep+'p'+stamp_prefix+'.fits',diff_std,threshold)

	if locate_date_range is not None:
	  diff_std = diff_std_copy

	return dates, seeing, roundness, bgnd, signal, flux, dflux, diff_std/threshold, x0_arr[0], y0_arr[0]
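The meshgrid block at the top of this version builds an annular footprint (pixels between radii 12 and 15), evidently intended for the commented-out median_filter background subtraction of the difference stamps. A standalone sketch of constructing and applying such a footprint:

import numpy as np
from scipy.ndimage import median_filter

outer_radius, inner_radius = 15, 12
diameter = 2 * outer_radius + 1
x = np.arange(diameter) - outer_radius
xx, yy = np.meshgrid(x, x)
footprint = np.zeros((diameter, diameter), dtype=bool)
footprint[xx**2 + yy**2 <= outer_radius**2] = True    # inside the outer circle...
footprint[xx**2 + yy**2 <= inner_radius**2] = False   # ...but outside the inner circle

image = np.random.rand(128, 128)
background = median_filter(image, footprint=footprint)  # running annular median
print(footprint.sum(), background.shape)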
Example #15
0
def photom_all_stars_simultaneous(diff,inv_variance,positions,psf_image,c,kernelIndex,
								  extendedBasis,kernelRadius,params,
								  star_group_boundaries,
								  detector_mean_positions_x,detector_mean_positions_y):
	
	from astropy.io import fits
	# Read the PSF
	psf,psf_hdr = fits.getdata(psf_image,0,header='true')
	psf_height = psf_hdr['PSFHEIGH']
	psf_x = psf_hdr['PSFX']
	psf_y = psf_hdr['PSFY']
	psf_size = psf.shape[1]
	psf_fit_rad = params.psf_fit_radius
	#psf_fit_rad = 3.1
	if params.psf_profile_type == 'gaussian':
		psf_sigma_x = psf_hdr['PAR1']*0.8493218
		psf_sigma_y = psf_hdr['PAR2']*0.8493218
		psf_parameters = np.array([psf_size,psf_height,psf_sigma_x,psf_sigma_y,psf_x,
								   psf_y,psf_fit_rad,params.gain]).astype(np.float64)
		print 'psf_parameters',psf_parameters
		profile_type = 0
	elif params.psf_profile_type == 'moffat25':
		print 'params.psf_profile_type moffat25 not working yet. Exiting.'
		sys.exit(0)
		psf_sigma_x = psf_hdr['PAR1']
		psf_sigma_y = psf_hdr['PAR2']
		psf_sigma_xy = psf_hdr['PAR3']
		psf_parameters = np.array([psf_size,psf_height,psf_sigma_x,psf_sigma_y,psf_x,
								   psf_y,
								   psf_fit_rad,params.gain,psf_sigma_xy]).astype(np.float64)
		print 'psf_parameters',psf_parameters
		profile_type = 1
	else:
		print 'params.psf_profile_type undefined'
		sys.exit(0)

	
	k0 = kernelIndex[:,0].astype(np.int32).copy()
	k1 = kernelIndex[:,1].astype(np.int32).copy()
	if params.star_file_is_one_based:
		posx = np.float64(positions[:,0]-1.0)
		posy = np.float64(positions[:,1]-1.0)
	else:
		posx = np.float64(positions[:,0]-0.0)
		posy = np.float64(positions[:,1]-0.0)
		
	psf_0 = psf.astype(np.float64)
	psf_xd = np.zeros_like(psf_0,dtype=np.float64)
	psf_yd = np.zeros_like(psf_0,dtype=np.float64)
	nstars = positions.shape[0]
	flux = np.zeros(nstars+1,dtype=np.float64)
	dflux = np.zeros(nstars+1,dtype=np.float64)
	c64 = c.astype(np.float64).copy()

	print 'nstars', nstars
	print 'flux', flux.shape
	print 'dflux', dflux.shape

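	# Empty ctypes pointers and a counter, passed by reference below; the compiled
	# routine points them at the sparse-matrix entries (row index, column index,
	# value) it builds and records how many were written in n_entries.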
	i_index = ctypes.POINTER(ctypes.c_int)()
	j_index = ctypes.POINTER(ctypes.c_int)()
	value = ctypes.POINTER(ctypes.c_double)()
	n_entries = ctypes.c_int()

	rvec = np.zeros(nstars).astype(np.float64).copy()

	for iteration in range(1):

	  cu_multi_photom(np.int(profile_type), diff.shape[1], diff.shape[0], params.pdeg,
			  params.sdeg, c.shape[0], kernelIndex.shape[0],
			  np.int(kernelRadius), k0,
			  k1, extendedBasis,
			  psf_parameters, psf_0, psf_xd, psf_yd,
			  posx, posy, c64, long(nstars), 16, 16, np.float64(diff),
			  np.float64(inv_variance),np.int32(star_group_boundaries),
			  np.float64(detector_mean_positions_x),
			  np.float64(detector_mean_positions_y),star_group_boundaries.shape[0],ctypes.byref(n_entries),
			  ctypes.byref(i_index), ctypes.byref(j_index), ctypes.byref(value), rvec, flux[:nstars], iteration)


	  n_e = np.int32(n_entries)

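	  # Wrap the C-allocated index and value arrays as numpy arrays without copying.
	  # Note that PyBuffer_FromMemory is a Python-2-only C API.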
	  buf_from_mem = ctypes.pythonapi.PyBuffer_FromMemory
	  buf_from_mem.restype = ctypes.py_object
	  
	  buffer = buf_from_mem(i_index, n_e*np.dtype(np.int32).itemsize)
	  i_ind = np.frombuffer(buffer, np.int32)

	  buffer = buf_from_mem(j_index, n_e*np.dtype(np.int32).itemsize)
	  j_ind = np.frombuffer(buffer, np.int32)

	  buffer = buf_from_mem(value, n_e*np.dtype(np.float64).itemsize)
	  val = np.frombuffer(buffer, np.float64)


	  #for row in range(20):
	  #  print 'Row', row
	  #  q = np.where(i_ind == row)
	  #  print q
	  #  for qq in q[0]:
	  #    print j_ind[qq], val[qq]
	  #  print rvec[row]


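	  # Assemble the sparse normal matrix A from the (i, j, value) triplets and solve
	  # A * flux = rvec for all stars simultaneously; the uncertainty estimate below
	  # takes the square root of the solution of A * x = 1.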
	  A = csc_matrix((val,(i_ind,j_ind)),shape=(nstars, nstars))

	  flux = np.float64(sp_linalg.spsolve(A, rvec))
	  dflux = np.sqrt(sp_linalg.spsolve(A, np.ones_like(rvec)))
	
	  print 'flux =', flux
	  print 'dflux =', dflux

	  cdiff = np.float64(diff).copy()

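	  # cu_make_residual presumably subtracts the fitted stellar models from cdiff,
	  # a copy of the difference image, so the cleaned frame can be inspected.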
	  cu_make_residual(np.int(profile_type), diff.shape[1], diff.shape[0], params.pdeg,
			  params.sdeg, c.shape[0], kernelIndex.shape[0],
			  np.int(kernelRadius), k0,
			  k1, extendedBasis,
			  psf_parameters, psf_0, psf_xd, psf_yd,
			  posx, posy, c64, flux, dflux, long(nstars), 16, 16, cdiff,
			  np.float64(inv_variance),np.int32(star_group_boundaries),
			  np.float64(detector_mean_positions_x),
			  np.float64(detector_mean_positions_y),star_group_boundaries.shape[0])

	  IO.write_image(cdiff,params.loc_output+os.path.sep+'p_clean_ref.fits')

	return flux, dflux
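The zero-copy pointer wrapping above relies on PyBuffer_FromMemory, which exists only in Python 2. As a rough self-contained sketch of the same idea (my own illustration, not part of this codebase), np.ctypeslib can view C-owned memory as a numpy array without copying:

import ctypes
import numpy as np

# Stand-in for memory that would be filled by a compiled routine.
n = 5
c_buf = (ctypes.c_double * n)(*range(n))
ptr = ctypes.cast(c_buf, ctypes.POINTER(ctypes.c_double))

# View the C memory as a numpy array without copying it.
arr = np.ctypeslib.as_array(ptr, shape=(n,))
print(arr)  # [0. 1. 2. 3. 4.]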
Example #16
0
def imsub_all_fits(params, reference='ref.fits'):

    #
    # Create the output directory if it doesn't exist
    #
    if not (os.path.exists(params.loc_output)):
        os.mkdir(params.loc_output)

    #
    # The degree of spatial shape changes has to be at least as
    # high as the degree of spatial photometric scale
    #
    if (params.sdeg < params.pdeg):
        print 'Increasing params.sdeg to ', params.pdeg
        params.sdeg = params.pdeg

    #
    # Print out the parameters for this run.
    #
    print 'Parameters:'
    for par in dir(params):
        print par, getattr(params, par)
    print

    #
    # Determine our list of images
    #
    all_files = os.listdir(params.loc_data)
    all_files.sort()
    files = []
    for f in all_files:
        print 'file', f
        if fnmatch.fnmatch(f, params.name_pattern):
            g = DS.Observation(params.loc_data + os.path.sep + f, params)
            del g.data
            del g.mask
            print 'fw', g.fw
            if g.fw > 0.0:
                files.append(g)
            print g.name, 'accepted'

    if len(files) < 3:
        print 'Only', len(files), 'files found matching', params.name_pattern
        print 'Exiting'
        sys.exit(0)

    #
    # Have we specified a registration template?
    #
    if params.registration_image:
        reg = DS.Observation(params.registration_image, params)
    else:
        reg = DS.EmptyBase()
        reg.fw = 999.0
        for f in files:
            if (f.fw < reg.fw) and (f.fw > params.reference_min_seeing) and (
                    f.sky < params.registration_max_background):
                reg = f

    print 'Registration image:', reg.name

    #
    # Register images
    #
    print 'Registering images'
    files_copy = [f for f in files]
    for f in files:
        print f.name
        if f == reg:
            f.image = f.data
            rf = params.loc_output + os.path.sep + 'r_' + f.name
            IO.write_image(f.image, rf)
        else:
            if not f.register(reg, params):
                files_copy.remove(f)
            # delete image arrays to save memory
            del f.image
            del f.mask
            del f.inv_variance
        del reg.data
        del reg.image
        del reg.mask
        del reg.inv_variance
    files = files_copy

    #
    # Write image names and dates to a file
    #
    if params.image_list_file:
        try:
            with open(params.loc_output + os.path.sep + params.image_list_file,
                      'w') as fid:
                for f in files:
                    date = None
                    if params.datekey:
                        date = IO.get_date(
                            params.loc_data + os.path.sep + f.name,
                            key=params.datekey) - 2450000
                    if date:
                        fid.write(f.name + '   %10.5f\n' % date)
                    else:
                        fid.write(f.name + '\n')
        except:
            raise

    #
    # Make the photometric reference image if we don't have it.
    # Find stamp positions if required.
    #
    if not (os.path.exists(params.loc_output + os.path.sep + reference)):
        print 'Reg = ', reg.name
        stamp_positions = make_reference(files,
                                         reg,
                                         params,
                                         reference_image=reference)
        ref = DS.Observation(params.loc_output + os.path.sep + reference,
                             params)
        mask, _ = IO.read_fits_file(params.loc_output + os.path.sep + 'mask_' +
                                    reference)
        variance, _ = IO.read_fits_file(params.loc_output + os.path.sep +
                                        'var_' + reference)
        ref.mask = mask
        ref.inv_variance = 1.0 / variance
        ref.register(reg, params)
    else:
        ref = DS.Observation(params.loc_output + os.path.sep + reference,
                             params)
        if os.path.exists(params.loc_output + os.path.sep + 'mask_' +
                          reference):
            mask, _ = IO.read_fits_file(params.loc_output + os.path.sep +
                                        'mask_' + reference)
        else:
            mask = np.ones_like(ref.data)
        ref.mask = mask
        ref.register(reg, params)
        stamp_positions = None
        if params.use_stamps:
            stamp_file = params.loc_output + os.path.sep + 'stamp_positions'
            if os.path.exists(stamp_file):
                stamp_positions = np.genfromtxt(stamp_file)
            else:
                stars = PF.choose_stamps(ref, params)
                stamp_positions = stars[:, 0:2]
                np.savetxt(stamp_file, stamp_positions)

    pm = params.pixel_max
    params.pixel_max *= 0.9
    ref.mask *= IM.compute_saturated_pixel_mask(ref.image, params)
    params.pixel_max = pm
    ref.blur = IM.boxcar_blur(ref.image)
    if params.mask_cluster:
        ref.mask *= IM.mask_cluster(ref.image, ref.mask, params)

    #
    # Detect stars and compute the PSF if we are doing photometry
    #
    star_positions = None
    sky = 0.0
    if params.do_photometry:
        star_file = params.loc_output + os.path.sep + 'star_positions'
        psf_file = params.loc_output + os.path.sep + 'psf.fits'
        if not (os.path.exists(psf_file)) or not (os.path.exists(star_file)):
            stars = PH.compute_psf_image(params, ref, psf_image=psf_file)
            star_positions = stars[:, 0:2]
            star_sky = stars[:, 4]
        if os.path.exists(star_file):
            star_positions = np.genfromtxt(star_file)
            star_sky = star_positions[:, 0] * 0.0
        else:
            np.savetxt(star_file, star_positions)

    print 'sky =', sky

    #
    # If we have pre-determined star positions
    #
    if params.star_file:
        stars = np.genfromtxt(params.star_file)
        star_positions = stars[:, 1:3]
        if params.star_reference_image:
            star_ref, h = IO.read_fits_file(params.star_reference_image)
            offset, _, _ = register_translation(star_ref, ref.image, 1000)
            dy, dx = offset
            #dy, dx = IM.positional_shift(ref.image,star_ref)
            print 'position shift =', dx, dy
            star_positions[:, 0] -= dx
            star_positions[:, 1] -= dy
        np.savetxt(star_file, star_positions)

    #
    # If we are using a CPU, group the stars by location
    #
    print 'Group_check'
    print 'params.do_photometry', params.do_photometry
    print 'params.use_GPU', params.use_GPU
    if params.do_photometry:
        star_group_boundaries = None
        detector_mean_positions_x = None
        detector_mean_positions_y = None
        star_unsort_index = None
        star_sort_index,star_group_boundaries,detector_mean_positions_x,detector_mean_positions_y = \
             PH.group_stars_ccd(params,star_positions,params.loc_output+os.path.sep+reference)
        star_positions = star_positions[star_sort_index]
        star_sky = star_sky[star_sort_index]
        star_unsort_index = np.argsort(star_sort_index)

    #
    # Do photometry of the reference image
    #
    if params.do_photometry:
        ref_flux_file = params.loc_output + os.path.sep + 'ref.flux'
        if not (os.path.exists(ref_flux_file)):
            result = difference_image(
                ref,
                ref,
                params,
                stamp_positions=stamp_positions,
                psf_image=psf_file,
                star_positions=star_positions,
                star_group_boundaries=star_group_boundaries,
                detector_mean_positions_x=detector_mean_positions_x,
                detector_mean_positions_y=detector_mean_positions_y,
                star_sky=star_sky)
            if isinstance(result.flux, np.ndarray):
                print 'ungrouping fluxes'
                result.flux = result.flux[star_unsort_index].copy()
                result.dflux = result.dflux[star_unsort_index].copy()
                np.savetxt(ref_flux_file,
                           np.vstack((result.flux, result.dflux)).T)

    #
    # Process images
    #

    if params.make_difference_images:

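        # When not using the GPU and n_parallel > 1, farm the per-image differencing
        # out to a multiprocessing pool; otherwise process the images serially.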
        if not (params.use_GPU) and (params.n_parallel > 1):

            pool = Pool(params.n_parallel)
            pool.map(
                process_image_helper,
                itertools.izip(
                    files,
                    itertools.repeat(
                        (ref, params, stamp_positions, star_positions,
                         star_group_boundaries, star_unsort_index,
                         detector_mean_positions_x,
                         detector_mean_positions_y))))

        else:

            for f in files:
                process_image(
                    f, (ref, params, stamp_positions, star_positions,
                        star_group_boundaries, star_unsort_index,
                        detector_mean_positions_x, detector_mean_positions_y))

    return files
Example #17
0
 def set_mask(self, value):
     self._mask = value
     mask_name = os.path.join(self.output_dir, 'sm_' + self.name)
     IO.write_image(self._mask, mask_name)
Example #18
0
def compute_psf_image(params,g,psf_deg=1,psf_rad=8,
                      star_file='phot.mags',psf_image='psf.fits',edge_dist=5):
    iraf.digiphot()
    iraf.daophot()
    fp = params.loc_output+os.path.sep

    f_im = g.image*g.mask
    f = fp+'temp.ref.fits'
    write_image(f_im,f)

    g.fw = np.max([1.5,g.fw])
    g.fw = np.min([0.5*params.psf_max_radius,g.fw])

    logfile = fp+'psf.log'

    fd = fits.getdata(f)
    xmax = fd.shape[0] - edge_dist
    ymax = fd.shape[1] - edge_dist
    

    for d in ['temp.stars','temp.phot','temp.phot1','temp.phot2','temp.pst',
              'temp.opst','temp.opst2',
              'temp.psf.fits','temp.psf1.fits','temp.psf2.fits','temp.psg',
              'temp.psg2','temp.psg3','temp.psg5','temp.rej','temp.rej2',
              'temp.sub.fits','temp.sub1.fits',
              'temp.sub2.fits','temp.opst1','temp.opst3','temp.rej3',
              'temp.nst','temp.stars1','ref.mags',psf_image,'temp.als',
              'temp.als2']:
            if os.path.exists(fp+d):
                os.remove(fp+d)


    # locate stars
    iraf.daofind(image=f,output=fp+'temp.stars',interactive='no',verify='no',
                 threshold=3,sigma=params.star_detect_sigma,fwhmpsf=g.fw,
                 datamin=1,datamax=params.pixel_max,
                 epadu=params.gain,readnoise=params.readnoise,
                 noise='poisson')

    if params.star_file:
        als_recenter = 'no'
        all_template_stars = np.genfromtxt(params.star_file)
        all_new_stars = np.genfromtxt(fp+'temp.stars')
        
        if all_new_stars.shape[0] > params.star_file_number_match:
            new_stars = all_new_stars[all_new_stars[:,2].argsort()][:params.star_file_number_match]
        else:
            new_stars = all_new_stars

        if all_template_stars.shape[0] > params.star_file_number_match:
            template_stars = all_template_stars[all_template_stars[:,3].argsort()][:params.star_file_number_match]
        else:
            template_stars = all_template_stars

        tx, ty = compute_xy_shift(new_stars,template_stars[:,1:3],0.5,
                                  degree=params.star_file_transform_degree)

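        # Map the template-star positions onto this image's pixel grid using the fitted
        # polynomial shift (tx, ty); xx and yy are offsets normalised by the mean
        # coordinates of the newly detected stars.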
        if params.star_file_has_magnitudes:
            star_positions = all_template_stars[:,1:4]
            xx = (star_positions[:,0]-np.mean(new_stars[:,0]))/np.mean(new_stars[:,0])
            yy = (star_positions[:,1]-np.mean(new_stars[:,1]))/np.mean(new_stars[:,1])
            for m in range(params.star_file_transform_degree+1):
                for n in range(params.star_file_transform_degree+1-m):
                    star_positions[:,0] += tx[m,n]* xx**m * yy**n
                    star_positions[:,1] += ty[m,n]* xx**m * yy**n
            np.savetxt(fp+'temp.stars.1',star_positions,fmt='%10.3f %10.3f %10.3f')
        else:
            star_positions = all_template_stars[:,1:3]
            xx = (star_positions[:,0]-np.mean(new_stars[:,0]))/np.mean(new_stars[:,0])
            yy = (star_positions[:,1]-np.mean(new_stars[:,1]))/np.mean(new_stars[:,1])
            for m in range(params.star_file_transform_degree+1):
                for n in range(params.star_file_transform_degree+1-m):
                    star_positions[:,0] += tx[m,n]* xx**m * yy**n
                    star_positions[:,1] += ty[m,n]* xx**m * yy**n
            np.savetxt(fp+'temp.stars.1',star_positions,fmt='%10.3f %10.3f')
        all_template_stars[:,1] = star_positions[:,0]
        all_template_stars[:,2] = star_positions[:,1]
            
    else:
        
        als_recenter = 'yes'
        star_positions = np.genfromtxt(fp+'temp.stars')
        np.savetxt(fp+'temp.stars.1',star_positions[:,:2],fmt='%10.3f %10.3f')

    iraf.phot(image=f,output=fp+'temp.phot',coords=fp+'temp.stars.1',interactive='no',
              verify='no',
              sigma=params.star_detect_sigma,fwhmpsf=g.fw,apertures=g.fw,
              datamin=1,
              datamax=2*params.pixel_max,epadu=params.gain,annulus=3*g.fw,
              dannulus=3.0,
              readnoise=params.readnoise,noise='poisson')

    print 'fw = ',g.fw
    #fw = np.max([4.0,fw])
    #print 'fw = ',fw


    # select PSF stars
    iraf.pstselect(image=f,photfile=fp+'temp.phot',pstfile=fp+'temp.pst',maxnpsf=40,
                   interactive='no',verify='no',datamin=1,fitrad=2.0,
                   datamax=params.pixel_max,epadu=params.gain,psfrad=np.max([3.0,g.fw]),
                   readnoise=params.readnoise,noise='poisson')

    if params.star_file and params.star_file_has_magnitudes:

        # We don't need to do the photometry - only make the PSF

        # Initial PSF estimate to generate PSF groups
        #psfrad=3*np.max([g.fw,1.8])
        iraf.psf(image=f,photfile=fp+'temp.phot',pstfile=fp+'temp.pst',psfimage=fp+'temp.psf',
                 function=params.psf_profile_type,opstfile=fp+'temp.opst',
                 groupfile=fp+'temp.psg',
                 interactive='no',
                 verify='no',varorder=0 ,psfrad=2*np.max([g.fw,1.8]),
                 datamin=-10000,datamax=0.95*params.pixel_max,
                 scale=1.0)

        # construct a file of the psf neighbour stars
        slist = []
        psf_stars = np.loadtxt(fp+'temp.opst',usecols=(0,1,2))

        for star in range(psf_stars.shape[0]):

            xp = psf_stars[star,1]
            yp = psf_stars[star,2]
            xmin = np.max([np.int(xp-10*g.fw),0])
            xmax = np.min([np.int(xp+10*g.fw),f_im.shape[0]])
            ymin = np.max([np.int(yp-10*g.fw),0])
            ymax = np.min([np.int(yp+10*g.fw),f_im.shape[1]])

            p = star_positions[np.logical_and(np.logical_and(star_positions[:,0]>xmin,
                                                             star_positions[:,0]<xmax),
                                              np.logical_and(star_positions[:,1]>ymin,
                                                             star_positions[:,1]<ymax))]
            slist.append(p)

        group_stars = np.concatenate(slist)
        np.savetxt(fp+'temp.nst',group_stars,fmt='%10.3f %10.3f %10.3f')
        
        
        # subtract PSF star neighbours
        iraf.substar(image=f,photfile=fp+'temp.nst',psfimage=fp+'temp.psf',
                     exfile=fp+'temp.opst',fitrad=2.0,
                     subimage=fp+'temp.sub1',verify='no',datamin=1,
                     datamax=params.pixel_max,epadu=params.gain,
                     readnoise=params.readnoise,noise='poisson')
        
        # final PSF
        iraf.psf(image=fp+'temp.sub1',photfile=fp+'temp.phot',pstfile=fp+'temp.opst',
                 psfimage=psf_image,psfrad=2*g.fw,
                 function=params.psf_profile_type,opstfile=fp+'temp.opst2',
                 groupfile=fp+'temp.psg2',
                 interactive='no',
                 verify='no',varorder=0,
                 datamin=1,datamax=0.95*params.pixel_max,
                 scale=1.0)

        np.savetxt(fp+'ref.mags',all_template_stars,fmt='%7d %10.3f %10.3f %10.3f')
        stars = all_template_stars

    else:




        # initial PSF estimate
        iraf.psf(image=f,photfile=fp+'temp.phot',pstfile=fp+'temp.pst',psfimage=fp+'temp.psf',
                 function=params.psf_profile_type,opstfile=fp+'temp.opst',
                 groupfile=fp+'temp.psg1',
                 interactive='no',
                 verify='no',varorder=0 ,psfrad=2*g.fw,
                 datamin=1,datamax=0.95*params.pixel_max,
                 scale=1.0)


        # separation distance of near neighbours
        separation = np.max([rewrite_psg(fp+'temp.psg1',fp+'temp.psg2'),3])
        print 'separation = ',separation

        # subtract all stars using truncated PSF
        iraf.allstar(image=f,photfile=fp+'temp.phot',psfimage=fp+'temp.psf',
                     allstarfile=fp+'temp.als',rejfile='',
                     subimage=fp+'temp.sub',verify='no',psfrad=2*g.fw,fitrad=2.0,
                     recenter='yes',groupsky='yes',fitsky='yes',sannulus=7,wsannulus=10,
                     datamin=1,datamax=params.pixel_max,
                     epadu=params.gain,readnoise=params.readnoise,
                     noise='poisson')

        if params.star_file:

            os.system('cp '+fp+'temp.phot '+fp+'temp.phot2') 

        else:
        
            # locate new stars
            iraf.daofind(image=fp+'temp.sub',output=fp+'temp.stars1',interactive='no',verify='no',
                         threshold=3,sigma=params.star_detect_sigma,fwhmpsf=2*g.fw,
                         datamin=1,datamax=params.pixel_max,
                         epadu=params.gain,readnoise=params.readnoise,
                         noise='poisson')


            # magnitudes for new stars
            iraf.phot(image=fp+'temp.sub',output=fp+'temp.phot1',coords=fp+'temp.stars1',
                      interactive='no',
                      verify='no',sigma=params.star_detect_sigma,
                      fwhmpsf=g.fw,datamin=1,
                      datamax=params.pixel_max,epadu=params.gain,
                      readnoise=params.readnoise,noise='poisson')

            # join star lists together
            iraf.pconcat(infiles=fp+'temp.phot,'+fp+'temp.phot1',outfile=fp+'temp.phot2')

        # new PSF estimate to generate PSF groups
        iraf.psf(image=f,photfile=fp+'temp.phot2',pstfile=fp+'temp.pst',psfimage=fp+'temp.psf2',
                 function=params.psf_profile_type,opstfile=fp+'temp.opst2',
                 groupfile=fp+'temp.psg3',
                 interactive='no',
                 verify='no',varorder=0 ,psfrad=2*g.fw,
                 datamin=-10000,datamax=0.95*params.pixel_max,
                 scale=1.0)

        # magnitudes for PSF group stars
        iraf.nstar(image=f,groupfile=fp+'temp.psg3',psfimage=fp+'temp.psf2',
                   nstarfile=fp+'temp.nst',
                   rejfile='',verify='no',psfrad=2*g.fw,fitrad=2.0,
                   recenter='no',
                   groupsky='yes',fitsky='yes',sannulus=7,wsannulus=10,
                   datamin=1,datamax=params.pixel_max,
                   epadu=params.gain,readnoise=params.readnoise,noise='poisson')

        # subtract PSF star neighbours
        iraf.substar(image=f,photfile=fp+'temp.nst',psfimage=fp+'temp.psf2',
                     exfile=fp+'temp.opst2',fitrad=2.0,
                     subimage=fp+'temp.sub1',verify='no',datamin=1,
                     datamax=params.pixel_max,epadu=params.gain,
                     readnoise=params.readnoise,noise='poisson')
        
        # final PSF
        iraf.psf(image=fp+'temp.sub1',photfile=fp+'temp.phot2',
                 pstfile=fp+'temp.opst2',
                 psfimage=psf_image,psfrad=2*g.fw,
                 function=params.psf_profile_type,opstfile=fp+'temp.opst3',
                 groupfile=fp+'temp.psg5',
                 interactive='no',
                 verify='no',varorder=0,
                 datamin=1,datamax=0.95*params.pixel_max,
                 scale=1.0)

        # final photometry

        
        iraf.allstar(image=g.fullname,photfile=fp+'temp.phot2',psfimage=psf_image,
                     allstarfile=fp+'temp.als2',rejfile='',
                     subimage=fp+'temp.sub2',verify='no',psfrad=2*g.fw,
                     recenter=als_recenter,groupsky='yes',fitsky='yes',sannulus=7,
                     wsannulus=10,fitrad=2.0,
                     datamin=params.pixel_min,datamax=params.pixel_max,
                     epadu=params.gain,readnoise=params.readnoise,
                     noise='poisson')

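        # Read the PSFMAG value recorded in the header of the allstar output file.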
        psfmag = 10.0
        for line in open(fp+'temp.als2','r'):
            sline = line.split()
            if sline[1] == 'PSFMAG':
                psfmag = float(sline[3])
                break

        if params.star_file:
            
            iraf.psort(infiles=fp+'temp.als2',field='ID')
            os.system('cp '+fp+'temp.als2 '+fp+'temp.als3') 

        else:
        
            selection = 'XCE >= '+str(edge_dist)+' && XCE <= '+str(xmax)+' && YCE >= '+str(edge_dist)+' && YCE <= '+str(ymax)+' && MAG != INDEF'
            iraf.pselect(infiles=fp+'temp.als2',outfiles=fp+'temp.als3',expr=selection)
            iraf.psort(infiles=fp+'temp.als3',field='MAG')   
            iraf.prenumber(infile=fp+'temp.als3')
            
        s = iraf.pdump(infiles=fp+'temp.als3',Stdout=1,
                       fields='ID,XCENTER,YCENTER,MAG,MERR,MSKY,SHARPNESS,CHI',expr='yes')
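        # IRAF writes INDEF for unmeasured values; substitute a faint sentinel
        # magnitude (22.00) so every field parses as a float.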
        sf = [k.replace('INDEF','22.00') for k in s]
        stars = np.zeros([len(sf),5])
        for i, line in enumerate(sf):
            stars[i,:] = np.array(map(float,sf[i].split()[1:6]))

        s = iraf.pdump(infiles=fp+'temp.als3',Stdout=1,
                       fields='ID,XCENTER,YCENTER,MAG,MERR,SHARPNESS,CHI,MSKY',expr='yes')
        sf = [k.replace('INDEF','22.00') for k in s]
        with open(fp+'ref.mags','w') as fid:
            for s in sf:
                fid.write(s+'\n')

    return stars
Example #19
0
 def set_inv_variance(self, value):
     self._inv_variance = value
     inv_variance_name = os.path.join(self.output_dir, 'iv_' + self.name)
     IO.write_image(self._inv_variance, inv_variance_name)