def main(data_title, sextractor_path, origin, destination):
    properties = p.object_params_imacs(data_title)
    data_dir = properties['data_dir']
    if sextractor_path is not None:
        if not os.path.isdir(sextractor_path):
            os.mkdir(sextractor_path)
        do_sextractor = True
        ap_diams_sex = p.load_params(f'param/aperture_diameters_fors2')
    else:
        do_sextractor = False

    origin_path = data_dir + origin
    destination_path = data_dir + destination
    u.mkdir_check(destination_path)
    filters = next(os.walk(origin_path))[1]

    for fil in filters:
        u.mkdir_check(destination_path + fil)
        if do_sextractor:
            if not os.path.isdir(sextractor_path + fil):
                os.mkdir(sextractor_path + fil)
        files = os.listdir(origin_path + fil + "/")
        for file_name in files:
            if file_name[-5:] == '.fits':
                science_origin = origin_path + fil + "/" + file_name
                science_destination = destination_path + fil + "/" + file_name
                print(science_origin)
                # Divide by exposure time to get an image in counts/second.
                f.divide_by_exp_time(file=science_origin,
                                     output=science_destination)
                if do_sextractor:
                    copyfile(science_origin,
                             sextractor_path + fil + "/" + file_name)

        # Write a sextractor file for photometric testing of the data from the upper chip.
        if do_sextractor:
            # Write a csv table of file properties to each filter directory.
            tbl = f.fits_table(input_path=sextractor_path + fil,
                               output_path=sextractor_path + fil + "/" + fil +
                               "_fits_tbl.csv",
                               science_only=False)
            # TODO: Rewrite to use psf-fitting (in FORS2 pipeline as well)
            for i, d in enumerate(ap_diams_sex):
                f.write_sextractor_script(table=tbl,
                                          output_path=sextractor_path + fil +
                                          "/sextract_aperture_" + str(d) +
                                          ".sh",
                                          sex_params=['c', 'PHOT_APERTURES'],
                                          sex_param_values=['im.sex',
                                                            str(d)],
                                          cat_name='sextracted_' + str(d),
                                          cats_dir='aperture_' + str(d),
                                          criterion='chip',
                                          value='CHIP1')

    if os.path.isfile(origin_path + data_title + '.log'):
        copyfile(origin_path + data_title + '.log',
                 destination_path + data_title + ".log")
    u.write_log(path=destination_path + data_title + ".log",
                action='Divided by exposure time.')
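
# A minimal sketch of the normalisation that f.divide_by_exp_time is assumed to perform,
# taking the exposure time from the standard EXPTIME keyword; the pipeline helper may do
# additional header bookkeeping. The function name here is illustrative only.
from astropy.io import fits


def divide_by_exp_time_sketch(file, output):
    with fits.open(file) as hdul:
        exp_time = hdul[0].header['EXPTIME']
        # Convert the image from counts to counts/second.
        hdul[0].data = hdul[0].data / exp_time
        hdul.writeto(output, overwrite=True)
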
def main(data_title: str, origin: str, destination: str, redo: bool = False):
    properties = p.object_params_imacs(data_title)
    path = properties['data_dir']

    origin_path = path + origin
    astrometry_path = path + destination
    u.mkdir_check(astrometry_path)

    keys = p.load_params('param/keys')
    key = keys['astrometry']

    reduced_list = os.listdir(origin_path)
    astrometry_list = os.listdir(astrometry_path)

    if redo:
        to_send = list(filter(lambda f: f[-5:] == '.fits', reduced_list))
    else:
        to_send = list(
            filter(lambda f: f[-5:] == '.fits' and f not in astrometry_list,
                   reduced_list))

    filters = list(filter(lambda f: os.path.isdir(origin_path + f),
                          os.listdir(origin_path)))

    for f in filters:
        reduced_path_filter = origin_path + f + '/'
        astrometry_path_filter = astrometry_path + f + '/'
        print(f'To send to Astrometry.net from {f}:')
        for file in to_send:
            print('\t' + file)

        for file in to_send:
            hdu = fits.open(reduced_path_filter + file)
            header = hdu[0].header
            ra = header['RA-D']
            dec = header['DEC-D']
            scale_upper = header['SCALE'] + 0.1
            scale_lower = header['SCALE'] - 0.1
            hdu.close()
            print('Sending to Astrometry.net:', file)
            os.system(f'python /astrometry-client.py '
                      f'--apikey {key} '
                      f'-u {reduced_path_filter}{file} '
                      f'-w '
                      f'--newfits {astrometry_path_filter}{file} '
                      f'--ra {ra} --dec {dec} --radius {1.} '
                      f'--scale-upper {scale_upper} '
                      f'--scale-lower {scale_lower} '
                      f'--private --no_commercial')

    if os.path.isfile(origin_path + data_title + '.log'):
        shutil.copy(origin_path + data_title + '.log',
                    astrometry_path + data_title + ".log")
    u.write_log(path=astrometry_path + data_title + ".log",
                action=f'Astrometry solved using 3-astrometry.py')
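
# The os.system call above builds a shell command by string concatenation; as a sketch, the
# same Astrometry.net client invocation can be expressed with subprocess.run, which avoids
# shell-quoting problems if a path contains spaces. The client path and flags are copied
# from the call above and assumed to behave identically.
import subprocess


def send_to_astrometry_sketch(key, input_file, output_file, ra, dec, scale_lower, scale_upper):
    subprocess.run(['python', '/astrometry-client.py',
                    '--apikey', key,
                    '-u', input_file,
                    '-w',
                    '--newfits', output_file,
                    '--ra', str(ra), '--dec', str(dec), '--radius', '1.0',
                    '--scale-upper', str(scale_upper),
                    '--scale-lower', str(scale_lower),
                    '--private', '--no_commercial'],
                   check=True)
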
def main(data_title, origin, destination):
    properties = p.object_params_xshooter(data_title)
    path = properties['data_dir']

    destination = path + destination
    u.mkdir_check(destination)
    origin = path + origin
    u.mkdir_check(origin)

    dirs = next(os.walk(origin))[1]

    left = 27
    right = 537
    top = 526
    bottom = 15

    for fil in dirs:
        u.mkdir_check(destination + fil)
        print('Trimming files in:')
        print(origin + fil)
        files = os.listdir(origin + fil)
        files.sort()

        for i, file in enumerate(files):
            # Trim the noisy edges from each image and save it with a _trim suffix.
            new_path = destination + fil + "/" + file.replace(
                ".fits", "_trim.fits")
            file_path = origin + fil + "/" + file
            f.trim_file(file_path,
                        left=left,
                        right=right,
                        top=top,
                        bottom=bottom,
                        new_path=new_path)

    copyfile(origin + data_title + ".log", destination + data_title + ".log")
    u.write_log(path=destination + data_title + ".log",
                action='Edges trimmed using 4-trim.py\n')
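
# A minimal sketch of the trimming that f.trim_file is assumed to perform: slice the image
# to the given pixel limits and write it out, shifting the WCS reference pixel so that sky
# coordinates stay correct. The real helper may differ in detail; names here are illustrative.
from astropy.io import fits


def trim_file_sketch(path, left, right, bottom, top, new_path):
    with fits.open(path) as hdul:
        hdul[0].data = hdul[0].data[bottom:top, left:right]
        if 'CRPIX1' in hdul[0].header:
            # Account for the removed border in the WCS reference pixel.
            hdul[0].header['CRPIX1'] -= left
            hdul[0].header['CRPIX2'] -= bottom
        hdul.writeto(new_path, overwrite=True)
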
def main(data_title, sextractor_path, origin, destination):
    properties = p.object_params_xshooter(data_title)
    data_dir = properties['data_dir']
    if sextractor_path is not None:
        if not os.path.isdir(sextractor_path):
            os.mkdir(sextractor_path)
        do_sextractor = True
    else:
        do_sextractor = False

    origin_path = data_dir + origin
    destination_path = data_dir + destination
    u.mkdir_check(destination_path)
    filters = next(os.walk(origin_path))[1]

    for fil in filters:
        u.mkdir_check(destination_path + fil)
        if do_sextractor:
            if not os.path.isdir(sextractor_path + fil):
                os.mkdir(sextractor_path + fil)
        files = os.listdir(origin_path + fil + "/")
        for file_name in files:
            if file_name[-5:] == '.fits':
                science_origin = origin_path + fil + "/" + file_name
                science_destination = destination_path + fil + "/" + file_name
                print(science_origin)
                # Divide by exposure time to get an image in counts/second.
                f.divide_by_exp_time(file=science_origin,
                                     output=science_destination)
                if do_sextractor:
                    copyfile(science_origin,
                             sextractor_path + fil + "/" + file_name)

    if os.path.isfile(origin_path + data_title + '.log'):
        copyfile(origin_path + data_title + '.log',
                 destination_path + data_title + ".log")
    u.write_log(path=destination_path + data_title + ".log",
                action=f'Divided by exposure time.')
def main(epoch, origin, destination):
    print("\nExecuting Python script pipeline_fors2/4.1-insert_test_synth.py, with:")
    print(f"\tepoch {epoch}")
    print(f"\torigin directory {origin}")
    print(f"\tdestination directory {destination}")
    print()

    epoch_params = p.object_params_fors2(obj=epoch)
    outputs = p.object_output_params(obj=epoch, instrument='FORS2')

    data_dir = epoch_params['data_dir']

    insert = epoch_params['test_synths']

    origin_path = data_dir + "analysis/sextractor/" + origin
    destination_path = data_dir + destination

    u.mkdir_check(destination_path)
    u.mkdir_check(destination_path + "science/")
    u.mkdir_check(destination_path + "backgrounds/")

    filters = outputs['filters']

    for fil in filters:
        f = fil[0]
        path_fil_output = destination_path + "science/" + fil + "/"
        path_fil_input = origin_path + fil + "/"
        u.mkdir_check(path_fil_output)
        u.mkdir_check(destination_path + "backgrounds/" + fil)
        zeropoint, _, airmass, _, extinction, _ = ph.select_zeropoint(obj=epoch,
                                                                      filt=fil,
                                                                      instrument='fors2',
                                                                      outputs=outputs)

        print(path_fil_input)
        # print(os.listdir(path_fil_input))

        for fits_file in filter(lambda f: f.endswith("_norm.fits"), os.listdir(path_fil_input)):
            print(fits_file)
            path_fits_file_input = path_fil_input + fits_file
            path_fits_file_output = path_fil_output + fits_file
            path_psf_model = path_fits_file_input.replace(".fits", "_psfex.psf")

            try:
                ph.insert_point_sources_to_file(file=path_fits_file_input,
                                                x=array(insert["ra"]),
                                                y=array(insert["dec"]),
                                                mag=insert[f"{f}_mag"],
                                                output=path_fits_file_output,
                                                zeropoint=zeropoint,
                                                extinction=extinction,
                                                airmass=airmass,
                                                world_coordinates=True,
                                                psf_model=path_psf_model
                                                )
            except ValueError:
                ph.insert_point_sources_to_file(file=path_fits_file_input,
                                                x=array(insert["ra"]),
                                                y=array(insert["dec"]),
                                                mag=insert[f"{f}_mag"],
                                                output=path_fits_file_output,
                                                zeropoint=zeropoint,
                                                extinction=extinction,
                                                airmass=airmass,
                                                world_coordinates=True,
                                                fwhm=fits.open(path_psf_model)[1].header['PSF_FWHM']
                                                )

    if os.path.isfile(origin_path + epoch + '.log'):
        copyfile(origin_path + epoch + '.log', destination_path + epoch + ".log")
    u.write_log(path=destination_path + epoch + ".log", action='Inserted synthetic point sources using 4.1-insert_test_synth.py.')
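
# A sketch of the photometric conversion underlying ph.insert_point_sources_to_file, assuming
# the usual calibration convention mag = zeropoint - 2.5*log10(counts/s) - extinction*airmass.
# The Gaussian stamp mirrors the FWHM fallback branch above; the helper itself prefers the
# PSFEx model when one exists. All names here are illustrative, not the pipeline's own.
import numpy as np
from astropy.convolution import Gaussian2DKernel


def synthetic_source_counts(mag, zeropoint, extinction, airmass):
    # Invert the calibration to get the total counts/second of the synthetic star.
    return 10 ** (-0.4 * (mag - zeropoint + extinction * airmass))


def gaussian_psf_stamp(fwhm_pix, counts):
    # Build a normalised Gaussian stamp scaled to the requested total counts.
    sigma = fwhm_pix / (2 * np.sqrt(2 * np.log(2)))
    kernel = Gaussian2DKernel(x_stddev=sigma)
    return counts * kernel.array / kernel.array.sum()
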
def main(data_dir, data_title, origin, destination, all_synths):
    print("\nExecuting Python script pipeline_fors2/5-background_subtract.py, with:")
    print(f"\tepoch {data_title}")
    print(f"\torigin directory {origin}")
    print(f"\tdestination directory {destination}")
    print()

    methods = ["ESO backgrounds only", "SExtractor backgrounds only", "polynomial fit", "Gaussian fit", "median value"]

    if all_synths:
        frame = 56
        method = "polynomial fit"
        degree = 5
        do_mask = True
        local = True
        global_sub = False
        trim_image = False
        recorrect_subbed = True
        eso_back = False

    else:
        frame = 200
        # frame_arcsec = 30 * units.arcsec
        # frame_deg = frame_arcsec.to(units.deg)

        eso_back = False

        _, method = u.select_option(message="Please select the background subtraction method.", options=methods,
                                 default="polynomial fit")
        degree = None
        if method == "polynomial fit":
            degree = u.user_input(message=f"Please enter the degree of {method} to use:", typ=int, default=3)
        elif method == "ESO backgrounds only":
            eso_back = True
        do_mask = False
        if method not in ["ESO backgrounds only", "SExtractor backgrounds only", "median value"]:
            do_mask = u.select_yn(message="Mask sources using SExtractor catalogue?", default=True)
        if method in ["polynomial fit", "Gaussian fit"]:
            local = u.select_yn(message="Use a local fit?", default=True)
        else:
            local = False
        global_sub = False
        trim_image = False
        recorrect_subbed = False
        if local:
            global_sub = u.select_yn(message="Subtract local fit from entire image?", default="n")
            if not global_sub:
                trim_image = u.select_yn(message="Trim images to subtracted region?", default="y")
                recorrect_subbed = u.select_yn(message="Re-normalise background of subtracted region?", default="y")

        # if not eso_back and method != "SExtractor backgrounds only":
        #     eso_back = u.select_yn(message="Subtract ESO Reflex fitted backgrounds first?", default=False)

    outputs = p.object_output_params(data_title, instrument='FORS2')

    data_dir = u.check_trailing_slash(data_dir)

    destination = u.check_trailing_slash(destination)
    destination = data_dir + destination
    u.mkdir_check_nested(destination)

    origin = u.check_trailing_slash(origin)
    science_origin = data_dir + origin + "science/"
    print(science_origin)

    filters = outputs['filters']
    frb_params = p.object_params_frb(obj=data_title[:-2])
    epoch_params = p.object_params_fors2(obj=data_title)

    background_origin_eso = ""
    if eso_back:
        background_origin_eso = data_dir + "/" + origin + "/backgrounds/"

    if method == "SExtractor backgrounds only":
        background_origin = f"{data_dir}{origin}backgrounds_sextractor/"
    elif method == "polynomial fit":
        background_origin = f"{destination}backgrounds/"  # f"{destination}backgrounds_{method.replace(' ', '')}_degree_{degree}_local_{local}_globalsub_{global_sub}/"
    else:
        background_origin = f"{destination}backgrounds/"  # f"{destination}backgrounds_{method.replace(' ', '')}_local_{local}_globalsub_{global_sub}/"

    trimmed_path = ""
    if trim_image:
        trimmed_path = f"{data_dir}{origin}trimmed_to_background/"
        u.mkdir_check_nested(trimmed_path)

    ra = frb_params["burst_ra"]
    dec = frb_params["burst_dec"]

    if all_synths:
        ras = epoch_params["test_synths"]["ra"]
        decs = epoch_params["test_synths"]["dec"]
    else:
        ras = [ra]
        decs = [dec]

    for fil in filters:
        trimmed_path_fil = ""
        if trim_image:
            trimmed_path_fil = f"{trimmed_path}{fil}/"
            u.mkdir_check(trimmed_path_fil)
        background_fil_dir = f"{background_origin}{fil}/"
        u.mkdir_check_nested(background_fil_dir)
        science_destination_fil = f"{destination}science/{fil}/"
        u.mkdir_check_nested(science_destination_fil)
        files = os.listdir(science_origin + fil + "/")
        for file_name in files:
            if file_name.endswith('.fits'):
                new_file = file_name.replace("norm", "bg_sub")
                new_path = f"{science_destination_fil}/{new_file}"
                print("NEW_PATH:", new_path)
                science = science_origin + fil + "/" + file_name
                # First subtract ESO Reflex background images
                # frame = (frame_deg / f.get_pixel_scale(file=science, astropy_units=True)[1]).to(f.pix).value
                if eso_back:
                    background_eso = background_origin_eso + fil + "/" + file_name.replace("SCIENCE_REDUCED",
                                                                                           "PHOT_BACKGROUND_SCI")

                    ff.subtract_file(file=science, sub_file=background_eso, output=new_path)
                    science_image = new_path

                if method != "ESO backgrounds only":

                    print(ra, dec)

                    print("Science image:", science)
                    science_image = fits.open(science)
                    print("Science file:", science_image)
                    wcs_this = WCS(header=science_image[0].header)

                    if method == "SExtractor backgrounds only":
                        background = background_origin + fil + "/" + file_name + "_back.fits"
                        print("Background image:", background)
                    else:
                        if method == "median value":
                            print(science_image[0].data.shape)
                            _, background_value, _ = sigma_clipped_stats(science_image[0].data)
                            background = deepcopy(science_image)

                            background[0].data = np.full(shape=science_image[0].data.shape, fill_value=background_value)
                            background_path = background_origin + fil + "/" + file_name.replace("SCIENCE_REDUCED",
                                                                                                "PHOT_BACKGROUND_MEDIAN")

                            # Next do background fitting.
                        else:

                            background = deepcopy(science_image)
                            background[0].data = np.zeros(background[0].data.shape)
                            background_path = background_origin + fil + "/" + file_name.replace("SCIENCE_REDUCED",
                                                                                                "PHOT_BACKGROUND_FITTED")

                            for i, ra in enumerate(ras):
                                dec = decs[i]
                                x, y = wcs_this.all_world2pix(ra, dec, 0)
                                print(x, y)

                                bottom, top, left, right = ff.subimage_edges(data=science_image[0].data, x=x, y=y,
                                                                             frame=frame)

                                if do_mask:
                                    # Produce a pixel mask that roughly masks out the true sources in the image so that
                                    # they don't get fitted.
                                    mask_max = 10
                                    _, pixel_scale = ff.get_pixel_scale(science_image)
                                    sextractor = Table.read(
                                        f"{data_dir}analysis/sextractor/4-divided_by_exp_time/{fil}/{file_name.replace('.fits', '_psf-fit.cat')}",
                                        format='ascii.sextractor')
                                    weights = np.ones(shape=science_image[0].data.shape)

                                    for obj in filter(
                                            lambda o: left < o["X_IMAGE"] < right and bottom < o["Y_IMAGE"] < top,
                                            sextractor):
                                        mask_rad = min(int(obj["A_WORLD"] * obj["KRON_RADIUS"] / pixel_scale), mask_max)
                                        x_prime = int(np.round(obj["X_IMAGE"]))
                                        y_prime = int(np.round(obj["Y_IMAGE"]))
                                        weights[y_prime - mask_rad:y_prime + mask_rad,
                                                x_prime - mask_rad:x_prime + mask_rad] = 0.0

                                    plt.imshow(weights, origin="lower")
                                    plt.savefig(
                                        background_origin + fil + "/" + file_name.replace("norm.fits", "mask.png"))
                                else:
                                    weights = None

                                background_this = fit_background_fits(image=science_image,
                                                                      model_type=method[:method.find(" ")],
                                                                      deg=degree, local=local,
                                                                      global_sub=global_sub,
                                                                      centre_x=x, centre_y=y, frame=frame,
                                                                      weights=weights)
                                background[0].data += background_this[0].data

                                if recorrect_subbed:
                                    offset = get_median_background(image=science,
                                                                   ra=epoch_params["renormalise_centre_ra"],
                                                                   dec=epoch_params["renormalise_centre_dec"], frame=50,
                                                                   show=False,
                                                                   output=new_path[
                                                                          :new_path.find("bg_sub")] + "renorm_patch_")
                                    print("RECORRECT_SUBBED:", recorrect_subbed)
                                    print("SUBTRACTING FROM BACKGROUND:", offset)
                                    print(bottom, top, left, right)
                                    print(background[0].data[bottom:top, left:right].shape)
                                    print(np.median(background[0].data[bottom:top, left:right]))
                                    background[0].data[bottom:top, left:right] -= offset
                                    print(np.median(background[0].data[bottom:top, left:right]))

                                if trim_image:
                                    print("TRIMMED_PATH_FIL:", trimmed_path_fil)

                                    science_image = ff.trim_file(path=science_image, left=left, right=right, top=top,
                                                                 bottom=bottom,
                                                                 new_path=trimmed_path_fil + file_name.replace(
                                                                     "norm.fits",
                                                                     "trimmed_to_back.fits"))
                                    print("Science after trim:", science_image)

                                    background = ff.trim_file(path=background, left=left, right=right, top=top,
                                                              bottom=bottom,
                                                              new_path=background_path)

                            print("Writing background to:")
                            print(background_path)
                            background.writeto(background_path, overwrite=True)

                    print("SCIENCE:", science_image)
                    print("BACKGROUND:", background)

                    subbed = ff.subtract_file(file=science_image, sub_file=background, output=new_path)

                    # # TODO: check if regions overlap
                    #
                    # plt.hist(subbed[0].data[int(y - frame + 1):int(y + frame - 1),
                    #          int(x - frame + 1):int(x + frame - 1)].flatten(),
                    #          bins=10)
                    # plt.savefig(new_path[:new_path.find("bg_sub")] + "histplot.png")
                    # plt.close()

    copyfile(data_dir + "/" + origin + "/" + data_title + ".log", destination + data_title + ".log")
    u.write_log(path=destination + data_title + ".log",
                action=f'Backgrounds subtracted using 5-background_subtract.py with method {method}\n')
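
# A sketch of the local fit that fit_background_fits is assumed to perform for the
# "polynomial fit" method: fit a 2-D polynomial to a frame-sized cutout around the target
# position, optionally down-weighting masked source pixels, and return the fitted surface
# evaluated on that cutout. Names and the exact fitter choice are illustrative.
import numpy as np
from astropy.modeling import models, fitting


def fit_local_background_sketch(data, centre_x, centre_y, frame, deg=3, weights=None):
    bottom, top = int(centre_y) - frame, int(centre_y) + frame
    left, right = int(centre_x) - frame, int(centre_x) + frame
    cutout = data[bottom:top, left:right]
    y, x = np.mgrid[bottom:top, left:right]
    w = None if weights is None else weights[bottom:top, left:right]
    fit = fitting.LevMarLSQFitter()(models.Polynomial2D(degree=deg), x, y, cutout, weights=w)
    # Return the background model evaluated over the cutout grid.
    return fit(x, y)
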
def main(data_title: str, show: bool = False):
    properties = p.object_params_xshooter(data_title)
    path = properties['data_dir']

    master_path = path + '/1-master_calibs/'
    reduced_path = path + '/2-reduced/'
    defringed_path = path + '/3-defringed/'

    u.mkdir_check(defringed_path)

    # Define fringe measurement points.

    # high_xs = [267, 267, 267, 267, 267, 267, 267, 267]
    # high_ys = [279, 279, 279, 279, 279, 279, 279, 279]
    # low_xs = [266, 266, 266, 267, 267, 270, 274, 273]
    # low_ys = [293, 295, 298, 301, 303, 305, 303, 292]

    high_xs = [219, 380, 426, 515, 156, 495, 310]
    high_ys = [166, 369, 185, 33, 59, 195, 70]
    low_xs = [219, 380, 424, 474, 160, 500, 315]
    low_ys = [120, 342, 213, 39, 34, 160, 35]

    # n_random = 1000
    #
    # high_xs = np.random.random(n_random)
    # high_xs *= 507
    # high_xs += 29
    # high_xs = np.round(high_xs)
    # high_xs = high_xs.astype(int)
    #
    # high_ys = np.random.random(n_random)
    # high_ys *= 200
    # high_ys += 20
    # high_ys = np.round(high_ys)
    # high_ys = high_ys.astype(int)
    #
    # low_xs = np.random.random(n_random)
    # low_xs *= 507
    # low_xs += 29
    # low_xs = np.round(low_xs)
    # low_xs = low_xs.astype(int)
    #
    # low_ys = np.random.random(n_random)
    # low_ys *= 200
    # low_ys += 20
    # low_ys = np.round(low_ys)
    # low_ys = low_ys.astype(int)

    filters = filter(lambda f: os.path.isdir(reduced_path + f),
                     os.listdir(reduced_path))
    for f in filters:
        print('Constructing fringe map for', f)
        filter_path = reduced_path + f + '/'
        defringed_filter_path = defringed_path + f + '/'
        master_filter_path = master_path + f + '/'
        u.mkdir_check(defringed_filter_path)

        files = list(
            filter(lambda file: file[-5:] == '.fits', os.listdir(filter_path)))
        # Construct fringe map by median-combining science images.
        fringe_map = ff.stack(files,
                              directory=filter_path,
                              output=master_filter_path + 'fringe_map.fits',
                              stack_type='median',
                              inherit=False,
                              show=show,
                              normalise=True)
        fringe_map = fringe_map[0].data
        map_differences = []

        for i in range(len(high_xs)):
            # Take the median of a small cutout around each high point and each low point of the fringe pattern.
            high_y = high_ys[i]
            high_x = high_xs[i]
            high_cut = fringe_map[high_y - 1:high_y + 1, high_x - 1:high_x + 1]
            high = np.nanmedian(high_cut)

            low_y = low_ys[i]
            low_x = low_xs[i]
            low_cut = fringe_map[low_y - 1:low_y + 1, low_x - 1:low_x + 1]
            low = np.nanmedian(low_cut)

            map_differences.append(high - low)

        # Defringe only the FITS files that went into the fringe map.
        for file in files:
            print(file)
            hdu = fits.open(filter_path + file)
            data = hdu[0].data
            image_differences = []
            factors = []
            for i in range(len(high_xs)):
                high_y = high_ys[i]
                high_x = high_xs[i]
                high_cut = data[high_y - 2:high_y + 2, high_x - 2:high_x + 2]
                high = np.nanmedian(high_cut)

                low_y = low_ys[i]
                low_x = low_xs[i]
                low_cut = data[low_y - 2:low_y + 2, low_x - 2:low_x + 2]
                low = np.nanmedian(low_cut)

                difference = high - low
                image_differences.append(difference)
                factor = difference / map_differences[i]
                factors.append(factor)
            used_factor = np.nanmedian(factors)
            adjusted_map = fringe_map * used_factor
            data = data - adjusted_map
            hdu[0].data = data

            norm = pl.nice_norm(data)
            if show:
                plt.imshow(data, norm=norm, origin='lower')
                plt.show()

            hdu.writeto(defringed_filter_path + file, overwrite=True)

        if show:
            norm = pl.nice_norm(fringe_map)
            plt.imshow(fringe_map, norm=norm, origin='lower')
            plt.scatter(high_xs, high_ys)
            plt.scatter(low_xs, low_ys)
            plt.show()

    copyfile(reduced_path + data_title + ".log",
             defringed_path + data_title + ".log")
    u.write_log(path=defringed_path + data_title + ".log",
                action='Images defringed using a median fringe map.\n')
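
# A sketch of the fringe-map construction that ff.stack(..., stack_type='median', normalise=True)
# is assumed to carry out: normalise each science frame by its own median, then take the
# pixel-wise median across the stack, so astronomical sources reject out and the stable fringe
# pattern remains. The pipeline's own stacker may differ in normalisation details.
import numpy as np
from astropy.io import fits


def build_fringe_map_sketch(paths):
    frames = []
    for path in paths:
        with fits.open(path) as hdul:
            data = hdul[0].data.astype(float)
        frames.append(data / np.nanmedian(data))
    return np.nanmedian(frames, axis=0)
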
def main(data_title: str, show: bool = False):
    properties = p.object_params_xshooter(data_title)
    path = properties['data_dir']

    raw_path = path + '/0-data_with_raw_calibs/'
    master_path = path + '/1-master_calibs/'
    reduced_path = path + '/2-reduced/'

    u.mkdir_check(raw_path)
    u.mkdir_check(master_path)
    u.mkdir_check(reduced_path)

    files = os.listdir(raw_path)
    biases = []
    flats = {}
    science = {}

    airmasses = {}

    print('Creating lists of files.')

    for file in files:
        if file[-5:] == '.fits':
            hdu = fits.open(raw_path + file)
            header = hdu[0].header
            obj = header['OBJECT']
            f = header['ESO INS FILT1 NAME']
            if 'BIAS' in obj:
                biases.append(file)
            elif 'FLAT' in obj:
                if f not in flats:
                    flats[f] = []
                flats[f].append(file)
            else:
                if f not in science:
                    science[f] = []
                    airmasses[f] = []
                science[f].append(file)

    # Stack biases into a master bias.
    print('Processing biases.')
    ff.stack(biases, output=master_path + 'master_bias.fits', stack_type='median', directory=raw_path, inherit=False)
    master_bias = CCDData.read(master_path + f'master_bias.fits', unit='du')

    # Loop through filters.
    for f in science:

        flats_filter = flats[f]
        master_path_filter = master_path + f + '/'
        u.mkdir_check(master_path_filter)

        print(f'Processing flats for filter {f}.')
        flats_ccds = []
        for flat in flats_filter:
            flat_ccd = CCDData.read(raw_path + flat, unit='du')
            # Subtract master bias from each flat.
            flat_ccd = ccdproc.subtract_bias(ccd=flat_ccd, master=master_bias)
            flats_ccds.append(flat_ccd)
        # Stack debiased flats.
        master_flat = ff.stack(flats_ccds, output=None, stack_type='median', inherit=False)
        master_flat.writeto(master_path_filter + f'master_flat.fits', overwrite=True)
        master_flat = CCDData.read(master_path_filter + f'master_flat.fits', unit='du')

        science_filter = science[f]

        reduced_path_filter = reduced_path + f + '/'
        u.mkdir_check(reduced_path_filter)
        # Loop through the science images.
        for image in science_filter:
            print(f'Reducing {image}.')
            image_ccd = CCDData.read(raw_path + image, unit='du')
            if show:
                norm = ImageNormalize(image_ccd.data, interval=ZScaleInterval(), stretch=SqrtStretch())
                plt.imshow(image_ccd.data, origin='lower', norm=norm)
                plt.title('Unreduced image')
                plt.show()
            # Subtract master bias from science image.
            image_ccd = ccdproc.subtract_bias(image_ccd, master_bias)
            if show:
                norm = ImageNormalize(image_ccd.data, interval=ZScaleInterval(), stretch=SqrtStretch())
                plt.imshow(image_ccd.data, origin='lower', norm=norm)
                plt.title('After debiasing')
                plt.show()
            # Divide by master flat.
            image_ccd = ccdproc.flat_correct(image_ccd, master_flat)
            if show:
                norm = ImageNormalize(image_ccd.data, interval=ZScaleInterval(), stretch=SqrtStretch())
                plt.imshow(image_ccd.data, origin='lower', norm=norm)
                plt.title('After flatfielding')
                plt.show()
            # Convert back to HDU object for saving.
            image_ccd = image_ccd.to_hdu()
            image_ccd.writeto(reduced_path_filter + image, overwrite=True)
    u.write_log(path=reduced_path + data_title + ".log", action=f'Reduced using 1-reduce.py')
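
# For reference, the bias and flat stacking done through ff.stack can also be written directly
# with ccdproc.combine, which this reduction already depends on; this is only a sketch of an
# equivalent call, not a claim about what ff.stack does internally. The unit 'adu' is assumed.
import ccdproc
from astropy.nddata import CCDData


def combine_frames_sketch(paths, output):
    ccds = [CCDData.read(path, unit='adu') for path in paths]
    master = ccdproc.combine(ccds, method='median')
    master.write(output, overwrite=True)
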
def main(data_title: str, show: bool = False):
    properties = p.object_params_imacs(data_title)
    path = properties['data_dir']

    raw_path = path + '/0-raw_data_with_calibs/'
    master_path = path + '/1-master_calibs/'
    reduced_path = path + '/2-reduced/'

    u.mkdir_check(raw_path)
    u.mkdir_check(master_path)
    u.mkdir_check(reduced_path)

    for file in os.listdir(path):
        if os.path.isfile(path + file):
            shutil.move(path + file, raw_path + file)

    files = os.listdir(raw_path)

    biases = []
    flats = {}
    science = {}

    airmasses = {}

    print('Creating lists of files.')

    param_dict = {}

    for file in files:
        if file[-5:] == '.fits':
            hdu = fits.open(raw_path + file)
            header = hdu[0].header
            obj = header['OBJECT']
            f = header['FILTER']
            if 'bias' in obj:
                biases.append(file)
            elif 'flat' in obj:
                if f not in flats:
                    flats[f] = []
                flats[f].append(file)
            else:
                if f not in science:
                    science[f] = []
                    airmasses[f] = []
                science[f].append(file)
                airmasses[f].append(header['AIRMASS'])

    # Loop through chip numbers.
    for chip in range(1, 9):
        print(f'Processing biases for chip {chip}.')
        # Get only those bias frames relevant to the current chip.
        biases_chip = list(filter(lambda name: name[-6] == str(chip), biases))
        bias_hdus = []
        for bias in biases_chip:
            # Correct NAXIS1 & 2 in headers; they are too large in the unreduced files, leading to padding by astropy.
            bias = ff.correct_naxis(file=raw_path + bias, x=2048, y=4096, write=False)
            bias_hdus.append(bias)
        # Stack biases.
        print(biases_chip)
        ff.stack(bias_hdus, output=master_path + f'master_bias_c{chip}.fits', stack_type='median', directory=raw_path)
        master_bias = CCDData.read(master_path + f'master_bias_c{chip}.fits', unit='du')

        # Loop through filters.
        for f in science:

            flats_filter = flats[f]
            master_path_filter = master_path + f + '/'
            u.mkdir_check(master_path_filter)

            print(f'Processing flats for chip {chip}, filter {f}.')
            # Get only those flats relevant to the current chip and filter.
            flats_chip = list(filter(lambda name: name[-6] == str(chip), flats_filter))
            flats_chip_ccds = []
            for flat in flats_chip:
                flat_ccd = CCDData.read(raw_path + flat, unit='du')
                # Correct NAXIS1 & 2 in headers; they are too large in the unreduced files, leading to padding by astropy.
                flat_ccd = ff.correct_naxis(file=flat_ccd, x=2048, y=4096, write=False)
                # Subtract master bias from each flat.
                flat_ccd = ccdproc.subtract_bias(ccd=flat_ccd, master=master_bias)
                flats_chip_ccds.append(flat_ccd)
            # Stack debiased flats.
            master_flat = ff.stack(flats_chip_ccds, output=None, stack_type='median')
            master_flat.writeto(master_path_filter + f'master_flat_c{chip}.fits', overwrite=True)
            master_flat = CCDData.read(master_path_filter + f'master_flat_c{chip}.fits', unit='du')

            science_filter = science[f]

            param_dict[f + '_airmass_mean'] = float(np.nanmean(airmasses[f]))
            param_dict[f + '_airmass_err'] = float(2 * np.nanstd(airmasses[f]))
            param_dict[f + '_n_frames'] = len(science_filter)
            param_dict[f + '_n_exposures'] = len(science_filter) / 8

            reduced_path_filter = reduced_path + f + '/'
            u.mkdir_check(reduced_path_filter)
            # Loop through the science images taken on the current chip.
            for image in filter(lambda name: name[-6] == str(chip), science_filter):
                print(f'Reducing {image}.')
                image_ccd = CCDData.read(raw_path + image, unit='du')
                if show:
                    norm = ImageNormalize(image_ccd.data, interval=ZScaleInterval(), stretch=SqrtStretch())
                    plt.imshow(image_ccd.data, origin='lower', norm=norm)
                    plt.title('Unreduced image')
                    plt.show()
                # Correct NAXIS1 & 2 in header; they are too large in the unreduced files, leading to padding by astropy.
                image_ccd = ff.correct_naxis(file=image_ccd, x=2048, y=4096, write=False)
                if show:
                    norm = ImageNormalize(image_ccd.data, interval=ZScaleInterval(), stretch=SqrtStretch())
                    plt.imshow(image_ccd.data, origin='lower', norm=norm)
                    plt.title('After NAXIS correction')
                    plt.show()
                # Subtract master bias from science image.
                image_ccd = ccdproc.subtract_bias(image_ccd, master_bias)
                if show:
                    norm = ImageNormalize(image_ccd.data, interval=ZScaleInterval(), stretch=SqrtStretch())
                    plt.imshow(image_ccd.data, origin='lower', norm=norm)
                    plt.title('After debiasing')
                    plt.show()
                # Divide by master flat.
                image_ccd = ccdproc.flat_correct(image_ccd, master_flat)
                if show:
                    norm = ImageNormalize(image_ccd.data, interval=ZScaleInterval(), stretch=SqrtStretch())
                    plt.imshow(image_ccd.data, origin='lower', norm=norm)
                    plt.title('After flatfielding')
                    plt.show()
                # Convert back to HDU object for saving.
                image_ccd = image_ccd.to_hdu()
                image_ccd.writeto(reduced_path_filter + image, overwrite=True)
    u.write_log(path=reduced_path + data_title + ".log", action=f'Reduced using 1-reduce.py')
    p.add_params(path + '/output_values', param_dict)
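
# These stages read like standalone pipeline scripts; a minimal sketch of the kind of argparse
# wrapper that would drive the IMACS reduction main() above from the command line. Argument
# names are illustrative, not necessarily those used by the original pipeline.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Debias, flat-field and NAXIS-correct IMACS imaging.')
    parser.add_argument('--data_title', help='Name of the epoch, as used in the parameter files.')
    parser.add_argument('--show', action='store_true', help='Display each image at every reduction stage.')
    args = parser.parse_args()
    main(data_title=args.data_title, show=args.show)
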
def main(origin_dir, output_dir, data_title, sextractor_path):
    print("\nExecuting Python script pipeline_fors2/3-trim.py, with:")
    print(f"\tepoch {data_title}")
    print(f"\torigin directory {origin_dir}")
    print(f"\toutput directory {output_dir}")
    print()

    # If this is None, we don't want the SExtractor components to be performed.
    if sextractor_path is not None:
        if not os.path.isdir(sextractor_path):
            os.mkdir(sextractor_path)
        do_sextractor = True
        print(os.getcwd())
    else:
        do_sextractor = False

    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    if not os.path.isdir(output_dir + "backgrounds/"):
        os.mkdir(output_dir + "backgrounds/")
    if not os.path.isdir(output_dir + "science/"):
        os.mkdir(output_dir + "science/")

    wdir = origin_dir + "backgrounds/"

    epoch_params = p.object_params_fors2(obj=data_title)
    outputs = p.object_output_params(obj=data_title)

    fils = outputs["filters"]

    edged = False

    up_left = 0
    up_right = 0
    up_bottom = 0
    up_top = 0

    dn_left = 0
    dn_right = 0
    dn_bottom = 0
    dn_top = 0

    for fil in fils:
        print(output_dir + "backgrounds/" + fil)
        if not os.path.isdir(output_dir + "backgrounds/" + fil):
            os.mkdir(output_dir + "backgrounds/" + fil)
        print('Finding background files in:')
        print(wdir + fil)
        files = os.listdir(wdir + fil)
        files.sort()
        if not edged:
            # Find borders of noise frame using backgrounds.
            # First, make sure that the background we're using is for the top chip.
            i = 0
            while f.get_chip_num(wdir + fil + "/" + files[i]) != 1:
                i += 1
            up_left, up_right, up_bottom, up_top = f.detect_edges(wdir + fil +
                                                                  "/" +
                                                                  files[i])
            # Ditto for the bottom chip.
            i = 0
            while f.get_chip_num(wdir + fil + "/" + files[i]) != 2:
                i += 1
            dn_left, dn_right, dn_bottom, dn_top = f.detect_edges(wdir + fil +
                                                                  "/" +
                                                                  files[i])
            up_left = up_left + 5
            up_right = up_right - 5
            up_top = up_top - 5
            dn_left = dn_left + 5
            dn_right = dn_right - 5
            dn_bottom = dn_bottom + 5
            print('Upper chip:')
            print(up_left, up_right, up_top, up_bottom)
            print('Lower:')
            print(dn_left, dn_right, dn_top, dn_bottom)

            edged = True

        for i, file in enumerate(files):
            print(f'{i} {file}')

        for i, file in enumerate(files):
            new_path = output_dir + "backgrounds/" + fil + "/" + file.replace(
                ".fits", "_trim.fits")
            path = wdir + fil + "/" + file

            print(f'{i} {file}')

            # Split the files into upper CCD and lower CCD
            if f.get_chip_num(path) == 1:
                print('Upper Chip:')
                f.trim_file(path,
                            left=up_left,
                            right=up_right,
                            top=up_top,
                            bottom=up_bottom,
                            new_path=new_path)
            elif f.get_chip_num(path) == 2:
                print('Lower Chip:')
                f.trim_file(path,
                            left=dn_left,
                            right=dn_right,
                            top=dn_top,
                            bottom=dn_bottom,
                            new_path=new_path)
            else:
                raise ValueError(
                    'Invalid chip ID; could not trim based on upper or lower chip.'
                )

    # Repeat for science images

    wdir = origin_dir + "science/"

    fils = os.listdir(wdir)

    for fil in fils:
        print(output_dir + "science/" + fil)
        if do_sextractor:
            if not os.path.isdir(sextractor_path + fil):
                os.mkdir(sextractor_path + fil)
        if not os.path.isdir(output_dir + "science/" + fil):
            os.mkdir(output_dir + "science/" + fil)

        files = os.listdir(wdir + fil)
        files.sort()

        for i, file in enumerate(files):
            print(f'{i} {file}')

        for i, file in enumerate(files):
            # Split the files into upper CCD (chip 1) and lower CCD (chip 2), based on the chip number in the header.
            new_file = file.replace(".fits", "_trim.fits")
            new_path = output_dir + "science/" + fil + "/" + new_file
            path = wdir + fil + "/" + file
            f.change_header(file=path, key='GAIN', value=0.8)
            f.change_header(file=path, key='SATURATE', value=65535.)
            if f.get_chip_num(path) == 1:
                print('Upper Chip:')
                f.trim_file(path,
                            left=up_left,
                            right=up_right,
                            top=up_top,
                            bottom=up_bottom,
                            new_path=new_path)
                if do_sextractor:
                    copyfile(new_path, sextractor_path + fil + "/" + new_file)

            elif f.get_chip_num(path) == 2:
                print('Lower Chip:')
                f.trim_file(path,
                            left=dn_left,
                            right=dn_right,
                            top=dn_top,
                            bottom=dn_bottom,
                            new_path=new_path)

    try:
        copyfile(origin_dir + data_title + ".log",
                 output_dir + data_title + ".log")
    except FileNotFoundError:
        print("Previous log not found.")
    u.write_log(path=output_dir + data_title + ".log",
                action='Edges trimmed using 3-trim.py\n')
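
# A minimal sketch of what f.detect_edges is assumed to do: find the bounding box of the
# usable (finite, non-zero) region of a background frame, so the noisy border around each
# FORS2 chip can be cut away. The real helper may use a different criterion.
import numpy as np
from astropy.io import fits


def detect_edges_sketch(path):
    data = fits.open(path)[0].data
    good = np.isfinite(data) & (data != 0)
    rows = np.where(good.any(axis=1))[0]
    cols = np.where(good.any(axis=0))[0]
    # Returned in the same (left, right, bottom, top) order the trimming code above expects.
    return cols[0], cols[-1], rows[0], rows[-1]
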
def main(data_title, sextractor_path, origin, destination):
    print(
        "\nExecuting Python script pipeline_fors2/4-divide_by_exp_time.py, with:"
    )
    print(f"\tepoch {data_title}")
    print(f"\tsextractor path {sextractor_path}")
    print(f"\torigin directory {origin}")
    print(f"\tdestination directory {destination}")
    print()

    properties = p.object_params_fors2(data_title)
    outputs = p.object_output_params(obj=data_title, instrument='FORS2')

    data_dir = properties['data_dir']
    if sextractor_path is not None:
        if not os.path.isdir(sextractor_path):
            os.mkdir(sextractor_path)
        do_sextractor = True
    else:
        do_sextractor = False

    origin_path = data_dir + origin
    destination_path = data_dir + destination

    u.mkdir_check(destination_path)
    u.mkdir_check(destination_path + "science/")
    u.mkdir_check(destination_path + "backgrounds/")

    filters = outputs['filters']

    for fil in filters:
        u.mkdir_check(destination_path + "science/" + fil)
        u.mkdir_check(destination_path + "backgrounds/" + fil)
        if do_sextractor:
            if not os.path.isdir(sextractor_path + fil):
                os.mkdir(sextractor_path + fil)
        files = os.listdir(origin_path + "science/" + fil + "/")
        for file_name in files:
            if file_name[-5:] == '.fits':
                science_origin = origin_path + "science/" + fil + "/" + file_name
                science_destination = destination_path + "science/" + fil + "/" + file_name.replace(
                    "trim", "norm")

                background_origin = origin_path + "backgrounds/" + fil + "/" + file_name.replace(
                    "SCIENCE_REDUCED", "PHOT_BACKGROUND_SCI")
                background_destination = destination_path + "backgrounds/" + fil + "/" + \
                                         file_name.replace("SCIENCE_REDUCED", "PHOT_BACKGROUND_SCI").replace("trim",
                                                                                                             "norm")

                print(science_origin)
                # Divide by exposure time to get an image in counts/second.
                ff.divide_by_exp_time(file=science_origin,
                                      output=science_destination)
                ff.divide_by_exp_time(file=background_origin,
                                      output=background_destination)
                if do_sextractor:
                    copyfile(
                        science_destination, sextractor_path + fil + "/" +
                        file_name.replace("trim", "norm"))

    if os.path.isfile(origin_path + data_title + '.log'):
        copyfile(origin_path + data_title + '.log',
                 destination_path + data_title + ".log")
    u.write_log(path=destination_path + data_title + ".log",
                action=f'Divided by exposure time.')
def main(data_title: 'str', delete_output: bool = True):
    print(
        "\nExecuting Python script pipeline_fors2/2-sort_after_esoreflex.py, with:"
    )
    print(f"\tepoch {data_title}")
    print()

    eso_dir = p.config['esoreflex_output_dir']
    if os.path.isdir(eso_dir):
        obj_params = p.object_params_fors2(data_title)
        data_dir = obj_params['data_dir']
        destination = data_dir + "2-sorted/"
        date = None
        output_values = p.object_output_params(obj=data_title,
                                               instrument='FORS2')
        mjd = int(output_values['mjd_obs'])
        obj = output_values['object']

        print(
            f"Looking for data with object '{obj}' and MJD of observation {mjd} inside {eso_dir}"
        )
        # Look for files with the appropriate object and MJD, as recorded in output_values

        # List directories in eso_output_dir; these are dates on which data was reduced using ESOReflex.
        eso_dirs = filter(lambda d: os.path.isdir(eso_dir + "/" + d),
                          os.listdir(eso_dir))
        for directory in eso_dirs:
            directory += "/"
            # List directories within 'reduction date' directories.
            # These should represent individual images reduced.
            print(f"Searching {eso_dir + directory}")
            eso_subdirs = filter(
                lambda d: os.path.isdir(eso_dir + directory + d) and "FORS2" in
                d, os.listdir(eso_dir + directory))
            for subdirectory in eso_subdirs:
                subdirectory += "/"
                subpath = eso_dir + "/" + directory + subdirectory
                print(f"\tSearching {subpath}")
                # Get the files within the image directory.
                files = filter(lambda d: os.path.isfile(subpath + d),
                               os.listdir(subpath))
                for file_name in files:
                    # Retrieve the target object name from the fits file.
                    file_path = subpath + file_name
                    file = fits.open(file_path)
                    file_obj = ff.get_object(file)
                    file_mjd = int(ff.get_header_attribute(file, 'MJD-OBS'))
                    file_filter = ff.get_filter(file)
                    # Check the object name and observation date against those of the epoch we're concerned with.
                    if file_obj == obj and file_mjd == mjd:
                        # Check which type of file we have.
                        date = directory[:-1]
                        if file_name[-28:] == "PHOT_BACKGROUND_SCI_IMG.fits":
                            file_destination = f"{destination}/backgrounds/"
                            suffix = "PHOT_BACKGROUND_SCI_IMG.fits"
                        elif file_name[-25:] == "OBJECT_TABLE_SCI_IMG.fits":
                            file_destination = f"{destination}/obj_tbls/"
                            suffix = "OBJECT_TABLE_SCI_IMG.fits"
                        elif file_name[-24:] == "SCIENCE_REDUCED_IMG.fits":
                            file_destination = f"{destination}/science/"
                            suffix = "SCIENCE_REDUCED_IMG.fits"
                        else:
                            file_destination = f"{destination}/sources/"
                            suffix = "SOURCES_SCI_IMG.fits"
                        # Make this directory, if it doesn't already exist.
                        u.mkdir_check(file_destination)
                        # Make a subdirectory by filter.
                        file_destination += file_filter + "/"
                        u.mkdir_check(file_destination)
                        # Title new file.
                        file_destination += f"{data_title}_{subdirectory[:-1]}_{suffix}"
                        # Copy file to new location.
                        print(
                            f"Copying: {file_path} to \n\t {file_destination}")
                        file.writeto(file_destination, overwrite=True)
                        if delete_output and os.path.isfile(file_destination):
                            os.remove(file_path)

        if not os.path.isfile(f"{destination}/{data_title}.log"):
            sh.copy(f"{data_dir}0-data_with_raw_calibs/{data_title}.log",
                    f"{destination}/{data_title}.log")
        u.write_log(path=destination + data_title + ".log",
                    action=f'Data reduced with ESOReflex.',
                    date=date)
        u.write_log(path=destination + data_title + ".log",
                    action='Files sorted using 2-sort_after_esoreflex.sh')

    else:
        print(f"ESO output directory '{eso_dir}' not found.")
        exit(1)
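
# A sketch of the header lookups behind ff.get_object, ff.get_header_attribute and
# ff.get_filter, using the standard OBJECT, MJD-OBS and ESO filter keywords seen elsewhere
# in these scripts; the pipeline helpers may apply additional fallbacks.
def get_object_sketch(hdul):
    return hdul[0].header.get('OBJECT')


def get_mjd_obs_sketch(hdul):
    return hdul[0].header.get('MJD-OBS')


def get_filter_sketch(hdul):
    # FORS2 records the filter in a hierarchical ESO keyword.
    return hdul[0].header.get('ESO INS FILT1 NAME')
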