Example #1
def prepare_for_spec(cluster_obj: cluster.ClusterObj):
    try:
        import ciao
    except ImportError:
        print("Must be running CIAO before running prepare_for_spec.")
        raise
    io.make_directory(cluster_obj.super_comp_dir)
    #io.make_directory(cluster_obj.sherpa_save_dir)
    cluster_obj.initialize_best_fits_file()
    print("Preparing files for spectral analysis and copying to {super_comp_dir} for offloading computation.".format(
        super_comp_dir=cluster_obj.super_comp_dir
    ))
    io.copy(cluster_obj.configuration_filename, cluster_obj.super_comp_cluster_config)
    for observation in cluster_obj.observations:
        print("Copying files for {obsid}".format(obsid=observation.id))
        io.copy(observation.clean, cluster_obj.acisI_clean_obs(observation.id))
        io.copy(observation.back, cluster_obj.backI_clean_obs(observation.id))
        io.copy(observation.aux_response_file, observation.arf_sc)
        io.copy(observation.redistribution_matrix_file, observation.rmf_sc)
        #io.copy(observation.effdtime, cluster_obj.effdtime_file_obs(observation.id))
        #io.copy(observation.effbtime, cluster_obj.effbtime_file_obs(observation.id))
        #io.copy(observation.scale_map_region_list, cluster_obj.scalemap_regionlist_file_obs(observation.id))
        io.copy(observation.acis_mask, observation.acis_mask_sc)

        exposure = ciao.get_exposure(observation.clean)

        io.write_contents_to_file(exposure, observation.exposure_time_file, binary=False)
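
The try/except guard at the top of prepare_for_spec is a reusable fail-fast pattern for detecting a missing CIAO environment. A minimal sketch of factoring it out (the helper name require_ciao is hypothetical, not part of the source):

def require_ciao(task_name):
    # Fail fast with a clear message when the CIAO python module is absent.
    # Assumes, as above, that `import ciao` only succeeds inside a CIAO session.
    try:
        import ciao  # noqa: F401
    except ImportError:
        print("CIAO must be initialized before running {}.".format(task_name))
        raise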
Example #2
def initialize_cluster(self):
    print("Initializing cluster object")
    self.get_cluster_info_from_user()
    io.make_directory(self.directory)
    self.write_cluster_data()
    io.make_initial_directories(self)
    print("Initialization complete.")
Example #3
def runpipe5(cluster):
    # This portion of the pypeline crops, bins, and energy-filters the combined images.
    combined_dir = cluster.combined_directory

    io.make_directory(combined_dir)

    while not io.file_exists(cluster.master_crop_file):
        print("Master crop file not found")
        run_ds9_for_master_crop(cluster)

    for observation in cluster.observations:

        # clean data
        infile = "{}[sky=region({})]".format(observation.clean,
                                             cluster.master_crop_file)
        outfile = cluster.temp_acisI_comb
        clobber = True

        rt.dmcopy.punlearn()
        rt.dmcopy(infile=infile, outfile=outfile, clobber=clobber)

        infile = "{}[bin sky=4][energy=700:8000]".format(
            cluster.temp_acisI_comb)
        outfile = observation.acisI_comb_img
        clobber = True

        print("ObsID: {}\t- Extracting just 0.7keV - 8keV.".format(
            observation.id))
        rt.dmcopy.punlearn()
        rt.dmcopy(infile=infile, outfile=outfile, clobber=clobber)

        # background
        infile = "{}[sky=region({})]".format(observation.back,
                                             cluster.master_crop_file)
        outfile = cluster.temp_backI_comb
        clobber = True

        rt.dmcopy.punlearn()
        rt.dmcopy(infile=infile, outfile=outfile, clobber=clobber)

        infile = "{}[bin sky=4][energy=700:8000]".format(
            cluster.temp_backI_comb)
        outfile = observation.backI_comb_img
        clobber = True

        rt.dmcopy.punlearn()
        rt.dmcopy(infile=infile, outfile=outfile, clobber=clobber)

        io.delete(cluster.temp_acisI_comb)
        io.delete(cluster.temp_backI_comb)

        make_mask_file(observation)
        make_cumulative_mask_file(cluster, observation)

    create_combined_images(cluster)
    make_nosrc_cropped_xray_sb(cluster)
Example #4
def runpipe5(cluster):
    print("runpipe5")
    from astropy.io import fits
    # This portion of the pypeline crops, bins, and energy-filters the combined images.
    combined_dir = cluster.combined_directory

    io.make_directory(combined_dir)

    while not io.file_exists(cluster.master_crop_file):
        print("Master crop file not found")
        run_ds9_for_master_crop(cluster)

    # the contents of this for loop should be refactored/replaced with the make_acisI_and_back function
    for observation in cluster.observations:
        infile = "{}[sky=region({})]".format(observation.clean, cluster.master_crop_file)
        outfile = cluster.temp_acisI_comb
        clobber = True

        rt.dmcopy.punlearn()
        rt.dmcopy(infile=infile, outfile=outfile, clobber=clobber)

        print("{} shape: {}".format(outfile, fits.open(outfile)[0].shape))

        infile = "{}[bin sky=4][energy=700:8000]".format(cluster.temp_acisI_comb)
        outfile = observation.acisI_comb_img
        clobber = True

        print("ObsID: {}\t- Extracting just 0.7keV - 8keV.".format(observation.id))
        rt.dmcopy.punlearn()
        rt.dmcopy(infile=infile, outfile=outfile, clobber=clobber)

        # background
        infile = "{}[sky=region({})]".format(observation.back, cluster.master_crop_file)
        outfile = cluster.temp_backI_comb
        clobber = True

        rt.dmcopy.punlearn()
        rt.dmcopy(infile=infile, outfile=outfile, clobber=clobber)

        infile = "{}[bin sky=4][energy=700:8000]".format(cluster.temp_backI_comb)
        outfile = observation.backI_comb_img
        clobber = True

        rt.dmcopy.punlearn()
        rt.dmcopy(infile=infile, outfile=outfile, clobber=clobber)

        io.delete(cluster.temp_acisI_comb)
        io.delete(cluster.temp_backI_comb)

        make_mask_file(observation)
        make_cumulative_mask_file(cluster, observation)

    create_combined_images(cluster)
    make_nosrc_cropped_xray_sb(cluster)
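
The comment at the top of the loop suggests replacing its body with a make_acisI_and_back function. A sketch of that refactor, reusing the rt and io calls exactly as they appear above (the function itself is hypothetical):

def make_acisI_and_back(observation, cluster):
    # Crop to the master crop region, then bin (sky=4) and keep 0.7-8 keV,
    # for both the clean events and the blank-sky background.
    pairs = [(observation.clean, cluster.temp_acisI_comb, observation.acisI_comb_img),
             (observation.back, cluster.temp_backI_comb, observation.backI_comb_img)]
    for source, temp, final in pairs:
        rt.dmcopy.punlearn()
        rt.dmcopy(infile="{}[sky=region({})]".format(source, cluster.master_crop_file),
                  outfile=temp, clobber=True)
        rt.dmcopy.punlearn()
        rt.dmcopy(infile="{}[bin sky=4][energy=700:8000]".format(temp),
                  outfile=final, clobber=True)
        io.delete(temp)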
Example #5
def prepare_to_merge_observations_from(cluster_obj):
    print("Preparing to merge the observations.")

    for observation in cluster_obj.observations:
        print("Preparing observation: {id}".format(id=observation.id))
        io.make_directory(observation.analysis_directory)
        io.make_directory(observation.reprocessing_directory)
        io.copytree(src=observation.primary_directory, dst=observation.analysis_directory)
        io.copytree(src=observation.secondary_directory, dst=observation.analysis_directory)

    return
Example #6
def initialize_cluster(name="", obsids=None, abundance=0.3, redshift=0.0, nH=0.0):
    obsids = obsids if obsids is not None else []  # avoid a shared mutable default argument
    clstr = cluster.ClusterObj(name=name, observation_ids=obsids, abundance=abundance,
                               redshift=redshift, hydrogen_column_density=nH,
                               data_directory=config.data_directory())
    print('Making initial cluster directory: {}'.format(clstr.directory))
    io.make_directory(clstr.directory)
    io.make_initial_directories(clstr)
    clstr.last_step_completed = 1
    print("Downloading cluster data.")
    download_data(clstr)
    clstr.last_step_completed = 2
    print("Merging observations.")
    merge_observations(clstr)
    clstr.last_step_completed = 3
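
A hypothetical invocation; the values are illustrative placeholders, not from the source, and nH is in whatever units ClusterObj expects:

initialize_cluster(name="A2029",
                   obsids=["4977"],
                   abundance=0.3,
                   redshift=0.0773,
                   nH=0.0325)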
Example #7
def actually_merge_observations_from(cluster):
    print("Merging observations from {}.".format(cluster.name))

    merged_directory = io.get_path('{}/merged_obs_evt2/'.format(
        cluster.directory))

    io.make_directory(merged_directory)

    os.chdir(merged_directory)
    merged_observations = []

    for observation in cluster.observation_list:
        evt2_file = "{}/{}/analysis/evt2.fits".format(cluster.directory,
                                                      observation)
        merged_observations.append(evt2_file)

    merged_lis = "{}/merged_obs.lis".format(merged_directory)
    io.write_contents_to_file("\n".join(merged_observations),
                              merged_lis,
                              binary=False)
    outroot = io.get_path("{}/{}".format(cluster.directory, cluster.name))

    infile = "@{infile}[ccd_id=0:3]".format(infile=merged_lis)  # for ACIS-I
    # infile = "@{infile}".format(infile=merged_lis) # for ACIS-I & ACIS-S

    xygrid = "1500:6500:4,1500:6500:4"

    if len(merged_observations) == 1:
        rt.fluximage.punlearn()
        rt.fluximage(infile=infile,
                     outroot=outroot,
                     xygrid=xygrid,
                     clobber=True)
        print("Only single observation, flux image created.")

    elif len(merged_observations) > 1:
        rt.merge_obs.punlearn()
        rt.merge_obs(infiles=infile,
                     outroot=outroot,
                     xygrid=xygrid,
                     clobber=True,
                     parallel=True,
                     nproc=12)
Example #8
def automated_cluster_init(batch_file):
    print("Automated cluster initialization using: {batch_file}".format(batch_file=batch_file))
    data_directory = config.data_directory()
    csv_clusters = io.get_cluster_info_from_csv(batch_file)
    for clstr in csv_clusters:
        cluster_obj = cluster.ClusterObj(name=clstr['name'],
                                         observation_ids=clstr['obsids'],
                                         data_directory=data_directory,
                                         abundance=clstr['abundance'],
                                         redshift=clstr['redshift'],
                                         hydrogen_column_density=clstr['hydrogen_column_density']
                                         )

        io.make_directory(cluster_obj.directory)
        cluster_obj.write_cluster_data()
        io.make_initial_directories(cluster_obj)
        cluster_obj.last_step_completed = 1
        download_data(cluster_obj)
        cluster_obj.last_step_completed = 2
        merge_observations(cluster_obj)
        cluster_obj.last_step_completed = 3
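
Judging by the keys read from each row above, the batch CSV presumably lists one cluster per line under a header along these lines (the exact format expected by io.get_cluster_info_from_csv is an assumption):

name,obsids,abundance,redshift,hydrogen_column_density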
Example #9
def make_temperature_map(cluster: cluster.ClusterObj, resolution, average=False):

    #coordinates = get_pixel_coordinates(cluster)
    # indices of this array are the region number minus 1
    # that is, region number 1 is coordinate array index 0
    # region 100 = coordinates[99]

    # high_res_offset = 0
    # med_res_offset = 1
    # low_res_offset = 2

    io.make_directory(cluster.output_dir)

    # resolution: 1 = low (offset 2), 2 = medium (offset 1), 3 = high (offset 0)
    offset = [None, 2, 1, 0][resolution]

    mask_fits = fits.open(cluster.combined_mask)
    mask = mask_fits[0].data

    scale_map_regions = cluster.scale_map_region_index
    temps_with_errors = cluster.average_temperature_fits if average else cluster.temperature_fits

    temperature_map = np.zeros(mask.shape)
    temperature_error_map = np.zeros(mask.shape)
    temperature_fractional_error_map = np.zeros(mask.shape)

    regions = temps_with_errors['region']
    temperatures = temps_with_errors['temperature']
    temp_error_plus = temps_with_errors['temp_err_plus']
    temp_error_minus = temps_with_errors['temp_err_minus']
    for i, region in enumerate(regions):
        if i % 1000 == 0:
            _update_completed_things(i, len(regions), "regions")
        coordinates = cluster.coordinates_for_scale_map_region(region, scale_map_regions)
        x = int(coordinates[0])
        y = int(coordinates[1])
        low_x = x - offset
        high_x = x + offset + 1
        low_y = y - offset
        high_y = y + offset + 1

        temperature_map[low_x:high_x, low_y:high_y] = temperatures[i]
        temperature_error_map[low_x:high_x, low_y:high_y] = (np.abs(temp_error_plus[i] -
                                                             temp_error_minus[i]))/2
        temperature_fractional_error_map[low_x:high_x, low_y:high_y] = \
            (temperature_error_map[x, y] / temperature_map[x, y]) * 100


    header = mask_fits[0].header
    # This header contains all coordinate information needed
    fits.writeto(cluster.temperature_map_filename,
                 temperature_map,
                 header,
                 overwrite=True)

    fits.writeto(cluster.temperature_error_map_filename,
                 temperature_error_map,
                 header,
                 overwrite=True)

    fits.writeto(cluster.temperature_fractional_error_map_filename,
                 temperature_fractional_error_map,
                 header,
                 overwrite=True)
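
Each region is painted as a square block of side 2*offset + 1 centered on its map pixel. A self-contained illustration of that slicing (all values made up):

import numpy as np

offset = 2                                  # low resolution: 5x5 blocks
temperature_map = np.zeros((10, 10))
x, y = 5, 5                                 # hypothetical region center
temperature_map[x - offset:x + offset + 1,
                y - offset:y + offset + 1] = 7.2
assert np.count_nonzero(temperature_map) == 25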
Example #10
def create_scale_map(cluster):
    target_sn = cluster.signal_to_noise

    mask = cluster.combined_mask_data

    cts_image = np.zeros(mask.shape)
    back_rescale = np.zeros(mask.shape)

    for obs in cluster.observations:
        cts_image += obs.acisI_combined_image
        t_obs = obs.acisI_combined_image_header['EXPOSURE']

        t_back = obs.backI_combined_image_header['EXPOSURE']

        back_rescale += (t_obs/t_back)*obs.backI_combined_image

    signal = cts_image - back_rescale

    signal[np.where(signal < 0)] = 0

    sz = signal.shape
    max_x = sz[0]
    max_y = sz[1]

    # radius_map = np.zeros(sz)

    scale_map = np.zeros(sz)
    sn_map = np.zeros(sz)

    pix_x = np.zeros(sz)
    pix_y = np.zeros(sz)

    for j in range(max_y):
        for i in range(max_x):
            pix_x[i, j] = float(i)
            pix_y[i, j] = float(j)

    max_radius = 100
    r = max_radius + 1  # +1 may be a remnant of IDL indexing
    #dr = 24.0
    min_dr = 0.125

    num_pix = max_x * max_y

    ci = 0
    counter = 0
    start_time = time.time()


    for cj in range(0, max_y):
        #print("{} out of {} pixels complete.".format(cj*ci, num_pix))
        _update_completed_things(cj*ci, num_pix, "pixels")
        for ci in range(0, max_x):
            if mask[ci, cj] == 1:
                delta_x = ci-pix_x
                delta_y = cj-pix_y

                # an array where each value is the distance away from ci,cj
                radius = np.sqrt(delta_x**2 + delta_y**2)

                dr = 24.0
                hilo = 0
                niter = 0

                while (dr > min_dr) and (niter < 100):
                    indices = np.where(radius <= r)
                    cts_map_total = np.sum(cts_image[indices])

                    if cts_map_total == 0:
                        counter += 1
                        _source_free_region(counter, ci*cj, num_pix)
                        sn_val = 0
                        hilo = -1
                    else:
                        backmap_tot = np.sum(back_rescale[indices])
                        signal_total = cts_map_total - backmap_tot
                        noise_total = np.sqrt(cts_map_total+backmap_tot)
                        sn_val = signal_total/noise_total
                    if float(sn_val) < float(target_sn):
                        if r > max_radius:
                            r = max_radius + 1

                            niter = 110
                            # exit by setting niter=110.
                            # (niter=100 means niter hit max niter.
                            # niter=110 means radius hit max radius)

                            sn_map[ci, cj] = 0
                            scale_map[ci, cj] = 0
                        else:
                            if hilo == 1:
                                dr *= 0.5
                            r += dr
                            hilo = -1
                    else:
                        snmapval = sn_map[ci, cj]
                        if (sn_val < snmapval) or (snmapval == 0.0):
                            sn_map[ci, cj] = sn_val
                            scale_map[ci, cj] = r
                        if hilo == -1:
                            dr *= 0.5
                        r -= dr
                        hilo = 1

                    niter += 1
                # end while

    end_time = time.time()
    print("Time elapsed {:0.2f} seconds.".format(end_time - start_time))

    io.make_directory(cluster.acb_dir)

    header = cluster.observations[0].acisI_combined_image_header

    io.write_numpy_array_to_fits(scale_map, cluster.scale_map_file, header=header)
    io.write_numpy_array_to_fits(sn_map, cluster.sn_map, header=header)
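
The while loop grows or shrinks r until the signal-to-noise inside the radius reaches target_sn, with S/N = (counts - background) / sqrt(counts + background). The criterion in isolation (the standalone function is illustrative):

import numpy as np

def signal_to_noise(cts_total, back_total):
    # S/N within a radius, exactly as computed in the while loop above
    return (cts_total - back_total) / np.sqrt(cts_total + back_total)

print(signal_to_noise(400.0, 100.0))  # ~13.4; the radius stops growing once target_sn is met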
Example #11
def create_scale_map_in_parallel(cluster: cluster.ClusterObj):
    mask = cluster.combined_mask_data

    cts_image = np.zeros(mask.shape)
    back_rescale = np.zeros(mask.shape)

    for obs in cluster.observations:
        cts_image += obs.acisI_combined_image
        t_obs = obs.acisI_combined_image_header['EXPOSURE']

        t_back = obs.backI_combined_image_header['EXPOSURE']

        back_rescale += (t_obs / t_back) * obs.backI_combined_image

    signal = cts_image - back_rescale

    signal[np.where(signal < 0)] = 0

    sz = signal.shape
    max_x = sz[0]
    max_y = sz[1]

    io.make_directory(cluster.acb_dir)
    cluster.initialize_scale_map_csv()

    pix_x = np.zeros(sz)
    pix_y = np.zeros(sz)

    for j in range(max_y):
        for i in range(max_x):
            pix_x[i, j] = float(i)
            pix_y[i, j] = float(j)

    num_pix = max_x * max_y

    start_time = time.time()

    indices = np.vstack(np.where(mask == 1)).T
    # chunk so each iteration spawns ~cpu_count processes; the max() guards
    # against images with fewer masked pixels than cores
    num_index_lists = max(1, indices.shape[0] // mp.cpu_count())
    index_lists = np.array_split(indices, num_index_lists)

    num_iterations = len(index_lists)

    for i, index_list in enumerate(index_lists):
        if i % 100 == 0:
            print("{} of {} iterations complete.".format(i, num_iterations))

        processes = [mp.Process(target=calculate_radius_at_index,
                                args=(index, cluster, pix_x, pix_y, cts_image, num_pix, back_rescale))
                     for index in index_list]

        for process in processes:
            process.start()

        for process in processes:
            process.join()

    cluster.write_scale_map_csv_to_fits()

    end_time = time.time()
    print("Time elapsed {:0.2f} seconds.".format(end_time - start_time))
Example #12
def create_global_response_file_for(cluster, obsid, region_file):
    observation = cluster.observation(obsid)
    #min_counts = 525

    obs_analysis_dir = observation.analysis_directory
    global_response_dir = "{}/globalresponse/".format(obs_analysis_dir)
    io.make_directory(global_response_dir)

    clean = observation.clean
    back = observation.back

    pbk0 = io.get_filename_matching("{}/acis*pbk0*.fits".format(obs_analysis_dir))[0]
    bad_pixel_file = io.get_filename_matching("{}/bpix1_new.fits".format(obs_analysis_dir))[0]

    rt.ardlib.punlearn()

    rt.acis_set_ardlib(badpixfile=bad_pixel_file)

    mask_file = io.get_filename_matching("{}/*msk1.fits".format(obs_analysis_dir))[0]  # take the first match, as with pbk0 above

    make_pcad_lis(cluster, obsid)

    infile = "{}[sky=region({})]".format(clean, region_file)
    outroot = "{}/acisI_region_0".format(global_response_dir)
    weight = True
    correct_psf = False
    pcad = "@{}/pcad_asol1.lis".format(obs_analysis_dir)
    combine = False
    bkg_file = ""
    bkg_resp = False
    group_type = "NUM_CTS"
    binspec = 1
    clobber = True

    rt.specextract(infile=infile, outroot=outroot, weight=weight, correctpsf=correct_psf,
                   asp=pcad, combine=combine, mskfile=mask_file, bkgfile=bkg_file, bkgresp=bkg_resp,
                   badpixfile=bad_pixel_file, grouptype=group_type, binspec=binspec, clobber=clobber)

    infile = "{}[sky=region({})][bin pi]".format(back, region_file)
    outfile = "{}/acisI_back_region_0.pi".format(global_response_dir)
    clobber = True

    rt.dmextract.punlearn()
    print("Running: dmextract infile={}, outfile={}, clobber={}".format(infile, outfile, clobber))
    rt.dmextract(infile=infile, outfile=outfile, clobber=clobber)

    rt.dmhedit.punlearn()
    infile = "{}/acisI_region_0.pi".format(global_response_dir)
    filelist = ""
    operation = "add"
    key = "BACKFILE"
    value = outfile

    rt.dmhedit(infile=infile, filelist=filelist, operation=operation, key=key, value=value)


    aux_response_file = '{global_response_directory}/acisI_region_0.arf'.format(
        global_response_directory=observation.global_response_directory)

    redist_matrix_file = '{global_response_directory}/acisI_region_0.rmf'.format(
        global_response_directory=observation.global_response_directory)

    io.copy(aux_response_file, observation.aux_response_file)
    io.copy(redist_matrix_file, observation.redistribution_matrix_file)
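
The dmhedit call stamps a BACKFILE keyword that ties the source spectrum to its background. A quick sanity check with astropy (a sketch; global_response_dir is the variable from above, and the extension holding the keyword may vary):

from astropy.io import fits

with fits.open("{}/acisI_region_0.pi".format(global_response_dir)) as hdul:
    # after dmhedit runs, BACKFILE should name acisI_back_region_0.pi
    print(hdul[1].header.get("BACKFILE"))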
Example #13
def create_scale_map(cluster):
    from astropy.io import fits

    target_sn = cluster.target_sn

    mask_fits = fits.open(cluster.combined_mask)

    cts_image = np.zeros(mask_fits[0].data.shape)
    back_rescale = np.zeros(mask_fits[0].data.shape)

    t_obs = 0.0
    t_back = 0.0

    for obs in cluster.observations:
        obs_cts_image_fits = fits.open(obs.acisI_comb_img)
        obs_back_image_fits = fits.open(obs.backI_comb_img)

        cts_image += obs_cts_image_fits[0].data
        t_obs = obs_cts_image_fits[0].header['EXPOSURE']

        # back_rescale += obs_back_image_fits[0].data
        t_back = obs_back_image_fits[0].header['EXPOSURE']

        back_rescale += (t_obs/t_back)*obs_back_image_fits[0].data

    sigma_back = np.sqrt(back_rescale)
    sigma_cts = np.sqrt(cts_image)

    signal = cts_image - back_rescale

    signal[np.where(signal < 0)] = 0

    noise = np.sqrt(sigma_cts**2 + sigma_back**2)

    scale_file = cluster.scale_map_file
    sn_file = cluster.sn_map

    sz = signal.shape
    nx = sz[0]
    ny = sz[1]

    radius_map = np.zeros(sz)

    scale_map = np.zeros(sz)
    sn_map = np.zeros(sz)

    pix_x = np.zeros(sz)
    pix_y = np.zeros(sz)

    for j in range(ny):
        for i in range(nx):
            pix_x[i, j] = float(i)
            pix_y[i, j] = float(j)

    bpix_x = 0
    bpix_y = 0
    epix_x = nx
    epix_y = ny

    max_radius = 100
    r = max_radius + 1  # +1 may be a remnant of IDL indexing
    dr = 24.0
    min_dr = 0.125

    det = mask_fits[0].data

    num_pix = nx*ny

    ci = 0

    start_time = time.time()

    # output_queue = mp.Queue()


    for cj in range(bpix_y, epix_y):
        # progress by completed rows, in pixels (the original cj*ci undercounts)
        _update_completed_things(cj * nx, num_pix, "pixels")
        for ci in range(bpix_x, epix_x):
            if det[ci, cj] == 1:
                delta_x = ci-pix_x
                delta_y = cj-pix_y

                # an array where each value is the distance away from ci,cj
                radius = np.sqrt(delta_x**2 + delta_y**2)

                dr = 24.0
                hilo = 0
                niter = 0

                while (dr > min_dr) and (niter < 100):
                    indices = np.where(radius <= r)
                    cts_map_total = np.sum(cts_image[indices])

                    if cts_map_total == 0:
                        print("Encountered a source-free region -- recalculating...")
                        sn_val = 0
                        hilo = -1
                    else:
                        backmap_tot = np.sum(back_rescale[indices])
                        signal_total = cts_map_total - backmap_tot
                        noise_total = np.sqrt(cts_map_total+backmap_tot)
                        sn_val = signal_total/noise_total
                    if sn_val < target_sn:
                        if r > max_radius:
                            r = max_radius + 1

                            niter = 110
                            # exit by setting niter=110.
                            # (niter=100 means niter hit max niter.
                            # niter=110 means radius hit max radius)

                            sn_map[ci, cj] = 0
                            scale_map[ci, cj] = 0
                        else:
                            if hilo == 1:
                                dr *= 0.5
                            r += dr
                            hilo = -1
                    else:
                        snmapval = sn_map[ci, cj]
                        if (sn_val < snmapval) or (snmapval == 0.0):
                            sn_map[ci, cj] = sn_val
                            scale_map[ci, cj] = r
                        if hilo == -1:
                            dr *= 0.5
                        r -= dr
                        hilo = 1

                    niter += 1
                # end while

    end_time = time.time()
    print("Time elapsed {:0.2f} seconds.".format(end_time - start_time))

    io.make_directory(cluster.acb_dir)

    obs_cts_image_fits[0].data = scale_map
    obs_cts_image_fits.writeto(cluster.scale_map_file, overwrite=True)

    obs_cts_image_fits[0].data = sn_map
    obs_cts_image_fits.writeto(cluster.sn_map, overwrite=True)
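
The nested loops that fill pix_x and pix_y (here and in the other create_scale_map variants) have a one-line NumPy equivalent; a drop-in sketch, with sz as defined above:

import numpy as np

# identical result to the double loop: pix_x[i, j] == i and pix_y[i, j] == j
pix_x, pix_y = np.indices(sz, dtype=float)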