Example #1
import time  # assumed module-level import; used for the timing below

# `cluster` and `io` here are the pypeline's own modules (`io` wraps file and
# prompt helpers; it is not the stdlib io module).
def make_commands_lis(cluster: cluster.ClusterObj, resolution):
    print("Creating {}".format(cluster.command_lis))

    # Pixel sampling stride for resolution levels 1-3 (5, 3, or 1 pixels); index 0
    # is unused. Note that `offset` itself is not referenced again in this excerpt.
    offset = [None, 5, 3, 1][resolution]

    start_time = time.time()

    region_list = cluster.scale_map_regions_to_fit(resolution)

    command_string = []

    pypeline_dir = io.get_user_input("Enter the directory containing the pix2pix.py portion of the pypeline on the remote machine: ")
    pix2pix_path = "{pypeline_dir}/pix2pix.py".format(pypeline_dir=pypeline_dir)

    data_dir = io.get_user_input("Enter the directory containing the cluster data on the remote machine:\n"
                                 "For example: /home/user/data/clustername/\n")

    for region in region_list:
        new_command = "python {pix2pix} {cluster_config} {region}".format(
            pix2pix=pix2pix_path,
            cluster_config="{data_dir}/{name}_pypeline_config.ini".format(
                data_dir=data_dir,
                name=cluster.name
            ),
            region=region
        )
        command_string.append(new_command)

    command_lis = "\n".join(command_string)
    region_string = '\n'.join([str(x) for x in region_list])
    io.write_contents_to_file(command_lis, cluster.command_lis, binary=False)
    io.write_contents_to_file(region_string, cluster.filtered_region_list, binary=False)
    end_time = time.time()
    print("Time elapsed: {time:0.2f} sec".format(time=(end_time-start_time)))
Example #2
import os  # assumed module-level import; os.chdir() is used below

# `rt` is assumed to be CIAO's runtool interface (e.g. `from ciao_contrib import
# runtool as rt`); `io` is the pypeline's file-helper module.
def ccd_sort(cluster):
    print("Running ccd_sort on {}.".format(cluster.name))
    for observation in cluster.observations:
        print("Working on {}/{}".format(cluster.name, observation.id))
        analysis_path = observation.analysis_directory
        os.chdir(analysis_path)
        evt1_filename = io.get_path("{}/{}".format(analysis_path,
                                                   io.get_filename_matching("acis*evt1.fits")[0]))
        evt2_filename = io.get_path("{}/{}".format(analysis_path,
                                                   io.get_filename_matching("evt2.fits")[0]))
        detname = rt.dmkeypar(infile=evt1_filename, keyword="DETNAM", echo=True)
        print("evt1 : {}\nevt2 : {}\ndetname : {}".format(evt1_filename,
                                                          evt2_filename,
                                                          detname))
        assert detname is not None, "dmkeypar returned nothing for DETNAM!"
        # DETNAM looks like "ACIS-01236"; the digits after the dash are chip ids.
        detnums = [int(x) for x in detname.split('-')[-1]]

        for acis_id in detnums:
            print("{cluster}/{observation}: Making level 2 event file for ACIS Chip id: {acis_id}".format(
                cluster=cluster.name,
                observation=observation.id,
                acis_id=acis_id))
            rt.dmcopy(infile="{evt2_file}[ccd_id={acis_id}]".format(evt2_file=evt2_filename,
                                                                    acis_id=acis_id),
                      outfile="acis_ccd{acis_id}.fits".format(acis_id=acis_id),
                      clobber=True)

        acisI_list = io.get_filename_matching("acis_ccd[0-3].fits")
        for i in range(len(acisI_list)):
            acisI_list[i] = io.get_path("{obs_analysis_dir}/{file}".format(obs_analysis_dir=observation.analysis_directory,
                                                                           file=acisI_list[i]))
        io.write_contents_to_file("\n".join(acisI_list), observation.ccd_merge_list, binary=False)
        merge_data_and_backgrounds(cluster, acisI_list)

    return
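
For reference, a worked example of the DETNAM parsing used above (the keyword value is illustrative of a typical ACIS header):

detname = "ACIS-01236"                      # illustrative value from dmkeypar
detnums = [int(x) for x in detname.split('-')[-1]]
print(detnums)                              # -> [0, 1, 2, 3, 6]; 0-3 are ACIS-I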
Example #3
def prepare_for_spec(cluster_obj: cluster.ClusterObj):
    try:
        import ciao
    except ImportError:
        print("Must be running CIAO before running prepare_for_spec.")
        raise
    io.make_directory(cluster_obj.super_comp_dir)
    cluster_obj.initialize_best_fits_file()
    print("Preparing files for spectral analysis and copying to {super_comp_dir} for offloading computation.".format(
        super_comp_dir=cluster_obj.super_comp_dir
    ))
    io.copy(cluster_obj.configuration_filename, cluster_obj.super_comp_cluster_config)
    for observation in cluster_obj.observations:
        print("Copying files for {obsid}".format(obsid=observation.id))
        io.copy(observation.clean, cluster_obj.acisI_clean_obs(observation.id))
        io.copy(observation.back, cluster_obj.backI_clean_obs(observation.id))
        io.copy(observation.aux_response_file, observation.arf_sc)
        io.copy(observation.redistribution_matrix_file, observation.rmf_sc)
        io.copy(observation.acis_mask, observation.acis_mask_sc)

        exposure = ciao.get_exposure(observation.clean)

        io.write_contents_to_file(exposure, observation.exposure_time_file, binary=False)
Example #4
def make_pcad_lis(cluster, obsid):
    # Collect the observation's aspect-solution (asol1) files and write them,
    # one per line, to a pcad_asol1.lis stack file for downstream CIAO tools.
    analysis_dir = cluster.obs_analysis_directory(obsid)
    search_str = "{}/*asol1.fits".format(analysis_dir)
    pcad_files = io.get_filename_matching(search_str)
    pcad_list_string = "\n".join(pcad_files)
    pcad_filename = "{}/pcad_asol1.lis".format(analysis_dir)

    io.write_contents_to_file(pcad_list_string, pcad_filename, binary=False)

    return pcad_filename
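
A minimal usage sketch for the function above (file names are placeholders; CIAO tools accept the generated list via the "@" stack syntax, whereas Example #8 below reads the file's contents and passes them directly):

pcad_file = make_pcad_lis(cluster, observation.id)
rt.reproject_events(infile="back_ccd0.fits",          # placeholder
                    outfile="back_reproj_ccd0.fits",  # placeholder
                    aspect="@{}".format(pcad_file),
                    match="acis_ccd0.fits",           # placeholder
                    clobber=True)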
Example #5
import gc                     # assumed module-level imports for this excerpt
import multiprocessing as mp
import time

# `pix2pix` and `io` are pypeline modules; `result_queue` is an addition here so
# the summary at the end can reach the parent process (see the note there).
def run_in_parallel(cluster_obj: cluster.ClusterObj, region_list, start_time, result_queue=None):
    regions_processed = []
    num_regions = len(region_list)
    time_of_last = start_time  # was 0, which inflated the first 10-region interval
    for region in region_list:
        pix2pix.pix_to_pix(cluster_obj, region, mp.current_process().name)
        regions_processed.append(region)
        if len(regions_processed) % 10 == 0:
            current_time = time.time()
            time_since_last = current_time - time_of_last
            time_of_last = time.time()

            average_second_per_completion = time_since_last / 10

            elapsed = current_time - start_time
            elapsed_str = time.strftime("%H Hours %M minutes %S seconds.",
                                        time.gmtime(elapsed))

            num_regions_processed = len(regions_processed)
            seconds_per_region = elapsed / num_regions_processed
            name = mp.current_process().name

            completion_seconds = (
                (num_regions - num_regions_processed) * seconds_per_region
            )  # in seconds
            completion_time = time.strftime("%H Hours %M minutes %S seconds.",
                                            time.gmtime(completion_seconds))

            log_string = "\n{color}*******************************************{reset}\n"\
                         "\n{name}: Elapsed time: {elapsed}\n"\
                         "{name}: Seconds per region (total average): {seconds_per_region}\n"\
                         "Seconds per region (last 10 regions fit): {recent_average}\n"\
                         "{num_processed} of {total_regions} processed\n" \
                         "Estimated time for completion: {completion}.\n" \
                         "\n{color}*******************************************{reset}\n".format(
                color=io.Colors.RED,
                reset=io.Colors.RESET,
                name=name,
                elapsed=elapsed_str,
                seconds_per_region=seconds_per_region,
                recent_average=average_second_per_completion,
                num_processed=num_regions_processed,
                total_regions=num_regions,
                completion=completion_time
            )
            print(log_string)
            log_file = "{super_comp_dir}/{p_name}_time.txt".format(
                super_comp_dir=cluster_obj.super_comp_dir,
                p_name=mp.current_process().name)
            io.write_contents_to_file(log_string, log_file, False)
            print("Manual garbage collection.")
            gc.collect()

    # The original put the summary on a freshly constructed mp.Queue(), which no
    # other process can ever read; report through the shared queue passed in instead.
    if result_queue is not None:
        result_queue.put('Regions processed: {}'.format(len(regions_processed)))
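
A minimal driver sketch for the worker above (the worker count and round-robin split are assumptions; cluster_obj and region_list are taken as already prepared):

if __name__ == "__main__":
    result_queue = mp.Queue()
    num_workers = 4                                   # assumed
    chunks = [region_list[i::num_workers] for i in range(num_workers)]
    start_time = time.time()
    workers = [mp.Process(target=run_in_parallel,
                          args=(cluster_obj, chunk, start_time, result_queue))
               for chunk in chunks]
    for worker in workers:
        worker.start()
    for _ in workers:                                 # one summary per worker
        print(result_queue.get())
    for worker in workers:
        worker.join()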
Example #6
import subprocess  # assumed module-level import; used to launch DS9 below

def make_response_files(cluster):
    for obsid in cluster.observation_ids:
        print("Making response files for observation {}".format(obsid))
        obs_analysis_dir = cluster.obs_analysis_directory(obsid)
        region_file = io.get_path("{}/acisI_region_0.reg".format(obs_analysis_dir))

        if (not io.file_exists(region_file)) or (io.file_size(region_file) == 0):
            print("Region file {} does not exist.".format(region_file))
            print("When DS9 opens, draw a small circle that covers a piece of each ACIS-I chip (~20 pixels) and save it as:\n" \
                  "{}".format(region_file))
            print("Opening SAO DS9")
            io.write_contents_to_file("", region_file, False)
            ds9_arguments = "ds9 -regions system physical -regions shape circle -regions format ciao -zoom 0.5 " \
                            "-bin factor 4 {}/acisI_clean.fits".format(obs_analysis_dir)
            # With shell=True the command should be passed as a single string,
            # not wrapped in a list.
            subprocess.run(ds9_arguments, shell=True)
        print('Creating global response file.')
        create_global_response_file_for(cluster, obsid, region_file)
Example #7
import os  # assumed module-level import; os.chdir() is used below

def actually_merge_observations_from(cluster):
    print("Merging observations from {}.".format(cluster.name))

    merged_directory = io.get_path('{}/merged_obs_evt2/'.format(
        cluster.directory))

    io.make_directory(merged_directory)

    os.chdir(merged_directory)
    merged_observations = []

    for observation in cluster.observation_list:
        evt2_file = "{}/{}/analysis/evt2.fits".format(cluster.directory,
                                                      observation)
        merged_observations.append(evt2_file)

    merged_lis = "{}/merged_obs.lis".format(merged_directory)
    io.write_contents_to_file("\n".join(merged_observations),
                              merged_lis,
                              binary=False)
    outroot = io.get_path("{}/{}".format(cluster.directory, cluster.name))

    infile = "@{infile}[ccd_id=0:3]".format(infile=merged_lis)  # for ACIS-I
    # infile = "@{infile}".format(infile=merged_lis) # for ACIS-I & ACIS-S

    xygrid = "1500:6500:4,1500:6500:4"

    if len(merged_observations) == 1:
        rt.fluximage.punlearn()
        rt.fluximage(infile=infile,
                     outroot=outroot,
                     xygrid=xygrid,
                     clobber=True)
        print("Only single observation, flux image created.")

    elif len(merged_observations) > 1:
        rt.merge_obs.punlearn()
        rt.merge_obs(infiles=infile,
                     outroot=outroot,
                     xygrid=xygrid,
                     clobber=True,
                     parallel=True,
                     nproc=12)
Example #8
def ciao_back(cluster, overwrite=False):
    print("Running ciao_back on {}.".format(cluster.name))

    for observation in cluster.observations:
        pcad_file = make_pcad_lis(cluster, observation.id)
        backI_lis = []
        backS_lis = []
        analysis_path = observation.analysis_directory
        filelist = io.read_contents_of_file(observation.ccd_merge_list).split('\n')
        pcad = io.read_contents_of_file(pcad_file)
        for acis_file in filelist:
            rt.acis_bkgrnd_lookup.punlearn()
            print("Finding background for {}".format(acis_file))
            path_to_background = rt.acis_bkgrnd_lookup(infile=acis_file)
            print("Found background at {}".format(path_to_background))
            # File names in the merge list look like ".../acis_ccdN.fits";
            # pull out the chip id N.
            acis_id = int(acis_file.split('/')[-1].split('.')[-2][-1])
            assert path_to_background is not None, "Cannot find background for {}".format(acis_file)

            local_background_path = io.get_path("{}/back_ccd{}.fits".format(analysis_path, acis_id))
            try:
                if io.file_exists(local_background_path) and overwrite:
                    io.delete(local_background_path)
                io.copy(path_to_background, local_background_path)
            except OSError:
                print("Problem copying background file {}. Do you have the right permissions and a full CALDB?".format(
                    path_to_background))

                raise

            acis_gain = rt.dmkeypar(infile=acis_file,
                                    keyword="GAINFILE",
                                    echo=True)
            background_gain = rt.dmkeypar(infile=local_background_path,
                                          keyword="GAINFILE",
                                          echo=True)

            print("{}/{}/acis_ccd{}.fits gain: {}".format(cluster.name, observation.id, acis_id, acis_gain))
            print("{}/{}/back_ccd{}.fits gain: {}".format(cluster.name, observation.id, acis_id, background_gain))

            if not dates_and_versions_match(acis_gain, background_gain):
                print("Date/version numbers don't match on the acis data and background. Reprocessing.")

                local_background_path = reprocess(cluster, observation.id, acis_gain, background_gain, acis_id)

            print("Reprojecting background")
            rt.reproject_events.punlearn()
            infile = local_background_path
            outfile = io.get_path("{local_path}/back_reproj_ccd{acis_id}.fits".format(local_path=analysis_path,
                                                                                      acis_id=acis_id))
            match = acis_file

            print(
                "Running:\n reproject_events(infile={infile}, outfile={outfile}, aspect={pcad}, match={match})".format(
                    infile=infile, outfile=outfile, pcad=pcad, match=match)
            )
            rt.reproject_events(infile=infile,
                                outfile=outfile,
                                aspect="{pcad_file}".format(pcad_file=pcad),
                                match=match,
                                random=0,
                                clobber=True)

            back_reproject = outfile
            # get_filename_matching() returns a list, so take the first match;
            # echo=True makes dmkeypar return the keyword value (as above).
            datamode = rt.dmkeypar(infile=io.get_filename_matching(io.get_path("{}/acis*evt1*.fits".format(analysis_path)))[0],
                                   keyword="DATAMODE",
                                   echo=True)
            if datamode == "VFAINT":
                print("VFAINT Mode, resetting setting status bits")
                rt.dmcopy.punlearn()
                rt.dmcopy(infile="{}[status=0]".format(back_reproject),
                          outfile=outfile,
                          clobber=True)
            if acis_id <= 3:
                backI_lis.append(back_reproject)
            else:
                backS_lis.append(back_reproject)

        merged_back_list = backI_lis + backS_lis

        print("writing backI.lis and backS.lis")
        io.write_contents_to_file("\n".join(backI_lis), io.get_path("{}/backI.lis".format(analysis_path)),
                                  binary=False)
        io.write_contents_to_file("\n".join(backS_lis), io.get_path("{}/backS.lis".format(analysis_path)),
                                  binary=False)
        io.write_contents_to_file("\n".join(merged_back_list), observation.merged_back_lis, binary=False)

    return
Example #9
# Variant of Example #1; the original pixel-scan implementation it replaced is
# summarized in comments below.
def make_commands_lis(cluster: cluster.ClusterObj, resolution):

    print("Creating {}".format(cluster.command_lis))

    # Pixel sampling stride for resolution levels 1-3 (5, 3, or 1 pixels); index 0
    # is unused. Only the historical pixel-scan summarized below used `offset`.
    offset = [None, 5, 3, 1][resolution]

    start_time = time.time()

    # The scale map and combined mask (read with astropy.io.fits) were previously
    # loaded and scanned here directly; scale_map_regions_to_fit() now returns
    # the filtered region list instead (see the note below).
    region_list = cluster.scale_map_regions_to_fit(resolution)

    command_string = []

    pypeline_dir = io.get_user_input("Enter the directory containing the pix2pix.py portion of the pypeline on the remote machine: ")
    pix2pix_path = "{pypeline_dir}/pix2pix.py".format(pypeline_dir=pypeline_dir)

    data_dir = io.get_user_input("Enter the directory containing the cluster data on the remote machine:\n"
                                 "For example: /home/user/data/clustername/\n")

    for region in region_list:
        new_command = "python {pix2pix} {cluster_config} {region}".format(
            pix2pix=pix2pix_path,
            cluster_config="{data_dir}/{name}_pypeline_config.ini".format(
                data_dir=data_dir,
                name=cluster.name
            ),
            region=region
        )
        command_string.append(new_command)

    # Historical note: region_list was originally built by looping over every
    # (x, y) pixel of the scale map, keeping masked pixels with an image radius
    # in (0, 100] at every `offset`-th row and column, numbered by a running
    # `position` counter. scale_map_regions_to_fit() encapsulates that logic.
    command_lis = "\n".join(command_string)
    region_string = '\n'.join([str(x) for x in region_list])
    io.write_contents_to_file(command_lis, cluster.command_lis, binary=False)
    io.write_contents_to_file(region_string, cluster.filtered_region_list, binary=False)
    end_time = time.time()
    print("Time elapsed: {time:0.2f} sec".format(time=(end_time-start_time)))