def extract_inputs_d():
    """Load the stars and galaxies reference catalogues into DataFrames.

    Opens ``cat_stars.fits`` and ``cat_galaxies.fits`` from the references
    directory, converts their first data extension to pandas and adds a
    sequential ``IDX`` column.

    :return: dict with keys 'stars' and 'galaxies', each a DataFrame.
    """
    prfs_dict = extract_settings_elvis()
    inputs_d = {}

    references_loc = prfs_dict['references']
    for key_, cat_file in (('stars', 'cat_stars'),
                           ('galaxies', 'cat_galaxies')):
        hdu_list = fits.open('{}/{}.fits'.format(references_loc, cat_file))
        cat_df = Table(hdu_list[1].data).to_pandas()
        # Derive the index from the actual catalogue size instead of the
        # previously hard-coded row counts (28474 stars / 143766 galaxies).
        cat_df['IDX'] = range(len(cat_df))
        inputs_d[key_] = cat_df

    return inputs_d
def extract_cats_d():
    """Collect the sextractor catalogues of every dither into pandas form.

    :return: dict keyed by dither number (1-4); each value maps a catalogue
        name to its DataFrame, tagged with a ``CATALOG_NUMBER`` column.
    """
    prfs_dict = extract_settings_elvis()
    cats_d = {}

    for dither in range(1, 5):
        cats_d[dither] = {}
        for cat_name in get_cats(dither):
            hdu_list = fits.open('{}/{}'.format(prfs_dict['fits_dir'],
                                                cat_name))
            cat_df = Table(hdu_list[2].data).to_pandas()

            cat_number = get_cat(cat_name)  # Gets cat's number from its name
            if cat_number == 0:
                print(cat_number, cat_name)

            cats_d[dither][cat_name] = cat_df
            number_column = [cat_number] * cat_df['NUMBER'].size
            cats_d[dither][cat_name]['CATALOG_NUMBER'] = number_column

    return cats_d
def __init__(self, logger):
    """Run the cosmic-ray cleaning thread over every image in fits_dir.

    Images are processed in batches of ``cores_number`` parallel
    processes; each batch is started and joined before the next begins.

    :param logger: logger instance, stored for use by the threads.
    """
    self.prfs_d = extract_settings_elvis()
    self.logger = logger

    fits_files = listdir(self.prfs_d['fits_dir'])
    cores_number = self.prfs_d['cores_number']

    for batch_start in range(0, len(fits_files), cores_number):
        # Bound the batch to the files that actually exist; the original
        # relied on an IndexError to stop, which abandoned the
        # already-started jobs of the final partial batch.
        batch = fits_files[batch_start:batch_start + cores_number]

        cosmic_j = []
        for fits_file in batch:
            cosmic_p = Process(target=self.cosmic_thread, args=(fits_file,))
            cosmic_j.append(cosmic_p)
            cosmic_p.start()

        # join() blocks until each job finishes — no busy-wait polling.
        for job in cosmic_j:
            job.join()

    print('Extraction process of fits images finished')
def __init__(self):
    """Compute output values for all stars and galaxies present in
    filter step 3, whether recovered or not.
    """
    self.filter_p_number = 3  # First one with enough data for statistics
    self.prfs_d = extract_settings_elvis()

    ccds = True
    filtered = False
    scamp = False

    self.input_df = read_csv('tmp_galaxies/galaxies.csv', index_col=0)
    self.filt_cat = self.gets_filtered_catalog()  # Gets data from filtered

    if ccds:
        self.extract_stats_ccds(self.extract_cats())
    elif filtered:
        pass  # self.extract_stats_filt() - not implemented yet
    elif scamp:
        pass  # self.extract_stats_scamp(input_df) - not implemented yet
    else:
        pass
def __init__(self):
    """Dispatch the pipeline stage named by the first command-line option."""
    self.prfs_d = misc.extract_settings_elvis()
    self.logger = misc.setting_logger(self.prfs_d)

    # Scamp configurations.
    self.scamp_confs, self.scamp_confs_n = create_configurations(
        {'type': 'scamp'})

    # Sextractor configurations.
    self.sex_confs, sex_confs_n = create_configurations(
        {'type': 'sextractor'})

    # Maps each command-line option to (stage method, failure exception).
    stages = {'-full': (self.full_pipeline, FullPipelineFailed),
              '-clean': (self.clean, CleanFailed),
              '-split': (self.split, SplitFailed),
              '-sextractor': (self.sextractor, SextractorFailed),
              '-scamp': (self.scamp, ScampFailed),
              '-filter': (self.filt, FiltFailed),
              '-restart': (self.restart, RestartFailed)}

    option = sys.argv[1]
    if option in stages:
        stage, failure = stages[option]
        if not stage():
            raise failure
def __init__(self):
    """Compute output values for all stars and galaxies present in
    filter step 3, whether recovered or not, splitting the work over
    ``cores_number`` worker processes and merging their partial outputs.
    """
    self.filter_p_number = 3  # First one with enough data for statistics
    self.prfs_d = extract_settings_elvis()
    self.data_d = create_output_dicts()
    self.save = True

    logger_name = 'scamp_performance'  # Set as desired
    self.logger = setting_logger(self.prfs_d, logger_name)

    filt_cat = self.gets_filtered_catalog()  # Gets data from filtered
    input_df = self.gets_data()  # Gets data from catalogs

    unique_sources = list(set(filt_cat['SOURCE_NUMBER'].tolist()))
    cores_number = self.prfs_d['cores_number']
    # Integer division: the original used '/', which yields a float in
    # Python 3 and breaks the list slices below with a TypeError.
    sub_list_size = len(unique_sources) // cores_number

    sub_list_l = []
    for idx_sub_list in range(0, cores_number, 1):
        idx_down = sub_list_size * idx_sub_list
        if idx_sub_list != (cores_number - 1):
            idx_up = sub_list_size * (idx_sub_list + 1)
            sub_list_l.append(unique_sources[idx_down:idx_up])
        else:
            # The last chunk absorbs the remainder of the division.
            sub_list_l.append(unique_sources[idx_down:])

    areas_j = []
    for idx_l in range(0, cores_number, 1):
        areas_p = Process(target=self.splits_data,
                          args=(idx_l, sub_list_l[idx_l], filt_cat,
                                input_df,))
        areas_j.append(areas_p)
        areas_p.start()

    # join() blocks until every worker has written its partial
    # catalogues — replaces the original busy-wait polling loop.
    for job in areas_j:
        job.join()

    # Merges catalogues produced by the workers.
    for key_ in ['stars', 'galaxies', 'ssos', 'lost']:
        csv_list = []
        for idx_csv in range(0, cores_number, 1):
            csv_ = read_csv('cat_{}_{}.csv'.format(idx_csv, key_),
                            index_col=0)
            csv_list.append(csv_)

        full_df = concat(csv_list)
        full_df.to_csv('full_cats/cat_{}.csv'.format(key_))

    # Removes the per-worker partial catalogues.
    for key_ in ['stars', 'galaxies', 'ssos', 'lost']:
        for idx_csv in range(0, cores_number, 1):
            remove('{}/cat_{}_{}.csv'.format(getcwd(), idx_csv, key_))
def test_right_settings_file_for_cab(self):
    """Check that a valid 'cab' settings file yields a dict.

    :return: assertion result.
    """
    misc.get_os = MagicMock(return_value='cab')
    self.create_right_settings_file()

    settings = misc.extract_settings_elvis()
    return self.assertIs(type(settings), dict)
def __init__(self, logger, analysis_d):
    """Store configuration and launch the sextractor run.

    :param logger: logger instance.
    :param analysis_d: dictionary of analysis parameters.
    """
    self.logger = logger
    self.analysis_d = analysis_d
    self.prfs_d = misc.extract_settings_elvis()

    self.sextractor_process()
def test_right_settings_file_for_cab_set_cores_number(self):
    """Check that an explicit cores_number in the settings file is read.

    :return: assertion result.
    """
    misc.get_os = MagicMock(return_value='cab')
    self.create_right_settings_file_cores_define()

    settings = misc.extract_settings_elvis()
    return self.assertIs(settings['cores_number'], 10)
def extract_ssos_df():
    """Read the SSOs reference catalogue and tag each row with a SOURCE id.

    :return: DataFrame of the SSOs catalogue.
    """
    prfs_dict = extract_settings_elvis()
    ssos_df = read_csv('{}/ssos_cat.txt'.format(prfs_dict['references']),
                       delim_whitespace=True)
    ssos_df['SOURCE'] = range(ssos_df['RA'].size)

    return ssos_df
def __init__(self, logger, scmp_d):
    """Store configuration and launch the scamp run.

    :param logger: logger instance.
    :param scmp_d: dictionary of scamp parameters.
    """
    self.prfs_d = misc.extract_settings_elvis()
    self.logger = logger
    self.scmp_d = scmp_d

    self.scamp_process()
def extract_galaxies_df():
    """Load the galaxies reference catalogue as a pandas DataFrame.

    :return: DataFrame with a sequential 'IDX' column added.
    """
    prfs_dict = extract_settings_elvis()

    cat_galaxies_loc = prfs_dict['references']
    cat_galaxies = fits.open('{}/cat_galaxies.fits'.format(cat_galaxies_loc))
    galaxies_df = Table(cat_galaxies[1].data).to_pandas()
    # Index derived from the actual catalogue length instead of the
    # previously hard-coded row count (143766).
    galaxies_df['IDX'] = range(len(galaxies_df))

    return galaxies_df
def extract_stars_df():
    """Load the stars reference catalogue as a pandas DataFrame.

    :return: DataFrame with a sequential 'IDX' column added.
    """
    prfs_dict = extract_settings_elvis()

    cat_stars_loc = prfs_dict['references']
    cat_stars = fits.open('{}/cat_stars.fits'.format(cat_stars_loc))
    stars_df = Table(cat_stars[1].data).to_pandas()
    # Index derived from the actual catalogue length instead of the
    # previously hard-coded row count (28474).
    stars_df['IDX'] = range(len(stars_df))

    return stars_df
def get_fpa_elvis():
    """List every FPA fits file found in the fpas directory.

    :return: list of '.fits' file names.
    """
    prfs_d = misc.extract_settings_elvis()

    files = os.listdir('{}'.format(prfs_d['fpas_dir']))
    return [file_ for file_ in files if file_.endswith('.fits')]
def create_scamp_df():
    """Load the second-step filtered catalogue into a DataFrame.

    :return: scamp_df
    """
    prfs_dict = extract_settings_elvis()
    filter_o_n = '{}/{}'.format(prfs_dict['filtered'], 'filt_')

    return read_csv('{}_2.csv'.format(filter_o_n), index_col=0)
def get_borders():
    """Get the sky limits of every fits image, grouped by dither.

    :return: dict keyed by dither (1-4) mapping fits name to its limits.
    """
    prfs_d = extract_settings_elvis()

    borders_d = {}
    for dither_ in range(1, 5):
        borders_d[dither_] = {}
        for fits_ in get_fits(dither_):
            fits_path = '{}/{}'.format(prfs_d['fits_dir'], fits_)
            borders_d[dither_][fits_] = get_fits_limits(fits_path)

    return borders_d
def __init__(self, logger, scmp_cf, sex_d):
    """Run the full filtering chain over the scamp output catalogue.

    Each stage saves an intermediate '_N.csv' snapshot into the
    filtered-catalogues directory.

    :param logger: logger instance.
    :param scmp_cf: scamp configuration (kept for interface compatibility).
    :param sex_d: sextractor configuration dictionary.
    """
    # Analysis variables
    self.prfs_d = extract_settings_elvis()
    self.logger = logger
    self.save = True

    # Filtered catalog dir
    self.filter_dir = self.prfs_d['filtered']
    self.filt_n = 'filt_'
    self.filter_o_n = '{}/{}'.format(self.prfs_d['filtered'], self.filt_n)

    # Saves _1.csv (merged_db, full_db)
    (merged_db, full_db) = self.scamp_filter()
    # Saves _2.csv
    full_df = self.compute_pm(merged_db, full_db)
    # Saves _3.csv
    full_df = self.get_areas(full_df)

    # Saves _4.csv — drops sources with proper motion at or below 0.01
    full_df = full_df[full_df['PM'] > 0.01]
    if self.save:
        self.save_message('4')
        full_df.to_csv('{}_4.csv'.format(self.filter_o_n))

    # Saves _5.csv
    full_df = self.filter_class(full_df)

    # Split by proper motion; '<= 2' keeps sources with PM exactly 2,
    # which the previous '> 2' / '< 2' pair silently discarded.
    fast_df = full_df[full_df['PM'] > 2]
    slow_df = full_df[full_df['PM'] <= 2]

    # Saves _6f.csv
    fast_df = self.filter_coherence(fast_df)
    # Saves _6s.csv
    slow_df = self.filter_b_image(slow_df)

    # 8th version
    full_df = concat([fast_df, slow_df])
    if self.save:
        self.save_message('9')
        full_df.to_csv('{}_9.csv'.format(self.filter_o_n))
def change_times():
    """Rewrite observation times in every fits image, in parallel batches.

    hardcoded times:
    2021-06-26T09:00:00.00000
    2021-06-26T09:16:43.00000
    2021-06-26T09:33:26.00000
    2021-06-26T09:50:09.00000

    :return: True once every image has been processed.
    """
    prfs_d = extract_settings_elvis()
    logger = setting_logger(prfs_d)
    core_number = int(prfs_d['cores_number'])

    fits_files = [file_ for file_ in listdir(prfs_d['fits_dir'])
                  if file_.endswith('.fits')]

    for idx_fits in range(0, len(fits_files), core_number):
        # Bound the batch to the remaining files; the original relied on
        # an IndexError to stop, leaving the already-started jobs of the
        # final partial batch unjoined.
        batch = fits_files[idx_fits:idx_fits + core_number]

        time_j = []
        for fits_file in batch:
            time_p = Process(target=change_times_thread,
                             args=(prfs_d, fits_file,))
            time_j.append(time_p)
            time_p.start()

        # join() blocks until each job finishes — no busy-wait polling.
        for job in time_j:
            job.join()

    logger.debug('extraction finished')

    return True
def get_fits(dither):
    """Return every fits file of the fits directory that belongs to the
    chosen dither.

    :param dither: The chosen dither.
    :return: The list with the fits files.
    """
    prfs_d = extract_settings_elvis()  # Gets the preferences dictionary.

    # Gets all fits files.
    files = listdir('{}/'.format(prfs_d['fits_dir']))
    fits_list = [file_ for file_ in files if file_[-5:] == '.fits']

    # Keeps only the images of the chosen dither: the dither number is
    # the character right before the '.fits' extension.
    return [file_ for file_ in fits_list if file_[-6:-5] == str(dither)]
def get_cats(dither):
    """Return every catalogue of the catalogues directory that belongs to
    the chosen dither.

    :param dither: The chosen dither.
    :return: A list with the catalogues files.
    """
    prfs_d = extract_settings_elvis()  # Gets the preferences dictionary.

    # Gets all the catalogues files.
    cats = listdir('{}'.format(prfs_d['fits_dir']))
    cats_list = [cat_ for cat_ in cats if cat_[-4:] == '.cat']

    # Keeps only the catalogues of the chosen dither: the dither number
    # is the character right before the '.cat' extension.
    return [cat_ for cat_ in cats_list if cat_[-5:-4] == str(dither)]
class_star = float(sex_df['CLASS_STAR'].iloc[0]) source_d['CLASS_STAR'].append(class_star) pm = float(df['PM'].iloc[0]) source_d['PM'].append(pm) pmerr = float(df['PMERR'].iloc[0]) source_d['PMERR'].append(pmerr) if len(source_d['DITHER']) != 0: for key_ in source_d.keys(): for value_ in source_d[key_]: cat_d[key_].append(value_) cat_df = DataFrame(cat_d, columns=[ 'DITHER', 'CATALOG_NUMBER', 'X_WORLD', 'Y_WORLD', 'MAG_AUTO', 'MAGERR_AUTO', 'A_IMAGE', 'B_IMAGE', 'THETA_IMAGE', 'ERRA_IMAGE', 'ERRB_IMAGE', 'ERRA_WORLD', 'ERRB_WORLD', 'ERRTHETA_WORLD', 'CLASS_STAR', 'PM', 'PMERR' ]) cat_df.to_csv('tmp_stars/stars_{}.csv'.format(idx_l)) if __name__ == "__main__": prfs_dict = extract_settings_elvis() catalogue = create_catalog()
def __init__(self):
    """Compute output values for all stars and galaxies present in
    filter step 9, whether recovered or not, and split the results into
    right/false positives per dither.
    """
    self.filter_p_number = 9  # First one with enough data for statistics
    self.prfs_d = extract_settings_elvis()
    self.input_d = extract_inputs_d()

    # One empty result container per dither (1-4); built with a
    # comprehension instead of eight hand-written copies of the same
    # literal, which were easy to get out of sync.
    result_keys = ['SOURCE', 'RA', 'DEC', 'MAG', 'PM', 'PMERR',
                   'CLASS', 'OBJECT']
    # False positives dictionary
    self.false_positives = {dither: {key_: [] for key_ in result_keys}
                            for dither in range(1, 5)}
    # Right positives dictionary
    self.right_positives = {dither: {key_: [] for key_ in result_keys}
                            for dither in range(1, 5)}

    self.save = True

    logger_name = 'scamp_performance'  # Set as desired
    self.logger = setting_logger(self.prfs_d, logger_name)

    filt_cat = self.gets_filtered_catalog()  # Gets data from filtered
    input_df = self.gets_data()  # Gets data from catalogs
    self.extract_stats(filt_cat, input_df)  # Splits due type
for cat_ in cat_list: catalog = fits.open('{}/{}'.format(prfs_d['fits_dir'], cat_)) catalog_data = Table(catalog[2].data).to_pandas() alpha_tmp = catalog_data['ALPHA_J2000'].tolist() delta_tmp = catalog_data['DELTA_J2000'].tolist() for alpha_ in alpha_tmp: alpha_list.append(alpha_) for delta_ in delta_tmp: delta_list.append(delta_) alpha_series = Series(alpha_list, name='ALPHA_J2000') delta_series = Series(delta_list, name='DELTA_J2000') positions_table = concat([alpha_series, delta_series], axis=1) positions_table.to_csv('regions/extracted_{}.reg'.format(dither_), index=False, header=False, sep=" ") return True if __name__ == "__main__": prfs_d = extract_settings_elvis() if create_regions_by_dither(): pass else: raise Exception