def __init__(self):
    """ Runs the pipeline step requested from the command line. """
    self.prfs_d = misc.extract_settings_elvis()
    self.logger = misc.setting_logger(self.prfs_d)

    # Scamp configurations.
    mode = {'type': 'scamp'}
    self.scamp_confs, self.scamp_confs_n = create_configurations(mode)

    # Sextractor configurations.
    mode = {'type': 'sextractor'}
    self.sex_confs, sex_confs_n = create_configurations(mode)

    if sys.argv[1] == '-full':
        if not self.full_pipeline():
            raise FullPipelineFailed
    elif sys.argv[1] == '-clean':
        if not self.clean():
            raise CleanFailed
    elif sys.argv[1] == '-split':
        if not self.split():
            raise SplitFailed
    elif sys.argv[1] == '-sextractor':
        if not self.sextractor():
            raise SextractorFailed
    elif sys.argv[1] == '-scamp':
        if not self.scamp():
            raise ScampFailed
    elif sys.argv[1] == '-filter':
        if not self.filt():
            raise FiltFailed
    elif sys.argv[1] == '-restart':
        if not self.restart():
            raise RestartFailed
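# --- Illustrative sketch (not part of the original source) ------------------
# The elif chain above can also be written as a table-driven dispatch. The
# method and exception names come from the original; the DISPATCH table and
# run_stage() helper are hypothetical.
DISPATCH = {
    '-full': ('full_pipeline', FullPipelineFailed),
    '-clean': ('clean', CleanFailed),
    '-split': ('split', SplitFailed),
    '-sextractor': ('sextractor', SextractorFailed),
    '-scamp': ('scamp', ScampFailed),
    '-filter': ('filt', FiltFailed),
    '-restart': ('restart', RestartFailed),
}


def run_stage(pipeline, flag):
    """ Runs the stage bound to `flag` on `pipeline`, raising the matching
    failure exception when the stage reports failure. """
    method_name, failure = DISPATCH[flag]
    if not getattr(pipeline, method_name)():
        raise failure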
def __init__(self):
    """ Gives the output values of every star and galaxy present in
    filter 3, whether they were recovered or not.
    """
    self.filter_p_number = 3  # First one with enough data for statistics
    self.prfs_d = extract_settings_elvis()
    self.data_d = create_output_dicts()
    self.save = True

    logger_name = 'scamp_performance'  # Set as desired
    self.logger = setting_logger(self.prfs_d, logger_name)

    filt_cat = self.gets_filtered_catalog()  # Gets data from filtered
    input_df = self.gets_data()  # Gets data from catalogs

    # Splits the unique sources into one sub-list per core. Integer (floor)
    # division is needed here: a float chunk size would break the slicing.
    unique_sources = list(set(filt_cat['SOURCE_NUMBER'].tolist()))
    sub_list_size = len(unique_sources) // self.prfs_d['cores_number']

    sub_list_l = []
    for idx_sub_list in range(0, self.prfs_d['cores_number'], 1):
        if idx_sub_list != (self.prfs_d['cores_number'] - 1):
            idx_down = sub_list_size * idx_sub_list
            idx_up = sub_list_size * (idx_sub_list + 1)
            sub_list_l.append(unique_sources[idx_down:idx_up])
        else:
            # The last chunk takes any remainder.
            idx_down = sub_list_size * idx_sub_list
            sub_list_l.append(unique_sources[idx_down:])

    areas_j = []
    for idx_l in range(0, self.prfs_d['cores_number'], 1):
        areas_p = Process(target=self.splits_data,
                          args=(idx_l, sub_list_l[idx_l],
                                filt_cat, input_df,))
        areas_j.append(areas_p)
        areas_p.start()

    # Waits until every worker has finished.
    for areas_p in areas_j:
        areas_p.join()

    # Merges the per-worker catalogues into one file per object type.
    for key_ in ['stars', 'galaxies', 'ssos', 'lost']:
        csv_list = []
        for idx_csv in range(0, self.prfs_d['cores_number'], 1):
            csv_ = read_csv('cat_{}_{}.csv'.format(idx_csv, key_),
                            index_col=0)
            csv_list.append(csv_)
        full_df = concat(csv_list)
        full_df.to_csv('full_cats/cat_{}.csv'.format(key_))

    # Removes the old per-worker catalogues.
    for key_ in ['stars', 'galaxies', 'ssos', 'lost']:
        for idx_csv in range(0, self.prfs_d['cores_number'], 1):
            remove('{}/cat_{}_{}.csv'.format(getcwd(), idx_csv, key_))
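# --- Illustrative sketch (not part of the original source) ------------------
# Minimal, self-contained version of the scatter/join/merge pattern used in
# the __init__ above: split the sources across cores, run one Process per
# chunk, block on join() instead of polling is_alive(), then concatenate the
# per-worker CSVs. The `worker` argument and the file naming are hypothetical.
from multiprocessing import Process

import pandas as pd


def chunk(items, n_chunks):
    """ Splits `items` into `n_chunks` near-equal slices, spreading the
    remainder over the first chunks instead of piling it onto the last. """
    size, extra = divmod(len(items), n_chunks)
    chunks, start = [], 0
    for idx in range(n_chunks):
        stop = start + size + (1 if idx < extra else 0)
        chunks.append(items[start:stop])
        start = stop
    return chunks


def scatter_and_merge(worker, sources, cores, out_name):
    """ Runs `worker(idx, chunk)` once per core and merges the results.
    `worker` is assumed to write its output to 'cat_<idx>.csv'. """
    jobs = []
    for idx, sub_list in enumerate(chunk(sources, cores)):
        job = Process(target=worker, args=(idx, sub_list))
        jobs.append(job)
        job.start()
    for job in jobs:
        job.join()  # Blocks until the worker exits; no busy loop needed.
    frames = [pd.read_csv('cat_{}.csv'.format(idx), index_col=0)
              for idx in range(cores)]
    pd.concat(frames).to_csv(out_name)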
def change_times():
    """ Changes the times of the FITS files found in the configured
    directory.

    Hardcoded times:
    2021-06-26T09:00:00.00000
    2021-06-26T09:16:43.00000
    2021-06-26T09:33:26.00000
    2021-06-26T09:50:09.00000
    Hardcoded dir:
    """
    prfs_d = extract_settings_elvis()
    logger = setting_logger(prfs_d)

    core_number = int(prfs_d['cores_number'])
    files = listdir(prfs_d['fits_dir'])
    fits_files = []
    for file_ in files:
        if file_[-5:] == '.fits':
            fits_files.append(file_)

    # Launches the workers in batches of `core_number` processes.
    for idx_fits in range(0, len(fits_files), core_number):
        try:
            time_j = []
            for proc in range(0, core_number, 1):
                idx = idx_fits + proc
                time_p = Process(target=change_times_thread,
                                 args=(prfs_d, fits_files[idx],))
                time_j.append(time_p)
                time_p.start()

            # Waits until the whole batch has finished.
            for time_p in time_j:
                time_p.join()
        except IndexError:
            # The last, partial batch ran past the end of fits_files;
            # joins the processes already launched before returning.
            for time_p in time_j:
                time_p.join()
            logger.debug('extraction finished')
            return True

    return True
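# --- Illustrative sketch (not part of the original source) ------------------
# Relying on IndexError to detect the final, partial batch works, but a
# bounds-safe slice needs no exception at all. `change_times_thread` and the
# `prfs_d` settings dictionary come from the original; this wrapper is
# hypothetical.
from multiprocessing import Process


def change_times_batched(prfs_d, fits_files, core_number):
    """ Processes `fits_files` in batches of at most `core_number`
    processes, joining each batch before launching the next. """
    for idx_fits in range(0, len(fits_files), core_number):
        batch = fits_files[idx_fits:idx_fits + core_number]
        jobs = [Process(target=change_times_thread, args=(prfs_d, fits_,))
                for fits_ in batch]
        for job in jobs:
            job.start()
        for job in jobs:
            job.join()
    return True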
def __init__(self):
    """ Gives the output values of every star and galaxy present in
    filter 3, whether they were recovered or not.
    """
    self.filter_p_number = 9  # First one with enough data for statistics
    self.prfs_d = extract_settings_elvis()
    self.input_d = extract_inputs_d()

    # Per-filter dictionaries of false and right positives: one entry per
    # filter (1-4), each holding empty lists for the measured quantities.
    keys = ['SOURCE', 'RA', 'DEC', 'MAG', 'PM', 'PMERR', 'CLASS', 'OBJECT']
    self.false_positives = {idx: {key: [] for key in keys}
                            for idx in range(1, 5)}
    self.right_positives = {idx: {key: [] for key in keys}
                            for idx in range(1, 5)}

    self.save = True

    logger_name = 'scamp_performance'  # Set as desired
    self.logger = setting_logger(self.prfs_d, logger_name)

    filt_cat = self.gets_filtered_catalog()  # Gets data from filtered
    input_df = self.gets_data()  # Gets data from catalogs

    self.extract_stats(filt_cat, input_df)  # Splits by type
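# --- Illustrative sketch (not part of the original source) ------------------
# Each per-filter dictionary above holds equal-length lists, so once
# extract_stats() has populated it, it converts directly into a pandas
# DataFrame for the statistics step. The helper below is hypothetical.
import pandas as pd


def positives_frame(positives_d, filter_idx):
    """ Returns one per-filter dict (e.g. false_positives[1]) as a
    DataFrame, ready for describe()/groupby() style statistics. """
    return pd.DataFrame(positives_d[filter_idx])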
    @param mag:
    """
    # TODO create start catalog
    input_cat = '{}/catalogue_{}.cat'.format(prfs_d['output_cats'], mag)
    logger.debug('input_catalogue is {}'.format(input_cat))

    remove_ssos = True
    final_catalogue = rebase_catalogue(logger, mag, prfs_d,
                                       remove_ssos, input_cat)

    if not rewriting_catalogue(logger, prfs_d, final_catalogue, mag):
        raise Exception


if __name__ == '__main__':
    logger = setting_logger()
    prfs_d = extract_settings()

    try:
        if argv[1] == '-change':
            for mag in prfs_d['mags']:
                print('mag {}'.format(mag))
                change_times(logger, prfs_d, mag)
        elif argv[1] == '-rebase':
            cores_number = prfs_d['cores_number']
            if cores_number > len(prfs_d['mags']):
                cores_number = len(prfs_d['mags'])

            rebase_j = []
            for proc in range(0, cores_number, 1):
                rebase_p = Process(target=rebase_thread,
                                   args=(logger, prfs_d,
                                         prfs_d['mags'][proc]))