def test_start(self, _temporary_session_directory):
    """ Exercise session.start(): populate a temporary session directory with
        images via the fixture, call start(), then verify the working directory,
        the session log file, and the copied FITS files. """
    src_dir, tmp_dir = _temporary_session_directory
    os.chdir(src_dir)
    defaults_dict = ini.make_defaults_dict()
    expected_log_path = os.path.join(tmp_dir, defaults_dict['session log filename'])
    # Preconditions: no session log yet, and cwd is the source directory.
    assert not os.path.isfile(expected_log_path)
    assert os.getcwd() == src_dir
    session.start(TEST_SESSION_TOP_DIRECTORY, TEMP_TEST_MP, TEST_AN, 'Clear')
    # start() must switch cwd to the temp directory and write the session log there.
    assert os.getcwd() == tmp_dir
    assert os.path.isfile(expected_log_path)
    # Every MP FITS file from the source directory must be present in the temp directory.
    assert set(get_mp_filenames(tmp_dir)) == set(get_mp_filenames(src_dir))
def _get_filenames_time_order(directory):
    """ Return list of FITS filenames in time order (sorted by JD header value). """
    records = []
    for fn in get_mp_filenames(directory):
        hdu = apyfits.open(os.path.join(directory, fn))[0]
        records.append({'Filename': fn, 'JD': fits_header_value(hdu, 'JD')})
    df_sorted = pd.DataFrame(data=records).sort_values(by='JD')
    return df_sorted['Filename'].values
def _make_test_session_directory(source_path, temp_path):
    """ Make a fresh test directory (probably with test MP not matching its filename MP).
    :param source_path: from which FITS files are copied, treated as read-only. [string]
    :param temp_path: new test directory to populate with FITS files from source_path. [string]
    :return: [None]
    """
    os.makedirs(temp_path, exist_ok=True)
    # Copy each MP FITS file (with metadata, via copy2) into the new directory.
    for filename in get_mp_filenames(source_path):
        shutil.copy2(os.path.join(source_path, filename), temp_path)
def test_get_mp_filenames():
    """ Verify get_mp_filenames() returns the expected unique MP FITS filenames. """
    this_directory = os.path.join(TEST_SESSIONS_DIRECTORY,
                                  'MP_' + str(SOURCE_TEST_MP),
                                  'AN' + str(TEST_AN))
    mp_filenames = util.get_mp_filenames(this_directory)
    assert isinstance(mp_filenames, list)
    for fn in mp_filenames:
        assert isinstance(fn, str)
        assert fn.startswith('MP_')
        assert fn[-4:] in util.VALID_FITS_FILE_EXTENSIONS
    assert len(mp_filenames) == 7
    assert len(set(mp_filenames)) == len(mp_filenames)  # filenames are unique.
def do_fits_assessments(defaults_dict, this_directory):
    """ Assess all MP FITS files in a directory for readability and quality.
        Counts files per filter, then checks each readable FITS for plate solution,
        calibration, plausible FWHM, and plausible focal length, printing warnings
        as it goes.
    :param defaults_dict: session default settings (used to build instrument dict). [dict]
    :param this_directory: full path of directory holding the FITS files. [string]
    :return: 2-tuple (df, return_dict): df holds per-file header info indexed by
        filename; return_dict summarizes all warnings found. [DataFrame, dict]
    """
    return_dict = {
        'file not read': [],         # list of filenames
        'filter not read': [],       # "
        'file count by filter': [],  # list of tuples (filter, file count)
        'warning count': 0,          # total count of all warnings.
        'not platesolved': [],       # list of filenames
        'not calibrated': [],        # "
        'unusual fwhm': [],          # list of tuples (filename, fwhm)
        'unusual focal length': []}  # list of tuples (filename, focal length)

    # Count FITS files by filter, write totals
    # (we've stopped classifying files by intention; now we include all valid FITS in dfs):
    filter_counter = Counter()
    valid_fits_filenames = []
    all_fits_filenames = util.get_mp_filenames(this_directory)
    for filename in all_fits_filenames:
        fullpath = os.path.join(this_directory, filename)
        try:
            hdu = apyfits.open(fullpath)[0]
        except FileNotFoundError:
            print(' >>>>> WARNING: can\'t find file', fullpath, 'Skipping file.')
            return_dict['file not read'].append(filename)
        except (OSError, UnicodeDecodeError):
            print(' >>>>> WARNING: can\'t read file', fullpath, 'as FITS. Skipping file.')
            return_dict['file not read'].append(filename)
        else:
            fits_filter = util.fits_header_value(hdu, 'FILTER')
            if fits_filter is None:
                # Fixed: this warning string was broken in two in the original source.
                print(' >>>>> WARNING: filter in', fullpath, 'cannot be read. Skipping file.')
                return_dict['filter not read'].append(filename)
            else:
                valid_fits_filenames.append(filename)
                filter_counter[fits_filter] += 1
    # 'filter_name' rather than 'filter', which shadowed the builtin:
    for filter_name in filter_counter.keys():
        print(' ' + str(filter_counter[filter_name]), 'in filter', filter_name + '.')
        return_dict['file count by filter'].append(
            (filter_name, filter_counter[filter_name]))

    # Start dataframe for main FITS integrity checks:
    fits_extensions = pd.Series(
        [os.path.splitext(f)[-1].lower() for f in valid_fits_filenames])
    df = pd.DataFrame({
        'Filename': valid_fits_filenames,
        'Extension': fits_extensions.values
    }).sort_values(by=['Filename'])
    df = df.set_index('Filename', drop=False)
    df['PlateSolved'] = False
    df['Calibrated'] = False
    df['FWHM'] = np.nan
    df['FocalLength'] = np.nan

    # Populate df with FITS header info needed for validity tests below:
    for filename in df.index:
        fullpath = os.path.join(this_directory, filename)
        hdu = apyfits.open(fullpath)[0]  # already known to be valid, from above.
        df.loc[filename, 'PlateSolved'] = util.fits_is_plate_solved(hdu)
        df.loc[filename, 'Calibrated'] = util.fits_is_calibrated(hdu)
        df.loc[filename, 'FWHM'] = util.fits_header_value(hdu, 'FWHM')
        df.loc[filename, 'FocalLength'] = util.fits_focal_length(hdu)
        jd_start = util.fits_header_value(hdu, 'JD')
        exposure = util.fits_header_value(hdu, 'EXPOSURE')
        jd_mid = jd_start + (exposure / 2) / 24 / 3600  # mid-exposure, in days.
        # JD_mid needed only to write control.ini stub (1st & last FITS).
        df.loc[filename, 'JD_mid'] = jd_mid

    # Warn of FITS without plate solution:
    filenames_not_platesolved = df.loc[~df['PlateSolved'], 'Filename']
    if len(filenames_not_platesolved) >= 1:
        print('NO PLATE SOLUTION:')
        for fn in filenames_not_platesolved:
            print(' ' + fn)
            return_dict['not platesolved'].append(fn)
        print('\n')
    else:
        print('All platesolved.')
    return_dict['warning count'] += len(filenames_not_platesolved)

    # Warn of FITS without calibration:
    filenames_not_calibrated = df.loc[~df['Calibrated'], 'Filename']
    if len(filenames_not_calibrated) >= 1:
        print('\nNOT CALIBRATED:')
        for fn in filenames_not_calibrated:
            print(' ' + fn)
            return_dict['not calibrated'].append(fn)
        print('\n')
    else:
        print('All calibrated.')
    return_dict['warning count'] += len(filenames_not_calibrated)

    # Warn of FITS with very large or very small FWHM:
    odd_fwhm_list = []
    instrument_dict = ini.make_instrument_dict(defaults_dict)
    min_fwhm = 0.5 * instrument_dict['nominal fwhm pixels']
    max_fwhm = 2.0 * instrument_dict['nominal fwhm pixels']
    for fn in df['Filename']:
        fwhm = df.loc[fn, 'FWHM']
        if fwhm < min_fwhm or fwhm > max_fwhm:  # too small or large:
            odd_fwhm_list.append((fn, fwhm))
    if len(odd_fwhm_list) >= 1:
        print('\nUnusual FWHM (in pixels):')
        for fn, fwhm in odd_fwhm_list:
            print(' ' + fn + ' has unusual FWHM of ' +
                  '{0:.2f}'.format(fwhm) + ' pixels.')
            return_dict['unusual fwhm'].append((fn, fwhm))
        print('\n')
    else:
        print('All FWHM values seem OK.')
    return_dict['warning count'] += len(odd_fwhm_list)

    # Warn of FITS with abnormal Focal Length (vs median over all files):
    odd_fl_list = []
    median_fl = df['FocalLength'].median()
    for fn in df['Filename']:
        fl = df.loc[fn, 'FocalLength']
        focal_length_pct_deviation = 100.0 * abs((fl - median_fl)) / median_fl
        if focal_length_pct_deviation > FOCAL_LENGTH_MAX_PCT_DEVIATION:
            odd_fl_list.append((fn, fl))
    if len(odd_fl_list) >= 1:
        # Fixed: closing parenthesis was missing from this message ('mm:' -> 'mm):').
        print('\nUnusual FocalLength (vs median of ' +
              '{0:.1f}'.format(median_fl) + ' mm):')
        for fn, fl in odd_fl_list:
            print(' ' + fn + ' has unusual Focal length of ' + str(fl))
            return_dict['unusual focal length'].append((fn, fl))
        print('\n')
    else:
        print('All Focal Lengths seem OK.')
    return_dict['warning count'] += len(odd_fl_list)
    return df, return_dict