Example #1
def fix_headers(folder):
    """
    Find FITS files in folder that have WCS CTYPE ``RA---TAN`` and ``DEC--TAN``
    and add ``-SIP`` to the end. Necessary because astropy handling of SIP
    distortion keywords changed in v1.2.

    Parameters
    ----------

    folder : str
        Path to folder with the images to fix.
    """
    ic = ImageFileCollection(folder)
    ic.summary['ctype1']

    files = ic.files_filtered(ctype1='RA---TAN')

    if not len(files):
        print('No files to fix in {}'.format(folder))

    for file in files:
        fname = os.path.join(folder, file)
        print('Fixing file {}'.format(file))
        with fits.open(fname) as f:
            f[0].header['ctype1'] = f[0].header['ctype1'] + '-SIP'
            f[0].header['ctype2'] = f[0].header['ctype2'] + '-SIP'
            f.writeto(fname, overwrite=True)
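The snippet above assumes os, astropy.io.fits, and ccdproc's ImageFileCollection are already imported. A minimal, hypothetical call site might look like this (the folder path is invented for illustration):

import os

from astropy.io import fits
from ccdproc import ImageFileCollection

# Append '-SIP' to the CTYPE keywords of every matching image in one night's folder.
fix_headers('/data/2016-05-01')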
Example #2
    def test_run_triage_on_set_with_no_light_files(self):
        ic = ImageFileCollection(self.test_dir.strpath, keywords=['imagetyp'])
        for header in ic.headers(imagetyp='light', overwrite=True):
            header['imagetyp'] = 'BIAS'
        arglist = [self.test_dir.strpath]
        run_triage.main(arglist)
        assert 1
Example #3
def add_observer(top_of_tree, observers):
    """
    Add observer name to each of the FITS files in a tree of directories

    Parameters
    ----------

    top_of_tree : str
        Path to top of the directory tree containing the images to be modified
    observers : dict
        Dictionary with observation date of the form YYYY-MM-DD as keys and
        name(s) of observer(s) as a single string as the value.

    .. warning::
        This function will overwrite the FITS files in the tree.
    """
    for root, dirs, files in os.walk(top_of_tree):
        date = obs_date(root)
        if not date:
            continue
        logging.info('Processing directory %s with observers: %s', root,
                     observers[date])
        ic = ImageFileCollection(root, keywords=['imagetyp'])
        observer_keyword = FITSKeyword(name='observer', value=observers[date])
        for hdr, fname in ic.headers(overwrite=True, return_fname=True):
            if ('observer' in hdr) and hdr['purged']:
                logging.warning(
                    'Skipping file %s in %s because observer '
                    'has already been added', fname, root)
                continue
            observer_keyword.add_to_header(hdr, history=True)
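As the docstring notes, observers maps observation dates of the form YYYY-MM-DD to observer names. A hedged usage sketch, with invented names and an invented directory tree:

observers = {
    '2016-05-01': 'A. Observer',
    '2016-05-02': 'A. Observer, B. Helper',
}
add_observer('/data/raw', observers)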
Example #4
def test_add_ra_dec_from_object_name_edge_cases(caplog):
    # add a 'dec' keyword to every light file so that none need RA/Dec
    ic = ImageFileCollection(_test_dir, keywords=['imagetyp'])
    for h in ic.headers(imagetyp='light', overwrite=True):
        h['dec'] = '+17:42:00'
        h['ra'] = '03:45:06'
        h['object'] = 'm101'
    # does this succeed without errors?
    ph.add_ra_dec_from_object_name(_test_dir)

    # add name that will fail as object of one image
    image_path = path.join(_test_dir, _test_image_name)
    f = fits.open(image_path)
    h = f[0].header

    try:
        del h['RA']
        del h['dec']
    except KeyError:
        pass

    h['object'] = 'i am a fake object'
    f.writeto(image_path, overwrite=True)
    ph.add_ra_dec_from_object_name(_test_dir)
    warns = get_patch_header_logs(caplog, level=logging.WARN)
    assert 'Unable to lookup' in warns
Example #5
def test_add_object_name_logic_when_all_images_have_matching_object(caplog):
    ic = ImageFileCollection(_test_dir, keywords=['imagetyp'])
    for h in ic.headers(imagetyp='light', overwrite=True):
        h['imagetyp'] = 'FLAT'
    ph.add_object_info(_test_dir)
    infos = get_patch_header_logs(caplog, level=logging.INFO)
    alls = get_patch_header_logs(caplog)
    print(alls)
    assert 'NO OBJECTS MATCHED' in infos
Example #6
    def test_triage_grabbing_all_keywords_gets_them_all(self):
        tbl_name = 'tbl.txt'
        run_triage.main(arglist=['-a', '-t', tbl_name, self.test_dir.strpath])
        rt_table = Table.read(self.test_dir.join(tbl_name).strpath,
                              format='ascii.csv')
        lcase_columns = [c.lower() for c in rt_table.colnames]
        print(lcase_columns)
        ic = ImageFileCollection(self.test_dir.strpath, keywords='*')
        for h in ic.headers():
            for k in h:
                if k:
                    assert k.lower() in lcase_columns
Example #7
def crop_masters(path=cmpath):
    mastercollection = ImageFileCollection('Master_Files')
    for image, imname in mastercollection.ccds(imtype='trimmed bias',
                                               return_fname=True):
        trimage = ccdp.trim_image(image, fits_section=str(sciwin))
        trimage.meta['trimwind'] = (str(sciwin), 'readout window')
        trimage.meta['imtype'] = ('mbias', 'windowed master bias')
        trimage.write(path + imname, overwrite=True)
    for image, imname in mastercollection.ccds(imtype='subflat',
                                               return_fname=True):
        trimage = ccdp.trim_image(image, fits_section=str(sciwin))
        trimage.meta['trimwind'] = (str(sciwin), 'readout window')
        trimage.meta['imtype'] = ('mflat', 'windowed master flat')
        trimage.write(path + imname, overwrite=True)
Example #8
def test_unit_is_added():
    # patch in _test_dir, overwriting existing files
    ph.patch_headers(_test_dir, overwrite=True, new_file_ext='')
    ic = ImageFileCollection(_test_dir, keywords='*')
    feder = Feder()
    print(_test_dir)
    for h, f in ic.headers(return_fname=True):
        instrument = feder.instruments[h['instrume']]
        if instrument.image_unit is not None:
            print(str(instrument.image_unit), f)
            # If the instrument has a unit, the header should too.
            assert h['BUNIT'] == str(instrument.image_unit)
Example #9
def bias_combine(refresh=0):
    tbiascollection = ImageFileCollection('Trimmed_Bias')
    print('found', len(tbiascollection.values('file')), 'trimmed biases')
    combined_bias = ccdp.combine(tbiascollection.files_filtered(
        imtype='trimmed bias', include_path=True),
                                 sigma_clip=True,
                                 sigma_clip_low_thresh=5,
                                 sigma_clip_high_thresh=5,
                                 sigma_clip_func=np.nanmedian,
                                 sigma_clip_dev_func=mad_std)
    combined_bias.meta['combined'] = True
    combined_bias.write('Master_Files/mbias.fits', overwrite=True)
    return tbiascollection
Example #10
    def test_quick_add_keys_records_history(self, keyword_arg, file_arg,
                                            file_column):
        ic = ImageFileCollection(self.test_dir.strpath, keywords=['imagetyp'])
        ic.summary.keep_columns('file')

        file_list = os.path.join(ic.location, 'files.txt')
        keyword_list = os.path.join(ic.location, 'keys.txt')

        full_paths = [
            os.path.join(self.test_dir.strpath, fil)
            for fil in ic.summary['file']
        ]
        print('full paths: %s' % ' '.join(full_paths))
        ic.summary['file'][:] = full_paths
        ic.summary.remove_column('file')
        ic.summary.add_column(Column(data=full_paths, name=file_column))
        ic.summary.write(file_list, format='ascii')
        if file_column != 'file':
            ic.summary.rename_column(file_column, 'file')
        dumb_keyword = 'munkeez'.upper()
        dumb_value = 'bananaz'
        keywords = Column(data=[dumb_keyword], name='Keyword')
        vals = Column(data=[dumb_value], name='value')
        keyword_table = Table()
        keyword_table.add_columns([keywords, vals])
        keyword_table.write(keyword_list, format='ascii')
        args_for = {}
        args_for['--key-file'] = [keyword_list]
        args_for['--key-value'] = [dumb_keyword, dumb_value]
        args_for['--file-list'] = [file_list]
        args_for[''] = full_paths
        argslist = [keyword_arg]
        argslist.extend(args_for[keyword_arg])
        if file_arg:
            argslist.append(file_arg)
        argslist.extend(args_for[file_arg])
        if file_column.lower() != 'file' and file_arg:
            with pytest.raises(ValueError):
                quick_add_keys_to_file.main(argslist)
            return
        else:
            quick_add_keys_to_file.main(argslist)


#        add_keys(file_list=file_list, key_file=keyword_list)
        for header in ic.headers():
            assert (header[dumb_keyword] == dumb_value)
            history_string = ' '.join(header['history'])
            assert (dumb_keyword in history_string)
            assert (dumb_value in history_string)
Example #11
def combine_flats(refresh='2', method='2'):
    if method == '1':
        meta = 'med'
        source = 'Trimmed_Flat/subflatsmed'
        dest = 'Master_Files/mflat_median.fits'
    elif method == '2':
        meta = 'sig'
        source = 'Trimmed_Flat/subflatssig'
        dest = 'Master_Files/mflat.fits'
    subflatcollection = ImageFileCollection(source)
    combtime = 0
    if refresh == '1':
        print('found', len(subflatcollection.values('file')), 'subflats')
        start = time.time()
        if method == '1':
            mflat = ccdp.combine(subflatcollection.files_filtered(
                imtype='subflat', include_path=True),
                                 method='median')
            mflat.meta['flatcom'] = 'median'
            combtime = time.time() - start
            print('combination took', combtime, 'seconds')
        elif method == '2':
            mflat = ccdp.combine(subflatcollection.files_filtered(
                imtype='subflat', include_path=True),
                                 sigma_clip=True,
                                 sigma_clip_low_thresh=5,
                                 sigma_clip_high_thresh=5,
                                 sigma_clip_func=np.nanmedian,
                                 sigma_clip_dev_func=mad_std)
            mflat.meta['flatcom'] = 'sigma'
            combtime = time.time() - start
            print('combination took', combtime, 'seconds')
        mflat.meta['normmed'] = (np.nanmedian(mflat),
                                 'nanmedian of the master flat')
        mflat.meta['subflats'] = meta
        mflat.write(dest[0:-5] + '_' + meta + '.fits', overwrite=True)
    else:
        try:
            if method == '1':
                mflat = CCDData.read('Master_Files/mflat_median_med.fits',
                                     unit='adu')

            elif method == '2':
                mflat = CCDData.read('Master_Files/mflat_sig.fits', unit='adu')
        except Exception:
            print("can't locate master flat, create or check directory")
            sys.exit()
    return subflatcollection, mflat, dest, combtime
Example #12
def test_patch_headers_stops_if_instrument_or_software_not_found(
        badkey, caplog):
    ic = ImageFileCollection(_test_dir, keywords=['imagetyp'])
    # need a header that contains IMAGETYP so that it will be processed
    a_fits_file = ''
    for h, f in ic.headers(imagetyp='*', return_fname=True):
        a_fits_file = f
        break
    a_fits_hdu = fits.open(path.join(_test_dir, a_fits_file))
    hdr = a_fits_hdu[0].header
    badname = 'Nonsense'
    hdr[badkey] = badname
    a_fits_hdu.writeto(path.join(_test_dir, a_fits_file), overwrite=True)

    with pytest.raises(KeyError):
        ph.patch_headers(_test_dir)
Example #13
    def __call__(self, data_path, output_path=None, glob_include='*.fits'):
        self.data_path = data_path
        if output_path is None:
            self.output_path = data_path
        else:
            self.output_path = output_path

        image_collection = ImageFileCollection(location=self.data_path,
                                               keywords=self.keywords,
                                               glob_include=glob_include)

        if image_collection is not None:

            self.image_collection = image_collection.summary.to_pandas()

            grouped_data = self._classify_images(ic=self.image_collection)

            for group in grouped_data:
                file_list = group.file.tolist()
                for _file in file_list:
                    print(_file)
                output_name = self._get_combined_name(file_list=file_list,
                                                      prefix='comb_')

                combined = self._combine_data(file_list=file_list,
                                              output_name=output_name,
                                              data_path=self.data_path,
                                              out_path=self.output_path)

                extracted_name = self._get_extracted_name(combined_name=output_name)

                if combined.header['OBSTYPE'] == 'COMP':
                    extracted = self._extract_lamp(ccd=combined,
                                                   output_name=extracted_name)
Example #14
def t120_mkoffset(offset_dir=t120.t120_ofst_dir,
                  master_file_name=t120.t120_master_name):
    master_file = offset_dir + master_file_name
    listimg = ImageFileCollection(
        offset_dir)  #,glob_include='*.fit',glob_exclude='*.fits')
    listccd = []
    for ccd, file_name in listimg.ccds(ccd_kwargs={'unit': 'adu'},
                                       return_fname=True):
        t120.log.info('now considering file ' + file_name)
        listccd.append(ccd)

    combiner = Combiner(listccd)
    t120.log.info('now making the combination')
    master_offset = combiner.median_combine()
    fits_ccddata_writer(master_offset, master_file)
    t120.log.info('Result saved in ' + master_file)
    return master_file
Example #15
def main(fits_directory,
         jpeg_directory,
         base_url,
         night,
         thumbnail_directory='thumbnail',
         org_by=None):
    """

    org_by: list, optional
        List of FITS keywords by which the files should be organized.
    """

    ic = ImageFileCollection(fits_directory, keywords='*')

    groups = ['BIAS', 'DARK', 'FLAT', 'LIGHT']

    image_groups = OrderedDict()
    for group in groups:
        images = []
        for header, fname in ic.headers(imagetyp=group, return_fname=True):
            f = os.path.basename(fname)
            # Try replacing all the extensions with jpg...
            jpeg_name = (f.replace('.fit',
                                   '.jpg').replace('.fts', '.jpg').replace(
                                       '.fits', '.jpg'))
            images.append({
                'jpeg_name': jpeg_name,
                'title': construct_image_title(f, header),
                'original_name': f
            })
        image_groups[group] = images

    loader = jinja2.FileSystemLoader('.')
    e = jinja2.Environment(loader=loader, autoescape=True)

    template = e.get_template('viewer_page.html')

    foo = template.render(night=night,
                          base_url=base_url,
                          image_groups=image_groups,
                          base_url_thumb=os.path.join(base_url,
                                                      thumbnail_directory))

    return foo
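A hypothetical invocation of this page builder; the directories, URL, and night label are invented, and it assumes the viewer_page.html Jinja2 template sits in the working directory as the loader above expects:

html = main('/data/fits/2016-05-01', '/data/jpeg/2016-05-01',
            base_url='https://example.org/images', night='2016-05-01')
with open('viewer_page_2016-05-01.html', 'w') as out:
    out.write(html)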
Example #16
def bias_combine(refresh='2', method='2'):
    tbiascollection = ImageFileCollection('Trimmed_Bias')
    combtime = 0
    if refresh == '1':
        print('found', len(tbiascollection.values('file')), 'trimmed biases')
        start = time.time()
        if method == '1':
            combined_bias = ccdp.combine(tbiascollection.files_filtered(
                imtype='trimmed bias', include_path=True),
                                         method='median')
            combbiaspath = 'Master_Files/mbias_median.fits'
            combined_bias.meta['combined'] = 'median'
            combtime = time.time() - start
            print('combination took', combtime, 'seconds')
            combined_bias.write(combbiaspath, overwrite=True)
        elif method == '2':
            combined_bias = ccdp.combine(tbiascollection.files_filtered(
                imtype='trimmed bias', include_path=True),
                                         sigma_clip=True,
                                         sigma_clip_low_thresh=5,
                                         sigma_clip_high_thresh=5,
                                         sigma_clip_func=np.nanmedian,
                                         sigma_clip_dev_func=mad_std)
            combbiaspath = 'Master_Files/mbias.fits'
            combined_bias.meta['combined'] = 'sigma_clip average'
            combtime = time.time() - start
            print('combination took', combtime, 'seconds')
            combined_bias.write(combbiaspath, overwrite=True)
    else:
        try:
            if method == '1':
                combined_bias = CCDData.read('Master_Files/mbias_median.fits',
                                             unit='adu')
                combbiaspath = 'Master_Files/mbias_median.fits'
            elif method == '2':
                combined_bias = CCDData.read('Master_Files/mbias.fits',
                                             unit='adu')
                combbiaspath = 'Master_Files/mbias.fits'
        except Exception:
            print("can't locate master bias, create or check directory")
            sys.exit()

    return tbiascollection, combined_bias, combbiaspath, combtime
Example #17
    def test_sort_handles_cannot_form_tree(self, set_test_files):
        # the object keyword is stripped from all headers, which means
        # you cannot form a tree for the light files. As a result, all
        # light files should end up in "unsorted" and no error should
        # be raised.
        images = ImageFileCollection(self.test_dir.strpath,
                                     keywords=['imagetyp', 'object'])
        n_light = 0
        for header in images.headers(overwrite=True, imagetyp='LIGHT'):
            try:
                del header['object']
            except KeyError:
                pass
            n_light += 1
        dest = self.test_dir.mkdtemp()
        sort_files.main(['-d', dest.strpath, self.test_dir.strpath])
        unsorted_path = os.path.join(dest.strpath, 'LIGHT',
                                     sort_files.UNSORTED_DIR)
        assert len(os.listdir(unsorted_path)) == n_light
Example #18
    def test_run_astrometry_with_dest_does_not_modify_source(self):

        destination = self.test_dir.make_numbered_dir()
        list_before = self.test_dir.listdir(sort=True)
        arglist = [
            '--destination-dir', destination.strpath, self.test_dir.strpath
        ]
        run_astrometry.main(arglist)
        list_after = self.test_dir.listdir(sort=True)
        # nothing should change in the source directory
        assert (list_before == list_after)
        # for each light file in the destination directory we should have a
        # file with the same basename but an extension of blind
        ic = ImageFileCollection(destination.strpath, keywords=['IMAGETYP'])
        for image in ic.files_filtered(imagetyp='LIGHT'):
            image_path = destination.join(image)
            print(image_path.purebasename)
            blind_path = destination.join(image_path.purebasename + '.blind')
            print(blind_path.strpath)
            assert (blind_path.check())
Example #19
def trim_bias(refresh=False):
    biascollection = ImageFileCollection('HD115709/bias', ext=4)
    flag = 0
    tbiaspathlist = []
    if refresh:
        for ccdb, biasn in biascollection.ccds(return_fname=True,
                                               ccd_kwargs={'unit': 'adu'}):
            print('trimming', biasn)
            ccdb.header['imtype'] = ('bias', 'type of image')
            if flag == 0:
                print('all biases will be trimmed to :', ccdb.meta['trimsec'])
                flag = 1
            tbias = ccdp.trim_image(ccdb, fits_section=str(ccdb.meta['trimsec']))
            tbias.meta['imtype'] = ('trimmed bias', 'type of image')
            tbias.meta['taxis1'] = (2048, 'dimension1')
            tbias.meta['taxis2'] = (4096, 'dimension2')
            tbias.write('Trimmed_Bias/' + biasn[0:8] + '_trim.fits', overwrite=True)
            tbiaspathlist.append('Trimmed_Bias/' + biasn[0:8] + '_trim.fits')
    return biascollection, tbiaspathlist
Example #20
    def get_instrument(self, night_folder):
        """Identify Goodman's Camera

        Goodman has two camera, *Blue* and *Red*. They are, as the name suggest
        optimized for bluer and redder wavelength respectively. Their headers
        are different so this methods uses their differences to discover which
        camera the data belong to. The red camera has an specific keyword that
        says which camera is but the blue does not.
        The result is stored as an attribute of the class.

        Notes:
            As of April 2017 the blue camera computer was upgraded and as a
            result the headers where updated too. But we need to keep this
            feature for *backward compatibility*

        Args:
            night_folder (str): The full path for the raw data location

        """
        while True:
            try:
                ifc = ImageFileCollection(night_folder)
                self.image_collection = ifc.summary.to_pandas()

                self.objects_collection = self.image_collection[
                    self.image_collection.obstype != 'BIAS']

                if len(self.objects_collection) > 0:

                    indexes = self.objects_collection.index.tolist()
                    index = random.choice(indexes)

                    try:

                        self.instrument = \
                            self.objects_collection.instconf[index]

                    except AttributeError as error:
                        log.error(error)
                        # print(self.objects_collection.file[index])
                        self.instrument = 'Blue'
                else:
                    log.error('There is no useful data in this folder.')
            except ValueError as error:
                if 'Inconsistent data column lengths' in str(error):

                    log.error('There are duplicated keywords in the headers. '
                              'Fix it first!')

                    fix_duplicated_keywords(night_folder)
                    continue
                else:
                    log.error('Unknown Error: ' + str(error))
            break
Example #21
def get_nightly_image_list(date, telescope="C28"):
    path = os.path.join(config.get("WISE", "OBS_PATH"), config.get(telescope, "OBS_DIR"))
    if telescope == "C28":
        path = os.path.join(path, date + "c28")
    elif telescope == "C18":
        path = os.path.join(path, date + "c18")
    else:
        path = os.path.join(path, date)
    imlist = ImageFileCollection(path)

    return imlist
Example #22
def sub_bias(refresh='2', bias='2'):
    tflatcollection = ImageFileCollection('Trimmed_Flat')
    if bias == '1':
        biaspath = 'Master_Files/mbias_median.fits'
        dest = 'Trimmed_Flat/subflatsmed/'
    elif bias == '2':
        biaspath = 'Master_Files/mbias.fits'
        dest = 'Trimmed_Flat/subflatssig/'
    if refresh == '1':
        subflatpathlist = []
        mbias = CCDData.read(biaspath, unit='adu')
        for ccdf, flatn in tflatcollection.ccds(imtype='trimmed flat',
                                                return_fname=True):
            subflat = ccdp.subtract_bias(ccdf, mbias, add_keyword='subbias')
            subflat.meta['imtype'] = ('subflat', 'bias subtracted flat')
            subflat.write(dest + flatn[0:8] + '_subbias.fits', overwrite=True)
            subflatpathlist.append(dest + flatn[0:8] + '_subbias.fits')
    else:
        try:
            subflatcollection = ImageFileCollection(dest)
            subflatpathlist = subflatcollection.files_filtered(
                imtype='subflat', include_path=True)
            print('found', len(subflatpathlist), 'subflats')
        except Exception:
            print("can't locate subflats, create or check directory")
            sys.exit()
    return tflatcollection, subflatpathlist
Example #23
def trim_flat(refresh='2'):
    flatcollection = ImageFileCollection('HD115709/flat_SII', ext=4)
    flag = 0
    tflatpathlist = []
    if refresh == '1':
        for ccdf, flatn in flatcollection.ccds(return_fname=True,
                                               ccd_kwargs={'unit': 'adu'}):
            if flag == 0:
                print('all flats will be trimmed to :', ccdf.meta['trimsec'])
                flag = 1

            print('trimming', flatn)

            tflat = ccdp.trim_image(ccdf,
                                    fits_section=str(ccdf.meta['trimsec']))
            tflat.meta['imtype'] = ('trimmed flat', 'type of image')
            tflat.meta['taxis1'] = (2048, 'dimension1')
            tflat.meta['taxis2'] = (4096, 'dimension2')
            tflat.write('Trimmed_Flat/' + flatn[0:8] + '_trim.fits',
                        overwrite=True)
            tflatpathlist.append('Trimmed_Flat/' + flatn[0:8] + '_trim.fits')
        print('created', len(tflatpathlist), 'trimmed flats')
    elif refresh == '2':
        try:
            tflatcollection = ImageFileCollection('Trimmed_Flat')
            tflatpathlist = tflatcollection.files_filtered(
                imtype='trimmed flat', include_path=True)
            print('found', len(tflatpathlist), 'trimmed flats')
        except Exception:
            print("can't locate trimmed flats, create or check directory")
            sys.exit()
    return flatcollection, tflatpathlist
Example #24
def trim_bias(refresh='2'):
    biascollection = ImageFileCollection('HD115709/bias', ext=4)
    flag = 0
    if refresh == '1':
        tbiaspathlist = []
        for ccdb, biasn in biascollection.ccds(return_fname=True,
                                               ccd_kwargs={'unit': 'adu'}):
            if flag == 0:
                print('all biases will be trimmed to :', ccdb.meta['trimsec'])
                flag = 1

            print('trimming', biasn)

            tbias = ccdp.trim_image(ccdb,
                                    fits_section=str(ccdb.meta['trimsec']))
            tbias.meta['imtype'] = ('trimmed bias', 'type of image')
            tbias.meta['taxis1'] = (2048, 'dimension1')
            tbias.meta['taxis2'] = (4096, 'dimension2')
            tbias.write('Trimmed_Bias/' + biasn[0:8] + '_trim.fits',
                        overwrite=True)
            tbiaspathlist.append('Trimmed_Bias/' + biasn[0:8] + '_trim.fits')
        print('created', len(tbiaspathlist), 'trimmed biases')
    else:
        try:
            tbiascollection = ImageFileCollection('Trimmed_Bias')
            tbiaspathlist = tbiascollection.files_filtered(
                imtype='trimmed bias', include_path=True)
            print('found', len(tbiaspathlist), 'trimmed biases')
        except Exception:
            print("can't locate trimmed biases, create or check directory")
            sys.exit()
    return biascollection, tbiaspathlist
Example #25
def main(source_d, destination_d, thumbnail_size=150):
    """
    Create a directory of jpegs from a directory of FITS files, optionally
    creating a subdirectory of jpeg thumbnails.

    Parameters
    ----------

    source_d : str
        Path to the directory of FITS files.

    destination_d : str
        Path to the directory in which JPEGs will be placed.

    thumbnail_size: float
        Dimension of thumbnail image. Set to zero to not produce thumbnails.
    """
    thumbnail_dir = 'thumbnail'

    ic = ImageFileCollection(source_d, keywords='*')

    mkdir_even_if_it_exists(destination_d)

    if thumbnail_size:
        mkdir_even_if_it_exists(os.path.join(destination_d, thumbnail_dir))

    for data, fname in ic.data(return_fname=True):
        scaled_data = scale_and_downsample(data)
        base, _ = os.path.splitext(os.path.basename(fname))
        dest_path = os.path.join(destination_d, base + '.jpg')
        mimg.imsave(dest_path, scaled_data, cmap="gray")

        if thumbnail_size:
            tiny = np.array(data.shape) // thumbnail_size
            thumb = block_reduce(scaled_data, block_size=tuple(tiny))
            thumb_path = os.path.join(destination_d, thumbnail_dir,
                                      base + '.jpg')
            mimg.imsave(thumb_path, thumb, cmap='gray')
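A hedged usage sketch; the source and destination directories are assumptions, and passing thumbnail_size=0 would skip the thumbnail pass entirely:

main('/data/fits/2016-05-01', '/data/jpeg/2016-05-01', thumbnail_size=150)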
Example #26
def bias_combine(refresh=False, method=1):
    if refresh == 0:
        tbiascollection = ImageFileCollection('Trimmed_Bias')
        print('found', len(tbiascollection.values('file')), 'trimmed biases')
        start = time.time()
        if method == 1:
            combined_bias = ccdp.combine(tbiascollection.files_filtered(
                imtype='trimmed bias', include_path=True),
                                         method='median')
            combbiaspath = 'Master_Files/mbias_median.fits'
            combined_bias.meta['combined'] = 'median'
        elif method == 2:
            combined_bias = ccdp.combine(tbiascollection.files_filtered(
                imtype='trimmed bias', include_path=True),
                                         sigma_clip=True,
                                         sigma_clip_low_thresh=5,
                                         sigma_clip_high_thresh=5,
                                         sigma_clip_func=np.nanmedian,
                                         sigma_clip_dev_func=mad_std)
            combbiaspath = 'Master_Files/mbias.fits'
            combined_bias.meta['combined'] = 'sigma_clip average'
        combtime = time.time() - start
        print('combination took', combtime, 'seconds')
        combined_bias.write(combbiaspath, overwrite=True)
    return tbiascollection, combined_bias, combbiaspath, combtime
Example #27
def get_files(filenames, location='.', imagetyp=None, filter=None, fexptime='*'):
    '''Gather files from input directories, choosing by image type and filter
    if specified'''

    # for summary display only
    keywords = ('object', 'date-obs', 'IMAGETYP', 'FILTER', 'EXPTIME')

    collection = ImageFileCollection(filenames=filenames, location=location,
                                     keywords=keywords)

    if imagetyp in ['light', 'bias', 'dark']:
        imagetyp = '%s frame' % imagetyp
    else:
        imagetyp = '*'

    if fexptime is None:
        fexptime = '*'

    if imagetyp == 'bias frame':
        fexptime = 0.

    # filter and reread
    if filter is None:
        filelist = collection.files_filtered(IMAGETYP=imagetyp,
                                             EXPTIME=fexptime,
                                             include_path=True)
    else:
        filelist = collection.files_filtered(IMAGETYP=imagetyp,
                                             EXPTIME=fexptime,
                                             FILTER=filter,
                                             include_path=True)

    if not filelist:
        raise RuntimeError('No matching files found.')

    location = os.path.join(collection.location, os.path.dirname(filenames[0]))

    collection = ImageFileCollection(filenames=filelist, location=location,
                                     keywords=keywords)

    collection.summary.pprint()
    return collection
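For instance, to gather only the V-band light frames from two files (the file names, location, and filter value are assumptions for illustration):

lights_v = get_files(['obj_001.fits', 'obj_002.fits'],
                     location='/data/night1', imagetyp='light', filter='V')
print(len(lights_v.files), 'matching light frames')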
Example #28
def test_purge_bad_keywords_logic_for_conditionals(caplog):
    ic = ImageFileCollection(_test_dir, keywords=['imagetyp'])
    headers = [h for h in ic.headers()]
    a_header = headers[0]
    # should be no warnings the first time...
    ph.purge_bad_keywords(a_header)
    purge_warnings = get_patch_header_logs(caplog)
    assert not purge_warnings
    # re-purging should generate warnings...
    ph.purge_bad_keywords(a_header)
    purge_warnings = get_patch_header_logs(caplog)
    assert 'force' in purge_warnings
    assert 'removing' in purge_warnings
    # grab a clean header
    # want to get to a header with more than one bad keyword so that a
    # history is generated...
    for a_header in headers[1:]:
        software = ph.get_software_name(a_header)
        if len(software.bad_keywords) <= 1:
            continue
        else:
            break
    # delete one of the purge keywords for this software, which should ensure
    # that there is no history added to the header that contains the name of
    # this keyword
    key_to_delete = software.bad_keywords[0]
    print(software.bad_keywords)
    try:
        del a_header[key_to_delete]
    except KeyError:
        pass

    print(a_header)
    ph.purge_bad_keywords(a_header, history=True)
    print(a_header)
    assert all(key_to_delete.lower() not in h.lower()
               for h in a_header['HISTORY'])
Example #29
    def __call__(self, pattern, *args, **kwargs):
        self.ic = ImageFileCollection(self.path,
                                      keywords=self.keywords,
                                      glob_include=pattern)
        self.pd_ic = self.ic.summary.to_pandas()

        grouped = self.pd_ic.groupby(
            by=['wavmode']).size().reset_index().rename(columns={0: 'count'})

        for i in grouped.index:
            wavmode = grouped.iloc[i]['wavmode']
            lamps = self.pd_ic.object[self.pd_ic.wavmode == wavmode].tolist()
            print("Wavmode: {:s}, N: {:d}".format(wavmode, len(lamps)))
            for obj in sorted(lamps):
                print("\t{:s}".format(obj))
Example #30
def load_files(target_dir, config, files, logger):
    from ccdproc import ImageFileCollection

    keywords = [
        'object', 'imagetyp', 'date-obs', 'telra', 'teldec', 'airmass',
        'filter', 'oairtemp', 'relhum', 'subbias', 'flatcor'
    ]
    ic = ImageFileCollection(location=target_dir,
                             filenames=files,
                             keywords=keywords)
    nbias = len(ic.files_filtered(imagetyp='bias'))
    nflat = len(ic.files_filtered(imagetyp='flat'))
    ndata = len(ic.files_filtered(imagetyp='object'))

    filters = []
    for h in ic.headers():
        if h['IMAGETYP'] in ['object', 'flat']:
            if h['FILTER'] not in filters:
                filters.append(h['FILTER'])

    filters.sort()
    #    flat_breakdown = []
    #    data_breakdown = []
    #    for filt in filters:
    #        for k in config.flats.values():
    #            flat_breakdown.append('{} {} {}'.format(
    #                len(ic.files_filtered(
    #                    imagetyp='flat', object=k, filter=filt)),
    #                filt, k))
    #        data_breakdown.append('{} {}'.format(
    #            len(ic.files_filtered(imagetyp='OBJECT', filter=filt)), filt))

    logger.info('{} files: {} bias, {} flats, {} object ({} filters)'.format(
        len(ic.files), nbias, nflat, ndata, len(filters)))

    return ic
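A hypothetical call; a plain logging.Logger stands in for the pipeline's logger, and config is passed through unused since it is only referenced in the commented-out breakdown:

import logging

logging.basicConfig(level=logging.INFO)
ic = load_files('/data/night1', config=None,
                files=['bias_001.fits', 'flat_V_001.fits', 'ngc1234_V_001.fits'],
                logger=logging.getLogger('calibration'))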
Example #31
    def __call__(self):
        """Call method

        Creates a table with selected keywords that allows the data to be
        grouped and then classified according to the observational technique
        used, imaging or spectroscopy.

        Returns:
            data_container (object): Class used as storage unit for classified
            data.

        """

        ifc = ImageFileCollection(self.path, self.keywords)
        self.file_collection = ifc.summary.to_pandas()
        # add two columns that will contain the ra and dec in degrees

        self.file_collection['radeg'] = ''
        self.file_collection['decdeg'] = ''
        for i in self.file_collection.index.tolist():

            radeg, decdeg = ra_dec_to_deg(self.file_collection.obsra.iloc[i],
                                          self.file_collection.obsdec.iloc[i])

            self.file_collection.iloc[
                i, self.file_collection.columns.get_loc('radeg')] = \
                '{:.2f}'.format(radeg)

            self.file_collection.iloc[
                i, self.file_collection.columns.get_loc('decdeg')] = \
                '{:.2f}'.format(decdeg)
            # now we can compare using degrees

        self.initial_checks()
        self.all_datatypes = self.file_collection.obstype.unique()
        if self.technique == 'Spectroscopy':
            self.spectroscopy_night(file_collection=self.file_collection,
                                    data_container=self.data_container)
        elif self.technique == 'Imaging':
            self.imaging_night()

        if self.data_container.is_empty:
            log.debug('data_container is empty')
            sys.exit('ERROR: There is no data to process!')
        else:
            log.debug('Returning classified data')
            return self.data_container
Example #32
import os
import sys

import numpy as np

import ccdproc
from ccdproc import CCDData
from ccdproc import ImageFileCollection
from datetime import datetime

if len(sys.argv) != 2:
    print('Usage:\npython wht_calibrate_objects.py [full_path_to_reduced_data]\n')
    sys.exit()

indir = sys.argv[1]

os.chdir(indir)

#change this to point to your raw data directory
ic1 = ImageFileCollection(indir.replace('sci2', '20160115'))

#create an array of dates
arm = 'Red arm'
file_list = []
date_list=[]
for hdu, fname in ic1.hdus(obstype='Arc', isiarm=arm, return_fname=True):
    if os.path.isfile('w_arc_'+os.path.basename(fname)):
        d = hdu.header['DATE-OBS'] + ' ' + hdu.header['UT']
        d = datetime.strptime(d, '%Y-%m-%d %H:%M:%S.%f')
        file_list.append(fname)
        date_list.append(d)
date_arr = np.array(date_list)

#reduce the object frames
for filename in ic1.files_filtered(obstype='TARGET', isiarm=arm):
Example #33
import os
import sys

import astropy.units as u

import ccdproc
from ccdproc import CCDData
from ccdproc import ImageFileCollection

if len(sys.argv) != 3:
    print('Usage:\npython wht_basic_rection.py [full_path_to_raw_data] [full_path_to_reduced_data]\n')
    sys.exit()

indir = sys.argv[1]
outdir = sys.argv[2]

if not os.path.isdir(outdir): os.mkdir(outdir)
os.chdir(outdir)

#change this to point to your raw data directory
ic1 = ImageFileCollection(indir)

#create the bias frames
blue_bias_list = []
for filename in ic1.files_filtered(obstype='Bias', isiarm='Blue arm'):
    print(ic1.location + filename)
    ccd = CCDData.read(ic1.location + filename, unit=u.adu)
    #this has to be fixed as the bias section does not include the whole section that will be trimmed
    ccd = ccdproc.subtract_overscan(ccd, median=True, overscan_axis=0,
                                    fits_section='[1:966,4105:4190]')
    ccd = ccdproc.trim_image(ccd, fits_section=ccd.header['TRIMSEC'])
    blue_bias_list.append(ccd)
master_bias_blue = ccdproc.combine(blue_bias_list, method='median')
master_bias_blue.write('master_bias_blue.fits', overwrite=True)

red_bias_list = []
for filename in ic1.files_filtered(obstype='Bias', isiarm='Red arm'):