Example #1
0
 def test_nimo(self):
     """ Run the NI (no-input) parent step and verify that it
         produces at least one DataParent output object.
     """
     # Build a configured pipeline parent and enable output filling
     parent = DataParent(config=CONFFILE)
     parent.config['parentni']['filloutput'] = True
     # Instantiate the step and hand it the shared configuration
     ni_step = parent.getobject('StepNIParent')
     ni_step.config = parent.config
     # Running the step with no input must still yield output data
     output = ni_step()
     self.assertIsInstance(output[0], DataParent)
Example #2
0
 def test_load(self):
     """ Load a FITS file and check that DataParent.load picks
         the matching data object (DataFits) and that the loaded
         image has a non-empty shape.
     """
     from darepype.drp import DataFits
     from darepype.drp import DataParent
     conf_path = os.path.join(TESTDATA_FOLDER, 'testconf.txt')
     fits_path = os.path.join(TESTDATA_FOLDER, 'testfit.fits')
     loaded = DataParent(config=conf_path).load(fits_path)
     self.assertIsInstance(loaded, DataFits)
     self.assertGreater(sum(loaded.image.shape), 0)
Example #3
0
 def test_load(self):
     """ Load a text file and check that DataParent.load picks
         the matching data object (DataText) and that the loaded
         data is non-empty.
     """
     from darepype.drp import DataParent
     from darepype.drp import DataText
     conf_path = os.path.join(TESTDATA_FOLDER, 'testconf.txt')
     text_path = os.path.join(TESTDATA_FOLDER, 'testtext.txt')
     loaded = DataParent(config=conf_path).load(text_path)
     self.assertIsInstance(loaded, DataText)
     self.assertGreater(len(loaded.data), 0)
Example #4
0
 def test_multi(self):
     """ Run the pipeline in 'multi' mode (mimo and miso steps)
         on two copies of the test file and check the result type.
     """
     parent = DataParent(config=CONFFILE)
     pipeline = parent.getobject('PipeLine')
     parent.log.info('  ==== START MISO/MIMO PIPE ====')
     result = pipeline([FITSFILE, FITSFILE],
                       parent.config,
                       pipemode='multi',
                       force=True)
     parent.log.info('  ==== END MISO/MIMO PIPE ====')
     self.assertIsInstance(result, DataParent)
Example #5
0
 def test_single(self):
     """ Run the pipeline in 'single' mode (siso step only)
         on two copies of the test file and check the result type.
     """
     parent = DataParent(config=CONFFILE)
     pipeline = parent.getobject('PipeLine')
     parent.log.info('  ==== START SISO PIPE ====')
     result = pipeline([FITSFILE, FITSFILE],
                       parent.config,
                       pipemode='single',
                       force=True)
     parent.log.info('  ==== END SISO PIPE ====')
     self.assertIsInstance(result, DataParent)
Example #6
0
 def test_mimo(self):
     """ Run a mimo (multi-in multi-out) step on two files and
         check that the image data is unchanged while the header
         has been modified.
     """
     parent = DataParent(config=CONFFILE)
     first = parent.load(FITSFILE)
     second = parent.load(FITSFILE)
     mo_step = parent.getobject('StepMOParent')
     # Snapshot header and image before the step runs
     saved_head = first.header.copy()
     saved_img = first.image.copy()
     out_first, _out_second = mo_step([first, second])
     # Pixel data must be untouched; header must differ
     self.assertEqual(np.sum(saved_img - out_first.image), 0)
     self.assertNotEqual(saved_head, out_first.header)
Example #7
0
 def test_siso(self):
     """ Run a siso (single-in single-out) step and check that
         the image data is unchanged while the header has been
         modified.
     """
     parent = DataParent(config=CONFFILE)
     loaded = parent.load(FITSFILE)
     # Snapshot header and image before the step runs
     saved_head = loaded.header.copy()
     saved_img = loaded.image.copy()
     si_step = parent.getobject('StepParent')
     result = si_step(loaded)
     # Pixel data must be untouched; header must differ
     self.assertEqual(np.sum(saved_img - result.image), 0)
     self.assertNotEqual(saved_head, result.header)
Example #8
0
 def test_config(self):
     """ Load the test configuration file and check that the
         'general' section is present in the parsed config.
     """
     from darepype.drp import DataParent
     conf_path = os.path.join(TESTDATA_FOLDER, 'testconf.txt')
     parent = DataParent(config=conf_path)
     self.assertIn('general', parent.config)
Example #9
0
 def test_init(self):
     """ Construct DataParent both via the module path and via
         the package shortcut, checking the instance type each time.
     """
     # Via the submodule
     from darepype.drp import dataparent
     obj = dataparent.DataParent()
     self.assertIsInstance(obj, dataparent.DataParent)
     # Via the package-level export
     from darepype.drp import DataParent
     obj = DataParent()
     self.assertIsInstance(obj, DataParent)
Example #10
0
 def loadauxfile(self, auxpar='', data=None, multi=False):
     """ Uses loadauxname to search for files matching auxfile.
         See loadauxname for parameter description.

         Returns a pipedata object with the best match (a list of
         objects if multi is set).
     """
     # Find the best-matching filename(s)
     match = self.loadauxname(auxpar, data, multi)
     # Load each match into a pipedata object and return
     if multi:
         return [
             DataParent(config=self.config).load(name) for name in match
         ]
     return DataParent(config=self.config).load(match)
Example #11
0
 def test_imageHDUs(self):
     """ Test image, imageindex, imageget, imageset, imagedel
     """
     # Load the test FITS file through DataParent
     from darepype.drp import DataParent
     conf_path = os.path.join(TESTDATA_FOLDER, 'testconf.txt')
     fits_path = os.path.join(TESTDATA_FOLDER, 'testfit.fits')
     df = DataParent(config=conf_path).load(fits_path)
     # Rename the primary hdu
     df.imgnames[0] = "FIRST IMAGE"
     # Copy the first image into a second hdu
     df.imageset(df.image, "Second Image", df.header)
     # imageindex should report the copy at index 1
     self.assertEqual(1, df.imageindex("Second Image"),
                      'check imageindex on 2 images')
     # imageget should return identical pixel data
     self.assertEqual(df.image.sum(), df.imageget("Second Image").sum())
     # Delete the first image; one hdu (name, data, header) should remain
     df.imagedel('First Image')
     remaining = len(df.imgdata) + len(df.imgnames) + len(df.imgheads)
     self.assertEqual(3, remaining)
     self.assertEqual(0, df.imageindex("Second Image"), 'check imagedelete')
Example #12
0
 def test_getheader(self):
     """ Test header functions: Set and get header keywords
     """
     # Load the test FITS file through DataParent
     from darepype.drp import DataParent
     conf_path = os.path.join(TESTDATA_FOLDER, 'testconf.txt')
     fits_path = os.path.join(TESTDATA_FOLDER, 'testfit.fits')
     df = DataParent(config=conf_path).load(fits_path)
     # Rename the primary hdu
     df.imgnames[0] = "FIRST IMAGE"
     # Copy the first image into a second hdu
     df.imageset(df.image, "Second Image", df.header)
     # Read values from the FITS header and from config[header]
     self.assertEqual(2, df.getheadval('NAXIS'), 'getheadval from header')
     self.assertEqual(3, df.getheadval('TESTVAL'),
                      'getheadval from config[header]')
     # Write one keyword into each header
     df.setheadval('AVALUE', 10)
     df.setheadval('BVALUE', 20, None, 'Second Image')
     # Read both keywords back
     self.assertEqual(10, df.getheadval('AVALUE'),
                      'check setheadval primary header')
     self.assertEqual(20, df.getheadval('BVALUE', 'allheaders'),
                      'check setheadval second header')
Example #13
0
 def test(self):
     """ Test Pipe Step Parent Object:
         Runs a set of basic tests on the object
     """
     # log message
     self.log.info('Testing pipe step %s' % self.name)
     # Set up the step
     self.name = 'loadaux'  # this is not used normally as loadaux is normally used as parent
     for auxkey in ('test1', 'test2'):
         self.loadauxsetup(auxkey)
     # Print all registered parameters
     for par in self.paramlist:
         print(par)
     # Load input data
     self.datain = DataParent(config=self.config).load('IN_a0_1.fits')
     # Get test1 auxfile
     auxf = self.loadauxname('test1', multi=True)
     print('********** ' + repr(auxf))
     # Get test2 auxfile
     auxf = self.loadauxname('test2')
     print('********** ' + repr(auxf))
     # log message
     self.log.info('Testing pipe step %s - Done' % self.name)
Example #14
0
 def run(self):
     """ Runs the data reduction algorithm. The self.datain is run
         through the code, the result is in self.dataout.

         Input files are grouped by the header keywords listed in the
         'groupkeys' parameter (optionally formatted with 'groupkfmt'),
         each group is reduced with the step named by 'redstepname',
         and the outputs are collected in self.dataout. If 'fileidkey'
         is set, groups whose file id keys are unchanged since the
         previous call are not re-reduced; the cached output from
         self.groupoutputs is reused instead.
     """
     ### Get redstep if it's not loaded
     if self.redstep is None:
         # Get the step
         datap = DataParent(config=self.config)
         self.redstep = datap.getobject(self.getarg('redstepname'))
     ### Group the input files
     # Setup datagroups, get keys and key formats
     datagroups = []
     groupkeys = self.getarg('groupkeys').split('|')
     groupkfmt = self.getarg('groupkfmt')
     if len(groupkfmt) == 0:
         groupkfmt = None
     else:
         groupkfmt = groupkfmt.split('|')
     # Loop over files
     for data in self.datain:
         groupind = 0
         # Loop over groups until group match found or end reached
         while groupind < len(datagroups):
             # Check if data fits group
             found = True
             gdata = datagroups[groupind][0]
             for keyi in range(len(groupkeys)):
                 # Get key from group and new data - format if needed
                 key = groupkeys[keyi]
                 dkey = data.getheadval(key, 'allheaders')
                 gkey = gdata.getheadval(key, 'allheaders')
                 if groupkfmt is not None:
                     dkey = groupkfmt[keyi] % dkey
                     gkey = groupkfmt[keyi] % gkey
                 # Compare
                 if dkey != gkey:
                     found = False
             # Found -> add to group
             if found:
                 datagroups[groupind].append(data)
                 break
             # Not found -> increase group index
             groupind += 1
         # If not in any group -> make new group
         if groupind == len(datagroups):
             datagroups.append([data])
     # info messages
     self.log.debug(" Found %d data groups" % len(datagroups))
     for groupind in range(len(datagroups)):
         group = datagroups[groupind]
         msg = "  Group %d len=%d" % (groupind, len(group))
         for key in groupkeys:
             msg += " %s = %s" % (key, group[0].getheadval(
                 key, 'allheaders'))
         self.log.debug(msg)
     ### Reduce input files - collect output files
     self.dataout = []
     # Make new variables for groupidkeys and groupoutputs
     groupidkeys = []
     groupoutputs = []
     # Loop over groups -> save output in self.dataout
     for groupi in range(len(datagroups)):
         group = datagroups[groupi]
         # Get fileidkeys to see if unchanged groups should be re-reduced
         fileidkey = self.getarg('fileidkey')
         if len(fileidkey):
             # Get fileidkeys for the current new group
             newkeys = [dat.getheadval(fileidkey) for dat in group]
             copykeys = ['x']
             # Search for fit in existing groups: fit is index
             fit = -1
             for fit in range(len(self.groupidkeys)):
                 # Make copy of new keys
                 copykeys = list(newkeys)
                 # For each key in group[fit]
                 for val in self.groupidkeys[fit]:
                     if val in copykeys:
                         # Remove key from copykeys if found
                         del copykeys[copykeys.index(val)]
                     else:
                         # Else: group[fit] does not match, go to next group
                         copykeys = ['x']  # if all have been removed
                         break
                 # Check if any values left in copykeys
                 if len(copykeys) == 0:
                     # No values left in copykeys, group[fit] is valid match
                     break
             # Any values left in copykeys -> no match found
             if len(copykeys):
                 fit = -1
                 self.log.debug('New datagroup # %d has no previous match' %
                                groupi)
             else:
                 self.log.debug(
                     'New datagroup # %d matches previous group # %d' %
                     (groupi, fit))
         else:
             fit = -1
         # Reduce the data
         if fit < 0:
             try:
                 dataout = self.redstep(group)
             except Exception as error:
                 self.log.warn(
                     'Step %s failed for group %d on with file %s . . .' %
                     (self.getarg('redstepname'), groupi,
                      group[0].filename))
                 self.log.warn('message = %s' % str(error) +
                               ' - skipping group')
                 self.log.warn('traceback = %s' % traceback.format_exc())
                 continue
             # Add groupoutputs and groupidkeys
             if len(fileidkey):
                 groupoutputs.append(dataout)
                 idkeys = [dat.getheadval(fileidkey) for dat in group]
                 groupidkeys.append(idkeys)
             # Save output if requested
             if self.getarg('saveoutput'):
                 # BUGFIX: save the reduction result (dataout), not the
                 # stale 'data' loop variable left over from grouping
                 if issubclass(dataout.__class__, DataParent):
                     dataout.save()
                 else:
                     for outdata in dataout:
                         outdata.save()
         else:
             groupoutputs.append(self.groupoutputs[fit])
             groupidkeys.append(self.groupidkeys[fit])
             dataout = self.groupoutputs[fit]
         # add output to dataout
         if issubclass(dataout.__class__, DataParent):
             self.dataout.append(dataout)
         else:
             # BUGFIX: append each output element, not the whole list
             # once per element
             for outdata in dataout:
                 self.dataout.append(outdata)
     # Copy groupidkeys and groupoutputs
     self.groupoutputs = groupoutputs
     self.groupidkeys = groupidkeys
     # Set procname to redstep.procname
     self.procname = self.redstep.procname
Example #15
0
 def run(self, inpar='', data=None, multi=False):
     """ Runs the file-loading step: collects files matching the
         'filelocation' glob (after date and environment-variable
         expansion), filters them by filename substrings
         ('fileinclude'/'fileexclude') and by header keyword values
         ('includeheadvals'/'excludeheadvals' as KEY=VALUE pairs),
         then loads the sorted survivors into self.dataout.

         inpar/data/multi are accepted for interface compatibility;
         they are not used here.
     """
     # Loads all files base on glob, parameter 'filelocation'
     infile = self.getarg('filelocation')
     inglob = datetime.strftime(datetime.now(), infile)
     inglob = os.path.expandvars(inglob)
     indata = glob.glob(inglob)
     self.log.debug('Files found: %s' % indata)
     # From all loaded inputs, includes files in final list which have
     # certain strings in the filename
     infilenameinclude = self.getarg('fileinclude').split('|')
     ininclude = []
     if infilenameinclude[0] != '':
         for path in indata:
             # os.path.basename replaces the manual split('/') dance
             name = os.path.basename(path)
             for pattern in infilenameinclude:
                 # BUGFIX: dedupe on the file path, not the pattern
                 # string (the original tested 'pattern not in list of
                 # paths', which is always true)
                 if pattern in name and path not in ininclude:
                     ininclude.append(path)
     else:
         ininclude = indata
     # From all loaded inputs, excludes files from final list which have
     # certain strings in the filename
     infilenameexclude = self.getarg('fileexclude').split('|')
     inexclude = []
     for path in indata:
         name = os.path.basename(path)
         for pattern in infilenameexclude:
             # BUGFIX: same dedupe fix as above
             if pattern in name and path not in inexclude:
                 inexclude.append(path)
     self.log.debug('File(s) excluded by filename: %s' % inexclude)
     indatafinal = list(set(ininclude) - set(inexclude))
     # Load only the headers of the remaining files for keyword checks
     headlist = [
         DataParent(config=self.config).loadhead(innam)
         for innam in indatafinal
     ]
     # From all loaded inputs, includes files in final list which have
     # certain keywords in the fits header
     includelist = self.getarg('includeheadvals').split('|')
     inheadinclude = []
     if includelist[0] != '':
         for head in headlist:
             for item in includelist:
                 keysinclude = item.split('=')
                 if str(head.getheadval(keysinclude[0])) == str(
                         keysinclude[1]):
                     if head not in inheadinclude:
                         inheadinclude.append(head)
     else:
         inheadinclude = headlist
     # From all loaded inputs, excludes files from final list which have
     # certain keywords in the fits header
     inheadexclude = []
     excludelist = self.getarg('excludeheadvals').split('|')
     if excludelist[0] != '':
         for head in headlist:
             for item in excludelist:
                 keysexclude = item.split('=')
                 if str(head.getheadval(keysexclude[0])) == str(
                         keysexclude[1]):
                     if head not in inheadexclude:
                         inheadexclude.append(head)
     headlistfinal = list(set(inheadinclude) - set(inheadexclude))
     # Sorts final output files
     finalsorted = sorted(head.filename for head in headlistfinal)
     # Appends final list of loaded files to dataout
     self.dataout = [
         DataParent(config=self.config).load(fname)
         for fname in finalsorted
     ]
Example #16
0
    def loadauxname(self, auxpar='', data=None, multi=False):
        """ Searches for files matching auxfile. If only one match is
            found, that file is returned. Else the header
            keywords listed in auxfitkeys are matched between the
            data and the auxfiles which were found. The first auxfile
            for which these keywords values best match the ones
            from data is selected. The filename of the best match
            is returned (a list of filenames if multi is set).

            auxpar: A name for the aux file parameter to use. This
                    allows loadauxfiles to be used multiple times
                    in a given pipe step (for example for darks and
                    flats). Default value is self.auxpar which is set
                    by loadauxsetup().
            data: A pipedata object to match the auxiliary file to.
                  If no data is specified self.datain is used (for
                  Multi Input steps self.datain[0]).

            Raises ValueError if no aux files are found (after also
            checking the backup folder), and TypeError if the fitkeys
            parameter is not a list of strings.
        """
        ### Setup
        # Set auxpar
        if len(auxpar) == 0:
            auxpar = self.auxpar
        # Get parameters
        auxfile = os.path.expandvars(self.getarg(auxpar + 'file'))
        self.log.debug("Looking for files under %s" % auxfile)
        fitkeys = self.getarg(auxpar + 'fitkeys')
        if len(fitkeys) == 1 and len(fitkeys[0]) == 0:
            fitkeys = []
        ### Look for files - return in special cases
        # Glob the list of files
        auxlist = glob.glob(auxfile)
        # If no file found - look in backup folder
        if len(auxlist) < 1:
            self.log.warn('No files found under %s - looking in backup' %
                          auxfile)
            auxback = os.path.expandvars(self.getarg('bkup' + auxpar))
            if len(auxback):
                auxfile = auxback
                auxlist = glob.glob(auxfile)
        # Throw exception if no file found
        if len(auxlist) < 1:
            msg = 'No %s files found under %s' % (auxpar, auxfile)
            self.log.error(msg)
            raise ValueError(msg)

        # Get datain object (depends on step being SingleInput or MultiInput)
        if data is None:
            if issubclass(self.__class__, StepMIParent):
                data = self.datain[0]
            else:
                data = self.datain
        # Return unique file, or all files if fitkeys is empty
        if len(auxlist) == 1 or len(fitkeys) == 0:
            if len(auxlist) == 1:
                self.log.info('LoadAuxName: Found unique file = %s' %
                              auxlist[0])
            else:
                self.log.info(
                    'LoadAuxName: No fitkeys: Return first %sfile match = %s' %
                    (self.auxpar, auxlist[0]))
            data.setheadval(
                'HISTORY', '%s: Best %sfile = %s' % (
                    self.name,
                    self.auxpar,
                    os.path.split(auxlist[0])[1],
                ))
            if multi:
                return auxlist
            else:
                return auxlist[0]
        ### Select files with Fitkeys
        # check format (make first element uppercase)
        try:
            _ = fitkeys[0].upper()
        except AttributeError:
            # AttributeError if it's not a string
            self.log.error('LoadAuxFile: fitkeys config parameter is ' +
                           'incorrect format - need list of strings')
            raise TypeError('fitkeys config parameter is incorrect format' +
                            ' - need list of strings')
        # Load all headers from auxlist into a auxheadlist (pipedata objects)
        auxheadlist = []
        for auxnam in auxlist:
            auxheadlist.append(DataParent(config=self.config).loadhead(auxnam))
        # Look through keywords, only keep auxfiles which fit keys
        for key in fitkeys:
            newheadlist = []
            # Look through auxfiles, transfer good ones
            # NOTE(review): 'key in' is a substring test, so keys like
            # 'DATE' also take this branch - confirm that is intended
            if key in 'DATE-OBS':  # SPECIAL CASE DATE-OBS:
                # get time for data
                datime = time.mktime(
                    time.strptime(data.getheadval('DATE-OBS'),
                                  '%Y-%m-%dT%H:%M:%S'))
                # get time offset (from data) for each auxfile
                auxtimes = []
                for auxhead in auxheadlist:
                    auxtime = time.mktime(
                        time.strptime(auxhead.getheadval('DATE-OBS'),
                                      '%Y-%m-%dT%H:%M:%S'))
                    auxtimes.append(abs(auxtime - datime))
                # only keep auxfiles which are within daterange of closest auxfile
                mindiff = min(auxtimes)
                timerange = self.getarg('daterange') * 86400
                for auxi in range(len(auxheadlist)):
                    if auxtimes[auxi] - mindiff < timerange:
                        newheadlist.append(auxheadlist[auxi])
            else:  # Normal Keyword compare
                for auxhead in auxheadlist:
                    # Check if the auxfile fits (compare with data)
                    if auxhead.getheadval(key) == data.getheadval(key):
                        # it fits -> add to newheadlist
                        newheadlist.append(auxhead)
            # break key loop if no files left
            if len(newheadlist) == 0:
                break
            else:
                auxheadlist = newheadlist

        ### Select file to return
        if multi:
            # Return all filenames
            auxname = [aux.filename for aux in auxheadlist]
            # Return message
            if len(auxname) > 3:
                listnames = "%d files: %s to %s" % (len(auxname), auxname[0],
                                                    auxname[-1])
            else:
                # BUGFIX: string.join() was removed in Python 3 - use
                # the str.join method instead
                listnames = ' '.join(auxname)
            if len(newheadlist) > 0:
                self.log.info('LoadAuxName: Matching %s found are <%s>' %
                              (auxpar, listnames))
            else:
                self.log.warn('LoadAuxName: NO MATCH finding aux files')
                self.log.warn('Returning files <%s>' % listnames)
        else:
            # Return first filename
            auxname = auxheadlist[0].filename
            # Select best file
            if len(newheadlist) > 0:
                self.log.info('LoadAuxName: Matching %s found is <%s>' %
                              (auxpar, auxname))
            else:
                self.log.warn('LoadAuxName: NO MATCH finding aux file')
                self.log.warn('Returning first file <%s>' % auxname)
            listnames = auxname  # just so we can use it below
        data.setheadval('HISTORY',
                        '%s: Best %s = %s' % (self.name, auxpar, listnames))
        # Return selected file
        return auxname
Example #17
0
# Location of the data folder
# NOTE(review): machine-specific absolute path - adjust per machine
datafolder = '/Users/josh/Desktop/pipeline_test/data'
# Location of config files
# assumes 'codefolder' is defined earlier in this script - TODO confirm
baseconfig = os.path.join(codefolder, 'pipeline', 'Developments',
                          'stepwebastrometry', 'pipeconf_stonedge_auto.txt')
# Send DEBUG-level log output to 'logfilename' (defined earlier)
logging.basicConfig(
    filename=logfilename,
    level=logging.DEBUG,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")

# Work from the code folder; create the data folder if it is missing
os.chdir(codefolder)
if not os.path.exists(datafolder):
    os.mkdir(datafolder)
# Build full paths for the input files ('filenames' defined earlier)
infilenames = [os.path.join(datafolder, f) for f in filenames]

### Load a file into a DataFits object
dparent = DataParent(config=baseconfig)
dfits = dparent.load(infilenames[0])
print(dfits.filename)

### Look at the FITS header of the loaded object
# print(repr(dfits.header))

### OPTIONAL BUT RECOMMENDED: Check if all necessary files exist
error_flag = False
# Check if configuration file exists
if not os.path.exists(baseconfig):
    print(
        'ERROR: The config file you specified, %s,\n  does NOT exist on your computer, fix "config" above'
        % baseconfig)
    error_flag = True
# Check if input files exist