def test_nimo(self):
    """ Test nimo step.
        Runs the step and makes sure that there is output data.
    """
    # Build a parent object with the test configuration and enable
    # output filling for the no-input step.
    parent = DataParent(config=CONFFILE)
    parent.config['parentni']['filloutput'] = True
    # Instantiate the no-input step and hand it the modified config.
    nistep = parent.getobject('StepNIParent')
    nistep.config = parent.config
    # Run the step (no input data) and check the first output object.
    result = nistep()
    self.assertIsInstance(result[0], DataParent)
def test_multi(self):
    """ Test pipeline with mimo and miso steps """
    parent = DataParent(config=CONFFILE)
    pipeline = parent.getobject('PipeLine')
    # Run the same input file twice through the multi-in pipeline mode.
    parent.log.info(' ==== START MISO/MIMO PIPE ====')
    result = pipeline([FITSFILE, FITSFILE], parent.config,
                      pipemode='multi', force=True)
    parent.log.info(' ==== END MISO/MIMO PIPE ====')
    # The pipeline must return a data object.
    self.assertIsInstance(result, DataParent)
def test_single(self):
    """ Test pipeline with siso step only """
    parent = DataParent(config=CONFFILE)
    pipeline = parent.getobject('PipeLine')
    # Feed two copies of the test file through the single-in/single-out mode.
    parent.log.info(' ==== START SISO PIPE ====')
    result = pipeline([FITSFILE, FITSFILE], parent.config,
                      pipemode='single', force=True)
    parent.log.info(' ==== END SISO PIPE ====')
    # The pipeline must return a data object.
    self.assertIsInstance(result, DataParent)
def test_mimo(self):
    """ Test a mimo step.
        Runs the step and makes sure that the output data is as expected.
    """
    parent = DataParent(config=CONFFILE)
    # Load the same FITS file twice to get two independent inputs.
    first = parent.load(FITSFILE)
    second = parent.load(FITSFILE)
    mostep = parent.getobject('StepMOParent')
    # Keep copies of the original header and image for comparison.
    orig_head = first.header.copy()
    orig_img = first.image.copy()
    out_first, _out_second = mostep([first, second])
    # Image data must be unchanged; the header must have been modified.
    self.assertEqual(np.sum(orig_img - out_first.image), 0)
    self.assertNotEqual(orig_head, out_first.header)
def test_siso(self):
    """ Test siso step.
        Runs the step and makes sure that the data is the same but
        that the header has changed.
    """
    parent = DataParent(config=CONFFILE)
    datain = parent.load(FITSFILE)
    # Snapshot header and image before the step runs.
    orig_head = datain.header.copy()
    orig_img = datain.image.copy()
    sistep = parent.getobject('StepParent')
    result = sistep(datain)
    # Image data must be unchanged; the header must have been modified.
    self.assertEqual(np.sum(orig_img - result.image), 0)
    self.assertNotEqual(orig_head, result.header)
def run(self):
    """ Runs the data reduction algorithm. The self.datain is run
        through the code, the result is in self.dataout.

        Groups the input files by the header keys listed in the
        'groupkeys' parameter, reduces each group with the step named
        by 'redstepname', and collects all outputs in self.dataout.
        Groups whose file id keys (parameter 'fileidkey') are unchanged
        since the previous call reuse the cached output from
        self.groupoutputs instead of being re-reduced.
    """
    ### Get redstep if it's not loaded
    if self.redstep is None:
        # Get the step
        datap = DataParent(config=self.config)
        self.redstep = datap.getobject(self.getarg('redstepname'))
    ### Group the input files
    # Setup datagroups, get keys and key formats
    datagroups = []
    groupkeys = self.getarg('groupkeys').split('|')
    groupkfmt = self.getarg('groupkfmt')
    if len(groupkfmt) == 0:
        groupkfmt = None
    else:
        groupkfmt = groupkfmt.split('|')
    # Loop over files
    for data in self.datain:
        groupind = 0
        # Loop over groups until group match found or end reached
        while groupind < len(datagroups):
            # Check if data fits group
            found = True
            gdata = datagroups[groupind][0]
            for keyi in range(len(groupkeys)):
                # Get key from group and new data - format if needed
                key = groupkeys[keyi]
                dkey = data.getheadval(key, 'allheaders')
                gkey = gdata.getheadval(key, 'allheaders')
                if groupkfmt is not None:
                    dkey = groupkfmt[keyi] % dkey
                    gkey = groupkfmt[keyi] % gkey
                # Compare - one mismatch disqualifies the group
                if dkey != gkey:
                    found = False
                    break
            # Found -> add to group
            if found:
                datagroups[groupind].append(data)
                break
            # Not found -> increase group index
            groupind += 1
        # If not in any group -> make new group
        if groupind == len(datagroups):
            datagroups.append([data, ])
    # info messages
    self.log.debug(" Found %d data groups" % len(datagroups))
    for groupind in range(len(datagroups)):
        group = datagroups[groupind]
        msg = "  Group %d len=%d" % (groupind, len(group))
        for key in groupkeys:
            msg += " %s = %s" % (key, group[0].getheadval(
                key, 'allheaders'))
        self.log.debug(msg)
    ### Reduce input files - collect output files
    self.dataout = []
    # Make new variables for groupidkeys and groupoutputs
    groupidkeys = []
    groupoutputs = []
    # Loop over groups -> save output in self.dataout
    for groupi in range(len(datagroups)):
        group = datagroups[groupi]
        # Get fileidkeys to see if unchanged groups should be re-reduced
        fileidkey = self.getarg('fileidkey')
        if len(fileidkey):
            # Get fileidkeys for the current new group
            newkeys = [dat.getheadval(fileidkey) for dat in group]
            copykeys = ['x']
            # Search for fit in existing groups: fit is index
            fit = -1
            for fit in range(len(self.groupidkeys)):
                # Make copy of new keys
                copykeys = list(newkeys)
                # For each key in group[fit]
                for val in self.groupidkeys[fit]:
                    if val in copykeys:
                        # Remove key from copykeys if found
                        del copykeys[copykeys.index(val)]
                    else:
                        # Else: group[fit] does not match, go to next group
                        copykeys = ['x']  # if all have been removed
                        break
            # Check if any values left in copykeys
                if len(copykeys) == 0:
                    # No values left in copykeys, group[fit] is valid match
                    break
            # Any values left in copykeys -> no match found
            if len(copykeys):
                fit = -1
                self.log.debug('New datagroup # %d has no previous match'
                               % groupi)
            else:
                self.log.debug(
                    'New datagroup # %d matches previous group # %d'
                    % (groupi, fit))
        else:
            fit = -1
        # Reduce the data
        if fit < 0:
            try:
                dataout = self.redstep(group)
            except Exception as error:
                # Log and skip the failing group rather than aborting
                # the whole reduction.
                self.log.warn(
                    'Step %s failed for group %d on with file %s . . .'
                    % (self.getarg('redstepname'), groupi,
                       group[0].filename))
                self.log.warn('message = %s' % str(error)
                              + ' - skipping group')
                self.log.warn('traceback = %s' % traceback.format_exc())
                continue
            # Add groupoutputs and groupidkeys
            if len(fileidkey):
                groupoutputs.append(dataout)
                idkeys = [dat.getheadval(fileidkey) for dat in group]
                groupidkeys.append(idkeys)
            # Save output if requested
            if self.getarg('saveoutput'):
                # BUGFIX: was data.save() - saved a stale loop variable
                # (and raised NameError if the first group produced a
                # single DataParent). Save the actual step output.
                if isinstance(dataout, DataParent):
                    dataout.save()
                else:
                    for data in dataout:
                        data.save()
        else:
            # Unchanged group: reuse cached output and id keys
            groupoutputs.append(self.groupoutputs[fit])
            groupidkeys.append(self.groupidkeys[fit])
            dataout = self.groupoutputs[fit]
        # add output to dataout
        if isinstance(dataout, DataParent):
            self.dataout.append(dataout)
        else:
            # BUGFIX: was self.dataout.append(dataout) inside the loop,
            # which appended the whole output list once per element.
            for data in dataout:
                self.dataout.append(data)
    # Copy groupidkeys and groupoutputs
    self.groupoutputs = groupoutputs
    self.groupidkeys = groupidkeys
    # Set procname to redstep.procname
    self.procname = self.redstep.procname