    def test_pipeline(self):
        # Make sure the test file has been unzipped.
        if not os.path.isfile(_TEST_INPUT_FILE2):
            if VERBOSE:
                print("Unzipping", _TEST_INPUT_FILE2_ZIP)
            with ZipFile(_TEST_INPUT_FILE2_ZIP) as zf:
                zf.extractall(_datapath)

        # Test the alternative pipeline API for the simulator.
        test_map = MiriIlluminationModel(_TEST_INPUT_FILE2)
        test_map.set_instrument_metadata(_DEFAULT_SCA)

        sca = SensorChipAssembly3(logger=LOGGER)
        with warnings.catch_warnings():  # Suppress FITS header warnings.
            warnings.simplefilter("ignore")
            exposure_data = sca.simulate_pipe(
                test_map,
                scale=1.0,
                fringemap=None,
                readout_mode=_DEFAULT_READOUT,
                subarray=None,
                nints=1,
                ngroups=10,
                temperature=6.5,
                cosmic_ray_mode='NONE',
                simulate_dark_current=SIMULATE_DARK,
                verbose=0)
            # The simulated data must have a valid shape and contain a
            # range of values.
            self.assertTrue(exposure_data.data.shape[0] > 0)
            self.assertTrue(exposure_data.data.shape[1] > 0)
            self.assertNotAlmostEqual(exposure_data.data.min(),
                                      exposure_data.data.max())
        del test_map, exposure_data, sca
    def test_subarray_modes(self):
        # Test the extraction of subarrays from full frame data.
        test_map = MiriIlluminationModel(_TEST_INPUT_FILE2)
        map_data = test_map.get_illumination()
        # The illumination data is assumed to have a valid shape and be
        # filled with a range of values (the data should have a slope).
        self.assertTrue(map_data.shape[0] > 0)
        self.assertTrue(map_data.shape[1] > 0)
        self.assertNotAlmostEqual(map_data.min(), map_data.max())
        if VERBOSE:
            print("\nTesting FULL to subarray modes: ", end='')
        if TEST_MODE_COVERAGE > 0.99:
            modes_to_test = list(detector_properties['SUBARRAY'].keys())
            random.shuffle(modes_to_test)
        else:
            nsamples = int(0.5 + len(list(detector_properties['SUBARRAY'].keys())) *
                           TEST_MODE_COVERAGE)
            nsamples = max(nsamples, MINSAMPLES)
            modes_to_test = random.sample(
                                list(detector_properties['SUBARRAY'].keys()),
                                nsamples)
        sca = SensorChipAssembly3(logger=LOGGER)
        with warnings.catch_warnings():  # Suppress FITS header warnings.
            warnings.simplefilter("ignore")
            for submode in modes_to_test:
                if VERBOSE:
                    print(submode + ' ', end='')
                sca.setup(_DEFAULT_SCA,
                          readout_mode=_DEFAULT_READOUT,
                          subarray=submode,
                          inttime=None,
                          ngroups=2,
                          nints=1,
                          temperature=6.5,
                          cosmic_ray_mode='NONE',
                          simulate_dark_current=SIMULATE_DARK,
                          verbose=0)
                sca.set_illumination(test_map)
                simulated_data = sca.exposure()
                # The simulated data must have a valid shape and contain a
                # range of values. (A more specific test is difficult because
                # read noise is included. The accuracy of subarray extraction
                # should already have been tested in the test_detector unit
                # tests.)
                self.assertTrue(simulated_data.shape[0] > 0)
                self.assertTrue(simulated_data.shape[1] > 0)
                self.assertNotAlmostEqual(simulated_data.min(),
                                          simulated_data.max())
        if VERBOSE:
            print("")
        del test_map, sca
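
# Illustrative sketch only (not part of the original test module): the same
# setup()/set_illumination()/exposure() sequence exercised by
# test_subarray_modes can be driven directly. The 'BRIGHTSKY' subarray name
# is an arbitrary assumed choice; the module-level constants are the ones
# used by the tests above.
def _demo_simulate_one_subarray():
    test_map = MiriIlluminationModel(_TEST_INPUT_FILE2)
    test_map.set_instrument_metadata(_DEFAULT_SCA)
    sca = SensorChipAssembly3(logger=LOGGER)
    sca.setup(_DEFAULT_SCA,
              readout_mode=_DEFAULT_READOUT,
              subarray='BRIGHTSKY',
              inttime=None,
              ngroups=2,
              nints=1,
              temperature=6.5,
              cosmic_ray_mode='NONE',
              simulate_dark_current=SIMULATE_DARK,
              verbose=0)
    sca.set_illumination(test_map)
    # Return the simulated exposure data (a 2-D array, as asserted above).
    return sca.exposure()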
Example #3
                columns = subarray[3]
                if verbose > 0:
                    print("Defining test data for subarray %s." % subarray_str)
                    print("Setting columns=%d, rows=%d." % (rows, columns))
        except (KeyError, IndexError):
            strg = "Unrecognised or badly defined subarray mode: %s" % \
                subarray_str
            raise AttributeError(strg)

    # Create a new illumination data object.
    datavalues = create_cross_data(rows,
                                   columns,
                                   minvalue=minvalue,
                                   maxvalue=maxvalue,
                                   addfifth=addfifth)
    wdata = 5.0 * np.ones_like(datavalues)
    map_object = MiriIlluminationModel(intensity=datavalues, wavelength=wdata)
    map_object.set_instrument_metadata("MIRIMAGE")
    if subarray_str:
        map_object.set_subarray_metadata(subarray_str)

    if verbose > 1:
        print(map_object)
        map_object.plot()

    # Save the illumination map to a FITS file.
    map_object.save(outputfile, overwrite=overwrite)

    if verbose > 0:
        print("New illumination map saved to %s." % outputfile)
Example #4
def load_illumination_map(filename,
                          ftype='FITS',
                          metadata=None,
                          scale=1.0,
                          add_wavelength=True,
                          wavmin=1.0,
                          wavmax=30.0):
    """
    
    Create a MiriIlluminationModel object from a file of detector illumination data.
                 
    :Parameters:
    
    filename: string or tuple of strings
        The name(s) of the file(s) to be opened.
    ftype: string, optional, default='FITS'
        The type of file from which to read the data.
        
        * If 'STSCI', the illumination map is read from a standard MIRI
          illumination data file, based on the STScI data model.
        * If 'FITS', intensity and wavelength data are obtained
          from a single FITS file containing INTENSITY, WAVELENGTH and
          DIRECTION extensions (with the latter two extensions being
          optional).
        * If 'ASCII', intensity and wavelength data are read from
          a list of 1, 2 or 3 ASCII files (with the latter two files
          being optional). Intensity values are multiplied by the given
          scale factor.
          ASCII files describing an illumination map define a value for every
          pixel. They should contain nrows lines, each of which contains
          ncolumns blank-separated numbers; i.e. the default format
          expected by the numpy.loadtxt() function.
        * If 'IMAGE' (or 'JPEG'), intensity data are read from a standard
          image format file (such as JPEG, GIF or PPM) whose contents are
          converted into a composite image. A simple wavelength image may be
          added varying between a specified minimum and maximum wavelength,
          or wavelength data may be ignored. Intensity values are multiplied
          by the given scale factor.
          
    metadata: dictionary-like object, optional
        An object containing metadata to be associated with the data.
        It could be a plain Python dictionary, a Metadata object or
        a pyFits Header object, as long as it supports keyword operations.
        (Only used if the input file is ASCII or IMAGE.)
    scale: float, optional, default=1.0
        An optional scale factor to apply to the intensity data. This is
        more useful for ftype='IMAGE' data in cases where the data do not
        contain a photon flux.
    add_wavelength: boolean, optional, default=True
        Add a test wavelength map to the data. This parameter is valid
        only for ftype='IMAGE' data.
    wavmin: float, optional, default=1.0
        Minimum wavelength associated with data in microns. This parameter
        is valid only for ftype='IMAGE' data.
    wavmax: float, optional, default=30.0
        Maximum wavelength associated with data in microns. This parameter
        is valid only for ftype='IMAGE' data.
       
    :Raises:
    
    ValueError
        Raised if any of the parameters are out of range.
    IOError
        Raised if there is an error opening, reading or interpreting
        the data from the input file.
    ImportError
        Raised if the Python Imaging Library (PIL) is not available.
        
    :Returns:
    
    A new MiriIlluminationModel object.
        
    """
    # Attempt to open and then read the given file
    if ftype == 'STSCI' or ftype == 'STScI':

        # Read a MIRI data model in standard STScI format.
        illumination_map = MiriIlluminationModel(filename)
        illumination_map.apply_scale(scale)
        return illumination_map

    elif ftype == 'FITS':

        # Read a FITS file in the agreed detector illumination format.
        # The file must contain a primary FITS header and an INTENSITY
        # extension. It may also contain WAVELENGTH and DIRECTION
        # extensions.
        try:
            hdulist = pyfits.open(filename)

            # Read the contents of the file
            header = hdulist[0].header
            description = 'Metadata extracted from %s' % filename
            metadata = Metadata(description)
            metadata.from_fits_header(header)

            intensity = scale * hdulist['INTENSITY'].data
            intensity_header = hdulist['INTENSITY'].header
            intensity_metadata = Metadata('Intensity metadata')
            intensity_metadata.from_fits_header(intensity_header)
        except Exception as e:
            # If the file could not be read re-raise the exception
            # with a more meaningful error message.
            strg = "%s: Could not read illumination data file %s.\n   %s" % \
                (e.__class__.__name__, filename, e)
            raise IOError(strg)

        # The wavelength data is optional
        try:
            wavelength = hdulist['WAVELENGTH'].data
            wavelength_header = hdulist['WAVELENGTH'].header
            wavelength_metadata = Metadata('Wavelength metadata')
            wavelength_metadata.from_fits_header(wavelength_header)
        except (KeyError, AttributeError):
            wavelength = None
            wavelength_metadata = None

        hdulist.close()

    elif ftype == 'ASCII':
        if isinstance(filename, str):
            # A single string is provided - there is just an intensity file
            intensity = scale * np.loadtxt(filename)
            intensity_metadata = None
            wavelength = None
            wavelength_metadata = None
        else:
            # A list of strings has been provided. Attempt to open each
            # file, ignoring the wavelength file if not specified or it
            # doesn't exist.
            intensity = scale * np.loadtxt(filename[0])
            intensity_metadata = None
            try:
                wavelength = np.loadtxt(filename[1])
            except Exception:
                wavelength = None
            wavelength_metadata = None

    elif ftype == 'IMAGE' or ftype == 'JPEG':
        if _PIL_AVAILABLE:
            # Convert to a numpy array of the correct shape, reducing the
            # amplitude by the given scale factor.
            data = _np_from_jpeg(filename)
            intensity = scale * data.astype(np.float32)
            intensity_metadata = None

            # Add some test wavelength data if required. The wavelength will
            # increase linearly from bottom to top over the range specified.
            if add_wavelength:
                wavelength = np.empty_like(intensity)
                wav = np.linspace(wavmin, wavmax, wavelength.shape[1])
                wavelength[:, :] = wav
                wavelength = np.transpose(wavelength)
            else:
                wavelength = None
            wavelength_metadata = None

        else:
            strg = "Sorry, file format %s can't be processed " \
                "because the Python Imaging Library is not available." % ftype
            raise ImportError(strg)

    else:
        # Other file formats are not yet supported
        strg = "Sorry, file format %s is not supported." % ftype
        raise ValueError(strg)

    # Bail out if the intensity data has not been read successfully.
    if intensity is None:
        strg = "No intensity data could be found within file.\n   %s" % \
            filename
        raise ValueError(strg)

    # Ensure that the intensity and wavelength arrays are
    # compatible.
    illumination_shape = intensity.shape[-2:]
    if len(intensity.shape) > 2:
        slices = intensity.shape[0]
    else:
        slices = 0

    # If the wavelength array is 1-D and the intensity array is 3-D
    # the wavelength array needs to be reshaped into a 3-D array.
    if wavelength is not None:
        if len(wavelength.shape) == 1 and len(intensity.shape) > 2:
            sz1 = wavelength.shape[0]
            wavelength.shape = [sz1, 1, 1]

    if not check_wavelength(wavelength, illumination_shape, slices):
        print("WARNING: Wavelength array has unexpected size - ignoring it.")
        wavelength = None

    # Finally, create and return a MiriIlluminationModel object
    illum = MiriIlluminationModel(intensity=intensity, wavelength=wavelength)
    return illum
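
# Illustrative usage sketch for load_illumination_map(), following the
# docstring above. The file names are hypothetical placeholders and this
# helper is not part of the original module.
def _demo_load_illumination_map():
    # Standard MIRI illumination model in STScI format.
    illum_stsci = load_illumination_map('illum_model.fits', ftype='STSCI')
    # FITS file with INTENSITY (and optional WAVELENGTH) extensions,
    # with the intensity doubled.
    illum_fits = load_illumination_map('illum_extensions.fits', ftype='FITS',
                                       scale=2.0)
    # Intensity-only ASCII table, read with numpy.loadtxt().
    illum_ascii = load_illumination_map('intensity.txt', ftype='ASCII')
    # Image file converted to an intensity map, with a synthetic wavelength
    # ramp between 5 and 12 microns (requires PIL).
    illum_image = load_illumination_map('scene.jpg', ftype='IMAGE',
                                        add_wavelength=True,
                                        wavmin=5.0, wavmax=12.0)
    return illum_stsci, illum_fits, illum_ascii, illum_image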
def create_test_data(rows,
                     columns,
                     pattern,
                     values,
                     metadata=None,
                     cloneimage=None,
                     add_wavelength=True,
                     wavmin=1.0,
                     wavmax=30.0,
                     seedvalue=None):
    """
    
    Create a data object containing a test pattern, which can be useful
    for generating artificial dark and flat-field data, for example.
                 
    :Parameters:
    
    rows: int
        Number of rows
    columns: int
        Number of columns
    pattern: string
        The type of test pattern.
        CONSTANT - A constant value. Useful for flat-field tests.
        SLOPE - A flat surface of constant slope. Useful for test data.
        BOX - An image with a central bright box.
        TESTIMAGE - A regular grid of bright images on a constant background.
        DARKMAP - A dark current multiplier map containing random hot pixels.
        BADPIXEL - A bad pixel map containing random dead pixels.
        Other types TBD.
    values: float or tuple of floats
        Values to be used to create the requested type of data.
        For CONSTANT:
        
            * values contains the constant value
            
        For SLOPE
        
            * values[0] contains the constant
            * values[1] contains the row slope
            * values[2] contains the column slope
            
        For BOX
        
            * values[0] contains the constant background
            * values[1] contains the peak brightness
            
        For TESTIMAGE
        
            * values[0] contains the constant background
            * values[1] contains the peak brightness
            * values[2] contains the row spacing in pixels
            * values[3] contains the column spacing in pixels
            
        For DARKMAP
        
            * values[0] contains the minimum dark multiplier (normally <=1.0)
            * values[1] contains the maximum dark multiplier (normally >=1.0)
            * values[2] contains the number of random hot pixels
            * values[3] contains the hot pixel multiplier (normally >1000)
            
        For BADPIXEL
        
            * values contains the number of random dead pixels
            
    metadata: Metadata object, optional
        The primary metadata describing the data. If None, the metadata
        is ignored.
    cloneimage: array_like, optional
        An image to be cloned within the TESTIMAGE, DARKMAP or
        BADPIXEL patterns. If specified, some of the abnormal pixels
        will be stamped with this image. If not specified, only single
        pixels are modified.
    add_wavelength: boolean, optional, default=True
        Add a test wavelength map to the data. Not valid for DARKMAP
        or BADPIXEL.
    wavmin: float, optional, default=1.0
        Minimum wavelength associated with data in microns.
    wavmax: float, optional, default=30.0
        Maximum wavelength associated with data in microns.
    seedvalue: int, optional, default=None
        The seed to be sent to the np.random number generator before
        generating the test data.
        If not specified, a value of None will be sent, which
        randomises the seed.
        
    :Raises:
        
    TypeError
        Raised if any of the parameters are of the wrong type, size
        or shape.
        
    :Returns:
    
    For the DARKMAP pattern, a new MeasuredModel object.
    For the BADPIXEL pattern, a new MiriBadPixelModel object.
    For other patterns, a new MiriIlluminationModel object.
        
    """
    # Set the seed for the np.random function.
    np.random.seed(seedvalue)

    # Test the requested data size
    try:
        rows = int(rows)
        columns = int(columns)
    except (TypeError, ValueError):
        strg = "Row and column values must be integers."
        raise TypeError(strg)

    # Define an intensity array containing the defined pattern.
    if pattern == 'CONSTANT':
        # A constant level.
        try:
            datavalues = float(values) * np.ones([rows, columns],
                                                 dtype=np.float32)
        except (TypeError, ValueError) as e:
            strg = "CONSTANT pattern needs a single floating point value"
            strg += "\n %s" % e
            raise TypeError(strg)

    elif pattern == 'SLOPE':
        # A flat, sloping surface.
        try:
            datavalues = \
                np.fromfunction(
                            lambda i,j: values[0] + i*values[1] + j*values[2],
                            [rows,columns])
        except (TypeError, ValueError, IndexError) as e:
            strg = "SLOPE pattern needs a tuple of 3 floats"
            strg += "\n %s" % e
            raise TypeError(strg)

    elif pattern == "BOX":
        # A box image.
        try:
            dmin = float(values[0])
            dmax = float(values[1])
            djump = dmax - dmin
        except (TypeError, ValueError, IndexError) as e:
            strg = "BOX pattern needs a tuple of (float, float)"
            strg += "\n %s" % e
            raise TypeError(strg)

        # Initialise the array full of background values and set a
        # central box to the maximum value.
        datavalues = dmin * np.ones([rows, columns], dtype=np.float32)

        rmin = rows // 3
        rmax = rmin * 2
        cmin = columns // 3
        cmax = cmin * 2
        datavalues[rmin:rmax, cmin:cmax] += djump

    elif pattern == "TESTIMAGE":
        # A grid of test images.
        try:
            dmin = float(values[0])
            dmax = float(values[1])
            djump = dmax - dmin
            rowstep = int(values[2])
            colstep = int(values[3])
        except (TypeError, ValueError, IndexError) as e:
            strg = "TESTIMAGE pattern needs a tuple of (float, float, int, int)"
            strg += "\n %s" % e
            raise TypeError(strg)

        # Initialise the array full of background values.
        datavalues = dmin * np.ones([rows, columns], dtype=np.float32)

        if cloneimage is not None:
            cloneimage = np.asarray(cloneimage)
            cloneimage *= djump
            lastrow = rows - cloneimage.shape[0] + 1
            lastcol = columns - cloneimage.shape[1] + 1
        else:
            lastrow = rows
            lastcol = columns

        for row in range(0, lastrow, rowstep):
            for col in range(0, lastcol, colstep):
                # Stamp either the clone image or a single pixel.
                if (cloneimage is not None):

                    for xx in range(0, cloneimage.shape[0]):
                        rr = row + xx
                        for yy in range(0, cloneimage.shape[1]):
                            cc = col + yy
                            datavalues[rr, cc] += cloneimage[xx, yy]
                else:
                    datavalues[row, col] = djump

    elif pattern == 'DARKMAP':
        # A dark current multiplier map. There is never a wavelength array.
        add_wavelength = False
        # The data contains 1.0 unless otherwise specified.
        datavalues = np.ones([rows, columns], dtype=np.float32)
        try:
            dmin = float(values[0])
            dmax = float(values[1])
            nhot = int(values[2])
            dhot = float(values[3])
        except (TypeError, ValueError, IndexError) as e:
            strg = "DARKMAP pattern needs a tuple of (float, float, int, float)"
            strg += "\n %s" % e
            raise TypeError(strg)

        # Rather than set every individual pixel (which would be inefficient)
        # create a map containing zones of different dark current multipliers.
        DARK_ZONES = 4
        rstep = datavalues.shape[0] // DARK_ZONES
        cstep = datavalues.shape[1] // DARK_ZONES

        for row in range(0, rows - rstep, rstep):
            for column in range(0, columns - cstep, cstep):
                if dmin < dmax:
                    rvalue = float(np.random.uniform(dmin, dmax))
                else:
                    # No variation
                    rvalue = dmin
                datavalues[row:row + rstep, column:column + cstep] = rvalue

        if cloneimage is not None:
            cloneimage = np.asarray(cloneimage)
            cloneimage *= dhot
            lastrow = rows - cloneimage.shape[0] + 1
            lastcol = columns - cloneimage.shape[1] + 1
        else:
            lastrow = rows
            lastcol = columns

        for zap in range(0, nhot):
            hit_row = int(np.random.uniform(0, lastrow))
            hit_column = int(np.random.uniform(0, lastcol))
            # Stamp only a small percent of the hot pixels with the clone image.
            MAX_CLONED_IMAGE_PERCENT = 5.0
            if (cloneimage is not None) and \
               (np.random.uniform(0, 100) < MAX_CLONED_IMAGE_PERCENT):

                for xx in range(0, cloneimage.shape[0]):
                    rr = hit_row + xx
                    for yy in range(0, cloneimage.shape[1]):
                        cc = hit_column + yy
                        datavalues[rr, cc] += cloneimage[xx, yy]
            else:
                datavalues[hit_row, hit_column] = dhot

    elif pattern == 'BADPIXEL':
        # A bad pixel map. There is never a wavelength array.
        add_wavelength = False
        datavalues = np.zeros([rows, columns], dtype=np.uint16)
        try:
            ndead = int(values)
        except (TypeError, ValueError) as e:
            strg = "BADPIXEL pattern needs an integer bad pixel count"
            strg += "\n %s" % e
            raise TypeError(strg)

        if cloneimage is not None:
            cloneimage = np.asarray(cloneimage)
            cloneimage *= _BAD_PIXEL
            lastrow = rows - cloneimage.shape[0] + 1
            lastcol = columns - cloneimage.shape[1] + 1
        else:
            lastrow = rows
            lastcol = columns

        for zap in range(0, ndead):
            hit_row = int(np.random.uniform(0, lastrow))
            hit_column = int(np.random.uniform(0, lastcol))
            # Stamp only a small percent of the bad pixels with the clone image.
            if (cloneimage is not None) and \
               (np.random.uniform(0, 100) < _MAX_CLONED_IMAGE_PERCENT):
                # TODO: I can't get this slicing to work.
                #                uptorow = hit_row + cloneimage.shape[0] - 1
                #                uptocol = hit_column + cloneimage.shape[1] - 1
                #                datavalues[hit_row:uptorow,hit_column:uptocol] = cloneimage
                for xx in range(0, cloneimage.shape[0]):
                    rr = hit_row + xx
                    for yy in range(0, cloneimage.shape[1]):
                        cc = hit_column + yy
                        datavalues[rr, cc] = cloneimage[xx, yy]
            else:
                datavalues[hit_row, hit_column] = _BAD_PIXEL

#         flagvalues=(_GOOD_PIXEL,_BAD_PIXEL)
#         flagnames=('GOOD','BAD')

# TODO: Add FRINGE pattern?
    else:
        # Other data types are not supported
        strg = "Sorry, test data pattern %s is not supported." % pattern
        raise ValueError(strg)

    # Add some test wavelength data if required. The wavelength will
    # increase linearly from bottom to top over the range specified.
    if add_wavelength:
        wavelength = np.empty_like(datavalues)
        wav = np.linspace(wavmin, wavmax, wavelength.shape[1])
        wavelength[:, :] = wav
#        wavelength = np.transpose(wavelength)
    else:
        wavelength = None

    data_metadata = None
    wavelength_metadata = None

    # Finally, create and return an appropriate object
    if pattern == 'BADPIXEL':
        data_object = MiriBadPixelMaskModel(dq=datavalues)
#         data_object = BadPixelMap(datavalues, flagvalues=flagvalues,
#                                   flagnames=flagnames, metadata=metadata,
#                                   dq_metadata=data_metadata)
    elif pattern == 'DARKMAP':
        data_object = MiriMeasuredModel(data=datavalues)


#     elif pattern == 'FRINGEMAP':
#         data_object = MiriMeasuredModel(data=datavalues)
    else:
        data_object = MiriIlluminationModel(intensity=datavalues,
                                            wavelength=wavelength)
    return data_object
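
# Illustrative usage sketch for create_test_data(), showing how the 'values'
# argument is packed for a few of the documented patterns. This helper is not
# part of the original module; the sizes, values and seed are arbitrary.
def _demo_create_test_data():
    flat = create_test_data(64, 64, 'CONSTANT', 1.0, add_wavelength=False)
    slope = create_test_data(64, 64, 'SLOPE', (10.0, 0.5, 0.1))
    boxes = create_test_data(128, 128, 'TESTIMAGE', (1.0, 100.0, 16, 16),
                             add_wavelength=True, wavmin=5.0, wavmax=12.0)
    darkmap = create_test_data(128, 128, 'DARKMAP', (0.9, 1.1, 20, 2000.0),
                               seedvalue=42)
    badpixels = create_test_data(128, 128, 'BADPIXEL', 50, seedvalue=42)
    return flat, slope, boxes, darkmap, badpixels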
    def test_subarray_to_subarray(self):
        # Test the extraction of a subarray from input data containing
        # the same subarray.
        if VERBOSE:
            print("\nTesting subarray to subarray modes: ", end='')
        if TEST_MODE_COVERAGE > 0.99:
            modes_to_test = list(detector_properties['SUBARRAY'].keys())
            random.shuffle(modes_to_test)
        else:
            nsamples = int(0.5 + len(list(detector_properties['SUBARRAY'].keys())) *
                           TEST_MODE_COVERAGE)
            nsamples = max(nsamples, MINSAMPLES)
            modes_to_test = random.sample(
                                list(detector_properties['SUBARRAY'].keys()),
                                nsamples)
        with warnings.catch_warnings():  # Suppress FITS header warnings.
            warnings.simplefilter("ignore")
            for submode in modes_to_test:
                if VERBOSE:
                    print(submode + ' ', end='')

                subarray = detector_properties.get('SUBARRAY', submode)
                if subarray is not None:

                    testvalues = np.fromfunction(
                        lambda i, j: 2.0 + i * 1.0 + j * 1.0,
                        [subarray[2], subarray[3]])
                    wavelength = np.empty_like(testvalues)
                    wav = np.linspace(5.0, 25.0, wavelength.shape[1])
                    wavelength[:, :] = wav

                    test_map = MiriIlluminationModel(intensity=testvalues,
                                                     wavelength=wavelength)

                    map_data = test_map.get_illumination()
                    # The illumination data is assumed to have a valid shape and be
                    # filled with a range of values (the data should have a slope).
                    self.assertTrue(map_data.shape[0] > 0)
                    self.assertTrue(map_data.shape[1] > 0)
                    self.assertNotAlmostEqual(map_data.min(), map_data.max())

                    sca = SensorChipAssembly1(logger=LOGGER)
                    sca.setup(_DEFAULT_SCA,
                              readout_mode=_DEFAULT_READOUT,
                              subarray=submode,
                              inttime=None,
                              ngroups=2,
                              nints=1,
                              temperature=6.5,
                              cosmic_ray_mode='NONE',
                              simulate_dark_current=SIMULATE_DARK,
                              verbose=0)
                    sca.set_illumination(test_map, subarray_input=submode)
                    simulated_data = sca.exposure()
                    # The simulated data must have a valid shape and contain a
                    # range of values. (A more specific test is difficult because
                    # read noise is included. The accuracy of subarray extraction
                    # should already have been tested in the test_detector unit
                    # tests.)
                    self.assertTrue(simulated_data.shape[0] > 0)
                    self.assertTrue(simulated_data.shape[1] > 0)
                    self.assertNotAlmostEqual(simulated_data.min(),
                                              simulated_data.max())
                    del sca
                    del test_map
        if VERBOSE:
            print("")