Example #1
    def create_lists_cam(self):
        """Initialize of all image lists, old lists are deleted."""
        self._lists_intern = od()
        for key, f in six.iteritems(self.filters.filters):
            l = self.lst_type(list_id=key, list_type=f.type,
                              camera=self.camera,
                              geometry=self.meas_geometry)
            l.filter = f
            if f.meas_type_acro not in self._lists_intern:
                self._lists_intern[f.meas_type_acro] = od()
            self._lists_intern[f.meas_type_acro][f.acronym] = l
            self.lists_access_info[f.id] = [f.meas_type_acro, f.acronym]

        if not bool(self.camera.dark_info):
            msg = ("Warning: dark image lists could not be initiated, no "
                   "dark image file information available in self.camera")
            print_log.warning(msg)
            return 0

        for item in self.camera.dark_info:
            l = DarkImgList(list_id=item.id, list_type=item.type,
                            read_gain=item.read_gain, camera=self.camera)
            l.filter = item
            if item.meas_type_acro not in self._lists_intern:
                self._lists_intern[item.meas_type_acro] = od()
            self._lists_intern[item.meas_type_acro][item.acronym] = l
            self.lists_access_info[item.id] = [item.meas_type_acro,
                                               item.acronym]
Example #2
def load_hd_new(file_path, meta=None, **kwargs):
    """Load new format from Heidelberg group.

    This format contains IPTC information

    :param file_path: image file path
    :param dict meta: optional, meta info dictionary to which additional meta
        information is supposed to be appended
    :return:
        - ndarray, image data
        - dict, dictionary containing meta information
    """
    if meta is None:
        meta = {}
    try:
        from PIL.Image import open
        read = open(file_path)
        meta["texp"] = float(read.tag_v2[270].split(" ")[0].split("s")[0])
        img = asarray(read)
    except ModuleNotFoundError:
        print_log.warning(
            "Python Imaging Library (PIL) could not be imported. Using "
            "opencv method for image import. Cannot import exposure time "
            "info from tiff header...please install PIL")
        img = imread(file_path, -1)
    # img = asarray(im)[::-1, 0::] #flip
    img = rot90(rot90(img))
    meta["start_acq"] = datetime.strptime(
        "_".join(basename(file_path).split("_")[:3]), "%Y%m%d_%H%M%S_%f")

    return (img, meta)
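The acquisition time is recovered purely from the file name (first three underscore-separated tokens). A minimal sketch of that step on a made-up file name:

from datetime import datetime
from os.path import basename

fname = "/data/20150916_071033_123456_M_B.tif"  # hypothetical file name
start_acq = datetime.strptime("_".join(basename(fname).split("_")[:3]),
                              "%Y%m%d_%H%M%S_%f")
print(start_acq)  # 2015-09-16 07:10:33.123456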
Example #3
    def guess_missing_settings(self, plume_img):
        """Call and return :func:`set_missing_ref_areas`.

        Note
        ----
        This is the previous name of the method
        :func:`set_missing_ref_areas`
        """
        print_log.warning(
            "Please use new name of method: set_missing_ref_areas")
        return self.set_missing_ref_areas(plume_img)
Example #4
    def get_radiances(self, img, line_ids=None):
        """Get radiances for dilution fit along terrain lines.

        The data is only extracted along specified input lines. The terrain
        distance retrieval :func:`det_topo_dists_line` must have been
        performed beforehand.

        Parameters
        ----------
        img : Img
            vignetting corrected plume image from which the radiances are
            extracted
        line_ids : list
            if desired, the data can also be accessed for specified line ids,
            which have to be provided in a list. If empty (default), all lines
            assigned to this class are considered

        """
        if line_ids is None:
            line_ids = []
        if not isinstance(img, Img) or not img.edit_log["vigncorr"]:
            raise ValueError("Invalid input, need Img class and Img needs to "
                             "be corrected for vignetting")
        if img.is_cropped or img.is_resized:
            raise ImgModifiedError("Image must not be cropped or rescaled")
        if len(line_ids) == 0:
            line_ids = self.line_ids

        dists, rads = [], []
        for line_id in line_ids:
            if line_id in self._dists_lines:
                skip = int(self._skip_pix[line_id])
                l = self.lines[line_id]
                mask = self._masks_lines[line_id]
                dists.extend(self._dists_lines[line_id][mask])
                rads.extend(l.get_line_profile(img)[::skip][mask])
            else:
                print_log.warning("Distances to line %s not available, please apply "
                     "distance retrieval first using class method "
                     "det_topo_dists_line")
        for x, y, dist in self._add_points:
            dists.append(dist)
            rads.append(img.img[y, x])
        return asarray(dists), asarray(rads)
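The returned distance/radiance pairs are the input for the dilution fit. As a rough illustration of what that fit looks like (the exponential model below is the standard light-dilution form and an assumption here, not taken from this method), one could estimate the extinction coefficient like this:

import numpy as np
from scipy.optimize import curve_fit

def dilution_model(dist, ext, i0, i_atm):
    """Radiance of a terrain feature at distance dist (assumed model form)."""
    return i0 * np.exp(-ext * dist) + i_atm * (1 - np.exp(-ext * dist))

# dists, rads would normally come from get_radiances(); synthetic data here
np.random.seed(0)
dists = np.linspace(2e3, 10e3, 50)                      # distances in m
rads = dilution_model(dists, 7e-5, 2500, 4000) + np.random.normal(0, 30, 50)

popt, _ = curve_fit(dilution_model, dists, rads,
                    p0=[1e-4, rads.min(), rads.max()])
print("retrieved extinction coefficient: %.2e m-1" % popt[0])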
Example #5
def load_usgs_multifits_uncompr(file_path, meta=None):
    """Load image from uncompressed multi-extension FITS file (USGS camera).

    :param file_path: image file path
    :param dict meta: optional, meta info dictionary to which additional meta
        information is appended (the filter can be specified via key
        "filter_id", either "on" or "off")
    :return:
        - ndarray, image data
        - dict, dictionary containing meta information
    """
    if meta is None:
        meta = {}
    img = None
    if "filter_id" not in meta:
        logger.warning(
            "filter_id (i.e. on or off) in input arg meta not specified. "
            "Using default filter_id=on")
        meta["filter_id"] = "on"
    try:
        f = fits.open(file_path)
        idx = 1 if meta["filter_id"] == "off" else 0
        hdu = f[idx]
        h = hdu.header

        try:
            meta["start_acq"] = matlab_datenum_to_datetime(h["DATETIME"])
            meta["texp"] = h["EXPTIME"] * h["NUMEXP"] / 1000
            meta["bit_depth"] = h["BITDEPTH"]
        except Exception:
            print_log.warning(
                "Failed to import image specific meta information from image "
                "HDU")
        h = f[0].header
        try:
            meta["lon"] = h["LON"]
            meta["lat"] = h["LAT"]
            meta["altitude"] = h["ALT"]
            meta["elev"] = h["ELEVANGL"]
            meta["azim"] = h["AZMTANGL"]
        except Exception:
            print_log.warning(
                "Failed to import camera specific meta information from "
                "primary HDU of FITS file...")
        img = hdu.data
        f.close()
    except Exception as e:
        raise IOError("Failed to import image data using custom method\n"
                      "Error message: %s" % repr(e))
    return (img, meta)
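``matlab_datenum_to_datetime`` (used above for the DATETIME header entry) is not shown here; a common way to implement that conversion (a sketch, not necessarily the pyplis version) is:

from datetime import datetime, timedelta

def matlab_datenum_to_datetime(datenum):
    """Convert a MATLAB serial date number to a Python datetime.

    MATLAB counts days from year 0, Python ordinals from year 1, hence the
    366-day offset.
    """
    return (datetime.fromordinal(int(datenum))
            + timedelta(days=datenum % 1)
            - timedelta(days=366))

print(matlab_datenum_to_datetime(737061.5))  # 2018-01-01 12:00:00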
Example #6
def load_hd_custom(file_path, meta=None, **kwargs):
    """Load image from HD custom camera.

    The camera specs can be found in
    `Kern et al. 2015, Intercomparison of SO2 camera systems for imaging
    volcanic gas plumes <http://www.sciencedirect.com/science/article/pii/
    S0377027314002662#>`__

    Images recorded with this camera type are stored as .tiff files and are
    read using OpenCV's :func:`imread`.

    :param file_path: image file path
    :param dict meta: optional, meta info dictionary to which additional
        meta information is supposed to be appended
    :return:
        - ndarray, image data
        - dict, dictionary containing meta information

    """
    if meta is None:
        meta = {}
    im = imread(file_path, -1)  # [1::, 1::]
    img = flipud(swapaxes(resize(im, (1024, 1024)), 0, 1))
    try:
        f = sub('.tiff', '.txt', file_path)
        with open(f) as txt_file:
            spl = txt_file.read().split('\n')
        spl2 = spl[0].split("_")
        try:
            meta["texp"] = float(spl[1].split("Exposure Time: ")[1]) / 1000.0
        except BaseException:
            meta["texp"] = float(spl[1].split("Exposure Time: ")[1].replace(
                ",", "."))
        meta["start_acq"] = datetime.strptime(spl2[0] + spl2[1],
                                              '%Y%m%d%H%M%S%f')
    except BaseException:
        print_log.warning(
            "Failed to read image meta data from text file (cam_id: hd)")
        raise
    return (img, meta)
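Exposure time and start time come from a .txt sidecar next to the image. A standalone sketch of that parsing on hypothetical sidecar content (the exact file layout is inferred from the code above, not documented):

from datetime import datetime

sidecar = "20150420_092024678911_img\nExposure Time: 1234.56"  # hypothetical
spl = sidecar.split("\n")
spl2 = spl[0].split("_")
try:
    texp = float(spl[1].split("Exposure Time: ")[1]) / 1000.0  # ms -> s
except ValueError:
    # some files appear to use a decimal comma instead of a point
    texp = float(spl[1].split("Exposure Time: ")[1].replace(",", "."))
start_acq = datetime.strptime(spl2[0] + spl2[1], "%Y%m%d%H%M%S%f")
print(texp, start_acq)  # 1.23456 2015-04-20 09:20:24.678911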
Example #7
    def extract_files_time_ival(self, all_paths):
        """Extract all files belonging to specified time interval.

        :param list all_paths: list of image filepaths
        """
        if not self.camera._fname_access_flags["start_acq"]:
            logger.warning("Acq. time information cannot be accessed from file names")
            return all_paths
        acq_time0 = self.camera.get_img_meta_from_filename(all_paths[0])[0]
        if acq_time0.date() == date(1900, 1, 1):
            paths = self._find_files_ival_time_only(all_paths)
        else:
            paths = self._find_files_ival_datetime(all_paths)

        if not bool(paths):
            print_log.warning("Error: no files could be found in specified time "
                              "interval %s - %s" % (self.start, self.stop))
            self.USE_ALL_FILES = True
        else:
            logger.info("%s files of type were found in specified time interval %s "
                        "- %s" % (len(paths), self.start, self.stop))
        return paths
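The actual interval filtering is delegated to ``_find_files_ival_datetime`` / ``_find_files_ival_time_only`` (not shown). As a rough standalone sketch of the idea (file names and format are illustrative assumptions):

from datetime import datetime

def in_interval(fname, start, stop, fmt="%Y%m%d_%H%M%S"):
    """Keep a file if its name-embedded timestamp lies within [start, stop]."""
    acq = datetime.strptime("_".join(fname.split("_")[:2]), fmt)
    return start <= acq <= stop

paths = ["20150916_071033_on.tif", "20150916_093020_on.tif"]
start, stop = datetime(2015, 9, 16, 7, 0), datetime(2015, 9, 16, 8, 0)
print([p for p in paths if in_interval(p, start, stop)])
# -> ['20150916_071033_on.tif']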
Example #8
def get_source_info(source_id, try_online=True):
    """Try access source information from file "my_sources.txt".

    File is part of package data

    :param str source_id: string ID of source (e.g. Etna)
    :param bool try_online: if True and local access fails, try to find source
        ID in online database
    """
    from pyplis import _LIBDIR
    dat = od()
    if source_id == "":
        return dat
    found = 0
    with open(join(_LIBDIR, "data", "my_sources.txt")) as f:
        for line in f:
            if "END" in line and found:
                return od([(source_id, dat)])
            spl = line.split(":")
            if found:
                if not any([line[0] == x for x in ["#", "\n"]]):
                    spl = line.split(":")
                    k = spl[0].strip()
                    data_str = spl[1].split("#")[0].strip()
                    dat[k] = data_str
            if spl[0] == "source_ids":
                if source_id in [
                        x.strip() for x in spl[1].split("#")[0].split(',')
                ]:
                    found = 1
    print_log.warning("Source info for source %s could not be found" %
                      source_id)
    if try_online:
        try:
            return get_source_info_online(source_id)
        except BaseException:
            pass
    return od()
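The parser implies a simple layout for entries in my_sources.txt: a ``source_ids`` line with accepted IDs, followed by ``key: value`` pairs and an ``END`` marker. A self-contained sketch with a made-up entry (the content below is illustrative, inferred from the parsing logic, not copied from the package data):

from collections import OrderedDict as od

example = """source_ids: Etna, NEC  # IDs under which this entry can be found
name: Etna
lat: 37.75   # decimal degrees (illustrative value)
lon: 14.99
altitude: 3300  # m (illustrative value)
END
"""

dat = od()
found = 0
for line in example.split("\n"):
    if "END" in line and found:
        break
    spl = line.split(":")
    if found and line and line[0] != "#":
        dat[spl[0].strip()] = spl[1].split("#")[0].strip()
    if spl[0] == "source_ids":
        if "Etna" in [x.strip() for x in spl[1].split("#")[0].split(",")]:
            found = 1

print(dat)  # OrderedDict([('name', 'Etna'), ('lat', '37.75'), ...])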
Example #9
def load_comtessa(file_path, meta=None):
    """Load image from a multi-layered fits file (several images in one file).

    Meta data is available only inside the header.

    This corresponds to image data from the COMTESSA project at the
    Norwegian Institute for Air Research.

    Note
    ----
    The comtessa *.fits files have several timestamps: 1) Filename --> minute
    in which the image was saved. 2) Meta information in the image header -->
    computer time when the image was saved. 3) First 14 image pixels contain
    a binary timestamp --> time when exposure was finished. Here nr 3) is saved
    as meta['stop_acq']. meta['start_acq'] is calculated from meta['stop_acq']
    and meta['texp']. meta['user_param1'] is the gain (float type).

    Parameters
    ----------
    file_path : string
        image file path
    meta : dict
        optional, meta info dictionary to which additional meta
        information is appended. The image index should be provided with key
        "fits_idx".

    Returns
    -------
    ndarray
        image data
    dict
        dictionary containing meta information

    """
    if meta is None:
        meta = {}
    hdulist = fits.open(file_path)
    try:
        img_hdu = meta['fits_idx']
    except KeyError:
        img_hdu = 0
        meta['fits_idx'] = 0
        print_log.warning(
            "Loading of comtessa fits file without providing the image index "
            "of desired image within the file. Image index was set to 0. "
            "Provide the image index via the meta = {'fits_idx':0} keyword.")
    # Load the image
    image = hdulist[img_hdu].data
    # read and replace binary time stamp
    endtime = _read_binary_timestamp(image)
    image[0, 0:14] = image[1, 0:14]
    # load meta data
    imageHeader = hdulist[img_hdu].header
    ms = int(imageHeader['EXP']) * 1000
    meta.update({
        "start_acq": endtime - timedelta(microseconds=ms),
        "stop_acq": endtime,
        "texp": float(imageHeader['EXP']) / 1000.,  # in seconds
        "temperature": float(imageHeader['TCAM']),
        "ser_no": imageHeader['SERNO'],
        "user_param1": float(imageHeader['GAIN'])
    })
    return (image, meta)
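The note above boils down to: ``stop_acq`` is decoded from the first 14 pixels, and ``start_acq`` is derived from it using the exposure time (header key ``EXP``, in milliseconds). A minimal sketch of that relation with made-up values:

from datetime import datetime, timedelta

stop_acq = datetime(2017, 6, 1, 12, 0, 0, 500000)  # hypothetical decoded stamp
exp_ms = 250                                        # hypothetical 'EXP' value
start_acq = stop_acq - timedelta(microseconds=exp_ms * 1000)
texp = exp_ms / 1000.  # seconds
print(start_acq, texp)  # 2017-06-01 12:00:00.250000 0.25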
Example #10
    def bg_from_poly_surface_fit(self,
                                 plume,
                                 mask=None,
                                 polyorder=2,
                                 pyrlevel=4):
        """Apply poly surface fit to plume image for bg retrieval.

        Parameters
        ----------
        plume : Img
            plume image
        mask : ndarray
            mask specifying gas free areas (if None, use all pixels). Note that
            the mask needs to be computed within the same ROI and at the same
            pyrlevel as the input plume image
        polyorder : int
            order of polynomial used for fit, defaults to 2
        pyrlevel : int
            pyramid level in which fit is performed
            (e.g. 4 => image size for fit is reduced by factor 2^4 = 16). Note
            that the fit result is mapped back onto the size of the input
            image.

        Returns
        -------
        ndarray
            fitted sky background

        Note
        ----
        The :class:`PolySurfaceFit` object used to retrieve the background
        is stored in the attribute :attr:`_last_surffit`.

        """
        if not isinstance(plume, Img):
            raise TypeError("Need instance of pyplis Img class")
        if mask is None:
            mask = self.surface_fit_mask
        if not isinstance(mask, ndarray):
            try:
                mask = mask.img
                if not mask.shape == plume.shape:
                    raise AttributeError("Shape mismatch between mask and "
                                         "plume image")
            except Exception:
                mask = self._init_bgsurf_mask(plume)
        pyrlevel_rel = pyrlevel - plume.pyrlevel
        if pyrlevel_rel < 0:
            print_log.warning(
                "Pyramid level of input image (%d) is larger than desired "
                "pyramid level for computation of surface fit (%d). Using "
                "the current pyrlevel %d of input image" %
                (plume.pyrlevel, pyrlevel, plume.pyrlevel))
            pyrlevel_rel = 0
        # update settings from input keyword arg

        fit = PolySurfaceFit(plume.img,
                             mask,
                             polyorder=polyorder,
                             pyrlevel=pyrlevel_rel)
        if not fit.model.shape == plume.shape:
            raise ValueError("Mismatch in shape between input plume image and "
                             "fit result of PolySurfaceFit. Check pyramid "
                             "level of input image")
        self._last_surffit = fit
        return fit.model
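A rough usage sketch (object names are placeholders; the optical-density step at the end is the usual follow-up and an assumption here, not part of this method):

import numpy as np

def retrieve_tau_via_surface_fit(bg_model, plume, mask=None, polyorder=2,
                                 pyrlevel=4):
    """Fit the sky background and convert to an optical-density (tau) image.

    bg_model is an object providing bg_from_poly_surface_fit as defined
    above, plume a vignetting-corrected pyplis Img, mask a boolean ndarray
    flagging gas-free pixels (same shape and pyrlevel as plume).
    """
    bg = bg_model.bg_from_poly_surface_fit(plume, mask=mask,
                                           polyorder=polyorder,
                                           pyrlevel=pyrlevel)
    return np.log(bg / plume.img)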
Example #11
    def fill_image_lists(self):
        """Import all images and fill image list objects."""
        warnings = []
        cam = self.camera

        #: check if image filetype is specified and if not, set option to use
        #: all file types
        self._check_file_type()
        if self.base_dir is None or not exists(self.base_dir):
            s = ("Warning: image base directory does not exist, method "
                 "init_image_lists aborted in Dataset")
            warnings.append(s)
            print_log.warning(s)
            return False
        #: load all file paths
        paths = self.get_all_filepaths()
        # paths now includes all valid paths dependent on whether file_type is
        # specified or not and whether also subdirectories were considered
        if not bool(paths):
            s = ("Warning: lists could not be initiated, no valid files found "
                 "method init_image_lists aborted in Dataset")
            warnings.append(s)
            print_log.warning(s)
            return False
        # check which image meta information can be accessed from first file in
        # list (updates ``_fname_access_flags`` in :class:`Camera`)
        self.check_filename_info_access(paths[0])

        # get the current meta access flags
        flags = cam._fname_access_flags
        if self.USE_ALL_FILES and flags["start_acq"]:
            # take all files in the base folder (i.e. set start and stop to
            # the acquisition times of the first and last file in the folder)
            self.setup.start = cam.get_img_meta_from_filename(paths[0])[0]
            self.setup.stop = cam.get_img_meta_from_filename(paths[-1])[0]

        #: Set option to use all files in case acquisition time stamps cannot
        #: be accessed from filename
        if not flags["start_acq"]:
            print_log.warning("Acquisition time access from filename not possible, "
                              "using all files")
            self.setup.options["USE_ALL_FILES"] = True

        #: Separate the current list based on specified time stamps
        if not self.setup.options["USE_ALL_FILES"]:
            paths_temp = self.extract_files_time_ival(paths)
            if not bool(paths_temp):
                # check if any files were found in specified t-window
                s = ("No images found in specified time interval "
                     "%s - %s, mode was changed to: USE_ALL_FILES=True"
                     % (self.start, self.stop))
                warnings.append(s)
                self.setup.options["USE_ALL_FILES"] = True
            else:
                paths = paths_temp
        if self.setup.ON_OFF_SAME_FILE:
            logger.warning("Option ON_OFF_SAME_FILE is active: using same file paths "
                           "in default on and offband list. Please note that no "
                           "further file separation is applied (e.g. separation of "
                           "dark images)")
            # the function add_files adds the file paths to the list and loads
            # the current and next images (at index 0 and 1)
            self.img_lists[self.filters.default_key_on].add_files(paths)
            self.img_lists[self.filters.default_key_off].add_files(paths)
        else:
            if not (flags["filter_id"] and flags["meas_type"]):
                #: it is not possible to separate different image types (on,
                #: off, dark..) from filename, thus all are loaded into on
                #: image list
                warnings.append("Images can not be separated by type/meas_type"
                                " (e.g. on, off, dark, offset...) from "
                                "filename info, loading "
                                "all files into on-band list")
                self.setup.options["SEPARATE_FILTERS"] = False
                i = self.lists_access_info[self.filters.default_key_on]
                self._lists_intern[i[0]][i[1]].add_files(paths)
                for msg in warnings:
                    logger.warning(msg)
                return True

            #: now perform separation by meastype and filter
            for p in paths:
                try:
                    _, filter_id, meas_type, _, _ = self.camera. \
                        get_img_meta_from_filename(p)
                    self._lists_intern[meas_type][filter_id].files.append(p)
                except Exception:
                    logger.warning("File %s could not be added..." % p)

            for meas_type, sub_dict in six.iteritems(self._lists_intern):
                for filter_id, lst in six.iteritems(sub_dict):
                    lst.init_filelist()
            for lst in self.img_lists_with_data.values():
                lst.load()
            self.assign_dark_offset_lists()
        if self.LINK_OFF_TO_ON:
            try:
                off_list = self.get_list(self.filters.default_key_off)
                self.get_list(self.filters.default_key_on). \
                    link_imglist(off_list)
            except BaseException:
                pass
        if self.setup.REG_SHIFT_OFF:
            for lst in self.img_lists_with_data.values():
                if lst.list_type == "off":
                    lst.shift_mode = True

        for msg in warnings:
            logger.warning(msg)
        return True
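In practice this method is not called directly; it runs as part of the Dataset initialisation. A rough sketch of the usual pyplis workflow that leads here (directory, times and camera ID are placeholders; the exact setup depends on the measurement):

from datetime import datetime
import pyplis

img_dir = "/path/to/images"              # placeholder
start = datetime(2015, 9, 16, 7, 6, 0)   # placeholder
stop = datetime(2015, 9, 16, 7, 22, 0)   # placeholder
cam = pyplis.Camera(cam_id="ecII")       # camera with filename conventions

stp = pyplis.setupclasses.MeasSetup(img_dir, start, stop, camera=cam)
ds = pyplis.Dataset(stp)  # image lists are created and filled on init
on_list = ds.get_list("on")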