def stop(self, val):
    """Set the stop coordinate (x1, y1) of the line.

    Parameters
    ----------
    val : iterable
        2-element iterable containing x and y coordinate of the stop
        point of the line

    Invalid input (wrong length, or input without ``len``) is logged as
    a warning and leaves the current coordinates unchanged.
    """
    try:
        if len(val) == 2:
            self.x1 = val[0]
            self.y1 = val[1]
        else:
            # previously, wrong-length input was silently ignored
            logger.warning("Stop coordinates could not be set: expected "
                           "2 values, got %d" % len(val))
    except BaseException:
        logger.warning("Stop coordinates could not be set")
def dist_other(self, other):
    """Determine the distance to another line.

    Note
    ----
    1. The offset is applied in relative coordinates, i.e. it does not
       consider the pyramide level or ROI.
    2. The two lines need to be parallel

    Parameters
    ----------
    other : LineOnImage
        the line to which the distance is retrieved

    Returns
    -------
    float
        retrieved distance in pixel coordinates (mean of the two
        endpoint-to-endpoint distances)
    """
    # displacement vectors between corresponding start and end points
    dx0, dy0 = other.x0 - self.x0, other.y0 - self.y0
    dx1, dy1 = other.x1 - self.x1, other.y1 - self.y1
    # NOTE(review): this compares the two displacement vectors, which only
    # match when the lines are parallel AND of equal length. The previous
    # docstring claimed a ValueError is raised here, but the code merely
    # logs a warning and continues with the (then ill-defined) distance.
    if dx1 != dx0 or dy1 != dy0:
        logger.warning("Lines are not parallel...")
    return mean([norm([dx0, dy0]), norm([dx1, dy1])])
def integrate_profile(self, input_img, pix_step_length=None):
    """Integrate the line profile on input image.

    Parameters
    ----------
    input_img : Img or ndarray
        input image data
    pix_step_length : Img or ndarray, optional
        pixel-wise integration step lengths (same shape as
        ``input_img``); if None, integration is performed in units of
        pixels

    Returns
    -------
    float
        integrated line profile

    Raises
    ------
    ValueError
        if shapes of ``input_img`` and ``pix_step_length`` mismatch
    """
    try:  # in case input is an Img
        input_img = input_img.img
    except AttributeError:  # was a bare except; only Img unwrapping intended
        pass
    vals = self.get_line_profile(input_img)
    if pix_step_length is None:
        logger.warning("No information about integration step lengths provided "
                       "Integration is performed in units of pixels")
        return sum(vals)
    try:  # step lengths may also be provided as Img
        pix_step_length = pix_step_length.img
    except AttributeError:  # was a bare except
        pass
    if isinstance(pix_step_length, ndarray):
        if not pix_step_length.shape == input_img.shape:
            raise ValueError("Shape mismatch between input image and "
                             "pixel")
        pix_step_length = self.get_line_profile(pix_step_length)
    return sum(vals * pix_step_length)
def set_test_data_path(save_path):
    """Set local path where test data is stored.

    Registers ``save_path`` in the ``_paths.txt`` lookup file of the
    data search directories (unless it is already registered) and warns
    if the expected test data folder cannot be found at that location.

    Parameters
    ----------
    save_path : str
        local directory where the test data is (or will be) stored

    Raises
    ------
    IOError
        if the specified location does not exist
    """
    if save_path.lower() in all_test_data_paths():
        logger.info("Path is already in search tree")
        return
    dirs = data_search_dirs()
    fp = join(dirs[0], "_paths.txt")
    if not exists(fp):
        # fall back to secondary search directory
        fp = join(dirs[1], "_paths.txt")
    save_path = abspath(save_path)
    if not exists(save_path):
        raise IOError("Could not set test data path: specified location "
                      "does not exist: %s" % save_path)
    # context manager closes the file; the explicit close() call present
    # in earlier revisions was redundant
    with open(fp, "a") as f:
        f.write("\n" + save_path + "\n")
        print_log.info("Adding new path for test data location in "
                       "file _paths.txt: %s" % save_path)
    if "pyplis_etna_testdata" not in listdir(save_path):
        logger.warning(
            "WARNING: test data folder (name: pyplis_etna_testdata) "
            "could not be found at specified location, please download "
            "test data, unzip and save at: %s" % save_path)
def __setitem__(self, key, val):
    """Add or replace a Filter entry under the given key.

    Only instances of :class:`Filter` are accepted; replacing an
    already existing entry is allowed but logged as a warning.
    """
    if not isinstance(val, Filter):
        msg = ('Invalid input: need instance of Filter class, '
               'got {}'.format(val))
        raise ValueError(msg)
    if key in self._filters:
        logger.warning('Filter with ID {} already exists in FilterSetup '
                       'and will be overwritten'.format(key))
    self._filters[key] = val
def load_from_fits(self, file_path):
    """Load stack object (fits).

    Parameters
    ----------
    file_path : str
        file path of calibration data

    Returns
    -------
    HDUList
        opened HDU object (e.g. to access potential further data in a
        function that is calling this method); ``None`` if :attr:`type`
        is "base" (the HDU is closed in that case)

    Raises
    ------
    IOError
        if ``file_path`` does not exist
    """
    if not exists(file_path):
        raise IOError("CalibData could not be loaded, "
                      "path does not exist")
    hdu = fits.open(file_path)
    # primary HDU: sensitivity correction mask + calibration meta data
    self.senscorr_mask = Img(hdu[0].data)
    self.calib_id = hdu[0].header["calib_id"]
    self.type = hdu[0].header["type"]
    # transfer matching primary-header entries into the mask's edit log
    for key, val in six.iteritems(hdu[0].header):
        k = key.lower()
        if k in self.senscorr_mask.edit_log:
            self.senscorr_mask.edit_log[k] = val
    if self.senscorr_mask.is_cropped:
        logger.warning(
            "Imported sensitivity correction mask is flagged as cropped "
            "and might not work on uncropped images")
    # first extension: calibration table (time stamps, tau / CD vectors).
    # byteswap().newbyteorder() converts FITS big-endian data to native
    # byte order (same fix as documented in load_stack_fits)
    ctable = hdu[1]
    try:
        times = ctable.data["time_stamps"].byteswap().newbyteorder()
        self.time_stamps = [
            datetime.strptime(x, "%Y%m%d%H%M%S%f")
            for x in times
        ]
    except BaseException:
        logger.warning(
            "Failed to import vector containing calib time stamps from "
            "FITS")
    try:
        self.tau_vec = ctable.data["tau_vec"].byteswap().newbyteorder()
    except BaseException:
        logger.warning("Failed to import calibration tau vector from FITS")
    try:
        self.cd_vec = ctable.data["cd_vec"].byteswap().newbyteorder()
    except BaseException:
        logger.warning("Failed to import CD vector from FITS")
    try:
        self.cd_vec_err = ctable.data["cd_vec_err"].byteswap(
        ).newbyteorder()
    except BaseException:
        logger.warning("Failed to import CD uncertainty vector from FITS")
    if self.type == "base":
        # base calibration: no further extensions expected, close file
        hdu.close()
        hdu = None
    return hdu
def get_all_filepaths(self):
    """Find all valid image filepaths in current base directory.

    Side effect: if :attr:`file_type` is not a string, the setup
    options ``USE_ALL_FILES`` and ``USE_ALL_FILE_TYPES`` are enabled.

    Returns
    -------
    list
        list containing all valid image file paths (Note, that these
        include all files found in the folder(s) in case the file type
        is not explicitely set in the camera class.)
    """
    logger.info("\nSEARCHING VALID FILE PATHS IN\n%s\n" % self.base_dir)
    p = self.base_dir
    ftype = self.file_type
    if not isinstance(ftype, str):
        # no file type specified -> fall back to accepting everything
        logger.warning("file_type not specified in Dataset..."
                       "Using all files and file_types")
        self.setup.options["USE_ALL_FILES"] = True
        self.setup.options["USE_ALL_FILE_TYPES"] = True
    if p is None or not exists(p):
        message = ('Error: path %s does not exist' % p)
        logger.warning(message)
        return []
    if not self.INCLUDE_SUB_DIRS:
        logger.info("Image search is only performed in specified directory "
                    "and does not include subdirectories")
        if self.USE_ALL_FILE_TYPES:
            logger.info("Using all file types")
            # keep regular files only, skip sub-directories
            all_paths = [join(p, f) for f in listdir(p)
                         if isfile(join(p, f))]
        else:
            logger.info("Using only %s files" % self.file_type)
            all_paths = [join(p, f) for f in listdir(p)
                         if isfile(join(p, f)) and f.endswith(ftype)]
    else:
        logger.info("Image search includes files from subdirectories")
        all_paths = []
        if self.USE_ALL_FILE_TYPES:
            logger.info("Using all file types")
            for path, subdirs, files in walk(p):
                for filename in files:
                    all_paths.append(join(path, filename))
        else:
            logger.info("Using only %s files" % ftype)
            for path, subdirs, files in walk(p):
                for filename in files:
                    if filename.endswith(ftype):
                        all_paths.append(join(path, filename))
    all_paths.sort()
    logger.info("Total number of files found %s" % len(all_paths))
    return all_paths
def stop(self, val):
    """Assign the stop time of the measurement setup.

    Accepts anything understood by :func:`pandas.to_datetime`. On
    success ``USE_ALL_FILES`` is disabled; non-None input that cannot
    be converted is logged as a warning.
    """
    try:
        self._stop = to_datetime(val)
        self.USE_ALL_FILES = False
    except BaseException:
        if val is None:
            return
        logger.warning(
            "Input %s could not be assigned to stop time in "
            "setup" % val)
def get_all_files_in_dir(directory, file_type=None, include_sub_dirs=False):
    """Find all files in a certain directory.

    Parameters
    ----------
    directory : str
        path to directory
    file_type : :obj:`str`, optional
        specify file type (e.g. "png", "fts"). If unspecified, then all
        files are considered
    include_sub_dirs : bool
        if True, also all files from all sub-directories are extracted

    Returns
    -------
    list
        sorted list containing paths of all files detected
    """
    p = directory
    if p is None or not exists(p):
        message = ('Error: path %s does not exist' % p)
        logger.warning(message)
        return []
    # only filter by extension when a valid file type string is given
    use_all_types = not isinstance(file_type, str)
    if include_sub_dirs:
        logger.info("Include files from subdirectories")
        if use_all_types:
            logger.info("Using all file types")
            all_paths = [join(root, fname)
                         for root, subdirs, files in walk(p)
                         for fname in files]
        else:
            logger.info("Using only %s files" % file_type)
            all_paths = [join(root, fname)
                         for root, subdirs, files in walk(p)
                         for fname in files
                         if fname.endswith(file_type)]
    else:
        logger.info("Exclude files from subdirectories")
        if use_all_types:
            logger.info("Using all file types")
            all_paths = [join(p, f) for f in listdir(p)
                         if isfile(join(p, f))]
        else:
            logger.info("Using only %s files" % file_type)
            all_paths = [join(p, f) for f in listdir(p)
                         if isfile(join(p, f)) and f.endswith(file_type)]
    all_paths.sort()
    return all_paths
def plot(self, add_label_str="", ax=None, **kwargs):
    """Plot calibration data and fit result.

    Parameters
    ----------
    add_label_str : str
        additional string added to label of plots for legend
    ax : matplotlib axes object, optional
        if None, a new one is created
    **kwargs
        additional keyword args passed to the plot calls (a default
        color "b" is inserted unless specified)

    Returns
    -------
    matplotlib axes object
        the axes containing the plots
    """
    if "color" not in kwargs:
        kwargs["color"] = "b"
    if ax is None:
        fig, ax = subplots(1, 1, figsize=(10, 8))
    # x-grid spanning the optical density (tau) range of the data
    taumin, taumax = self.tau_range
    x = linspace(taumin, taumax, 100)
    cds = self.cd_vec
    ax.plot(self.tau_vec, cds, ls="", marker=".",
            label="Data %s" % add_label_str, **kwargs)
    try:
        ax.errorbar(self.tau_vec, cds, yerr=self.cd_vec_err,
                    marker="None", ls=" ", c="#b3b3b3")
    except BaseException:
        logger.warning("No CD errors available")
    try:
        # evaluate fitted calibration function on the tau grid
        cds_calib = self.calib_fun(x, *self.calib_coeffs)
        ax.plot(x, cds_calib, ls="-", marker="",
                label="Fit result", **kwargs)
    except TypeError:
        # presumably calib_coeffs is still None (unpacking fails) when
        # no fit was performed yet -- confirm against fit_calib_data
        logger.info("Calibration poly probably not fitted")
    ax.set_title("Calibration data, ID: %s" % self.calib_id)
    ax.set_ylabel(r"$S_{%s}$ [cm$^{-2}$]" % SPECIES_ID)
    ax.set_xlabel(r"$\tau_{%s}$" % self.calib_id)
    ax.grid()
    ax.legend(loc='best', fancybox=True, framealpha=0.7)
    return ax
def change_img_base_dir(self, img_dir):
    """Set or update the current base_dir.

    :param str img_dir: new base directory path
    :return: 0 if the path does not exist (base_dir unchanged),
        None on success
    """
    if exists(img_dir):
        self.setup.base_dir = img_dir
        return
    msg = ("Could not update base_dir, input path %s does not "
           "exist" % img_dir)
    logger.warning(msg)
    self.warnings.append(msg)
    return 0
def get_list(self, list_id):
    """Get image list for one filter.

    :param str list_id: filter ID of image list (e.g. "on")
    :raises KeyError: if no list exists for the given ID
    """
    if list_id not in self.lists_access_info.keys():
        raise KeyError("%s ImgList could not be found..." % list_id)
    group, key = self.lists_access_info[list_id]
    lst = self._lists_intern[group][key]
    if not lst.nof > 0:
        # empty list is returned anyway, but flagged
        logger.warning("Image list %s does not contain any images"
                       % list_id)
    return lst
def load_images(self):
    """Load the current images in all image lists.

    Note
    ----
    Gives warning for lists containing no images
    """
    for img_list in self.all_lists():
        if not img_list.nof > 0:
            logger.warning("No images available in list %s"
                           % img_list.list_id)
            continue
        img_list.load()
def plot_calib_fun(self, add_label_str="", shift_yoffset=False, ax=None,
                   **kwargs):
    """Plot calibration fit result.

    Parameters
    ----------
    add_label_str : str
        additional string added to label of plots for legend
    shift_yoffset : bool
        if True, the data is plotted without y-offset
    ax : matplotlib axes object, optional
        if None, a new one is created
    **kwargs
        additional keyword args passed to the plot call (a default
        color "b" is inserted unless specified)

    Returns
    -------
    matplotlib axes object
        axes containing the plotted calibration curve
    """
    if "color" not in kwargs:
        kwargs["color"] = "b"
    if ax is None:
        fig, ax = subplots(1, 1, figsize=(10, 8))
    # x-grid spanning the optical density (tau) range of the data
    taumin, taumax = self.tau_range
    x = linspace(taumin, taumax, 100)
    if self.calib_coeffs is None:
        # no fit performed yet -> run default fit before plotting
        logger.warning(
            "Calibration function not yet fitted, applying default fit"
            "(1. order polynomial)")
        self.fit_calib_data()
    cds_poly = self.calib_fun(x, *self.calib_coeffs)
    if shift_yoffset:
        try:
            cds_poly -= self.y_offset
        except BaseException:
            logger.warning("Failed to subtract y offset")
    try:
        ax.plot(x, cds_poly, ls="-", marker="",
                label="Fit result %s" % add_label_str, **kwargs)
    except TypeError:
        logger.info("Calibration poly probably not fitted")
    ax.grid()
    ax.legend(loc='best', fancybox=True, framealpha=0.7)
    return ax
def __setitem__(self, key, value): """Update class item.""" if key in self.__dict__: logger.info("Updating %s in background model" % key) self.__dict__[key] = value elif key == "mode": "Updating %s in background model" % key self.mode = value elif key == "surface_fit_mask": self.surface_fit_mask = value elif key == "CORR_MODE": logger.warning( "Got input key CORR_MODE which is out-dated in versions 0.10+" ". Updated background modelling mode accordingly") self.mode = value
def velo_glob(self, val):
    """Set the global plume velocity in m/s.

    Input must be convertible to a finite, non-negative float; values
    above 40 m/s trigger a warning. If no global velocity error is
    assigned yet, it is set to 50% of the velocity.
    """
    try:
        velocity = float(val)
        if isnan(velocity):
            raise Exception
    except BaseException:
        raise ValueError("Invalid input, need float or int...")
    if velocity < 0:
        raise ValueError("Velocity must be larger than 0")
    if velocity > 40:
        logger.warning("Large value warning: input velocity exceeds 40 m/s")
    self._velo_glob = velocity
    err = self._velo_glob_err
    if err is None or isnan(err):
        logger.warning("Global velocity error not assigned, assuming 50% of "
                       "velocity")
        self.velo_glob_err = velocity * 0.50
def save_as_fits(self, save_dir=None, save_name=None,
                 overwrite_existing=True):
    """Save stack as FITS file.

    Parameters
    ----------
    save_dir : str, optional
        directory to save the file in (absolute path of current working
        directory if None); may also be a full file path, in which case
        the file name is extracted from it
    save_name : str, optional
        name of the output file; if None, a name is derived from the
        stack ID and the start / stop times (extension forced to .fts)
    overwrite_existing : bool
        if True, an existing file at the target path is overwritten
    """
    self._format_check()
    # returns abspath of current wkdir if None
    save_dir = abspath(save_dir)
    if not isdir(save_dir):  # save_dir is a file path
        save_name = basename(save_dir)
        save_dir = dirname(save_dir)
    if save_name is None:
        save_name = ("pyplis_imgstack_id_%s_%s_%s_%s.fts"
                     % (self.stack_id, self.start.strftime("%Y%m%d"),
                        self.start.strftime("%H%M"),
                        self.stop.strftime("%H%M")))
    else:
        # force .fts extension
        save_name = save_name.split(".")[0] + ".fts"
    logger.info("DIR: %s" % save_dir)
    logger.info("Name: %s" % save_name)
    hdu = fits.PrimaryHDU()
    # acquisition times stored as fixed-width (25 char) strings
    start_acq_str = [x.strftime("%Y%m%d%H%M%S%f") for x in self.start_acq]
    col1 = fits.Column(name="start_acq", format="25A",
                       array=start_acq_str)
    col2 = fits.Column(name="texps", format="D", array=self.texps)
    col3 = fits.Column(name="_access_mask", format="L",
                       array=self._access_mask)
    col4 = fits.Column(name="add_data", format="D", array=self.add_data)
    cols = fits.ColDefs([col1, col2, col3, col4])
    arrays = fits.BinTableHDU.from_columns(cols)
    col5 = fits.Column(name="roi_abs", format="I", array=self.roi_abs)
    roi_abs = fits.BinTableHDU.from_columns([col5])
    hdu.data = self.stack
    hdu.header.update(self.img_prep)
    hdu.header["stack_id"] = self.stack_id
    # NOTE(review): append() without arguments appends an empty card --
    # confirm this is intentional
    hdu.header.append()
    hdulist = fits.HDUList([hdu, arrays, roi_abs])
    path = join(save_dir, save_name)
    if exists(path):
        logger.info("Stack already exists at %s and will be overwritten"
                    % path)
    try:
        # NOTE(review): the "clobber" keyword was deprecated in astropy
        # in favour of "overwrite" -- confirm required astropy version
        hdulist.writeto(path, clobber=overwrite_existing)
    except BaseException:
        logger.warning("Failed to save stack to FITS File "
                       "(check previous warnings)")
def get_altitude_srtm(self):
    """Try load camera altitude based on lon, lat and SRTM topo data.

    Note
    ----
    Requires :mod:`geonum` package to be installed and :attr:`lon` and
    :attr:`lat` to be set. Failures of any kind (missing package,
    unset coordinates, no topo data) are logged as warning.
    """
    try:
        from geonum import GeoPoint
        lat, lon = float(self.lat), float(self.lon)
        self.altitude = GeoPoint(lat, lon).altitude
    except Exception as e:
        logger.warning(
            "Failed to automatically access local topography altitude"
            " at camera position using SRTM data: %s" % repr(e))
def find_registration_shift_optflow(on_img, off_img, roi_abs=DEFAULT_ROI,
                                    **flow_settings):
    """Search average shift between two images using optical flow.

    Computes optical flow between two input images and determines the
    registration shift based on peaks in two histograms of the orientation
    angle distribution and vector magnitued distribution of the retrieved
    flow field. The histogram analysis may be reduced to a certain ROI in
    the images.

    The default settings used here correspond to the settings suggested by
    Peters et al., Use of motion estimation algorithms for improved flux
    measurements using SO2 cameras, JVGR, 2015.

    Parameters
    ----------
    on_img : Img
        onband image containing (preferably fixed) objects in the scene
        that can be tracked
    off_img : Img
        corresponding offband image (ideally recorded at the same time)
    roi_abs : list
        if specified, the optical flow histogram parameters are retrieved
        from the flow field within this ROI (else, the whole image is used)
    **flow_settings
        additional keyword args specifying the optical flow computation
        and post analysis settings (see
        :class:`pyplis.plumespeed.FarnebackSettings` for details)

    Returns
    -------
    tuple
        2-element tuple containing

        - float: shift in x-direction
        - float: shift in y-direction

    Raises
    ------
    ValueError
        if the shapes of the two input images mismatch
    NotImplementedError
        always (implementation pending)
    """
    if not on_img.shape == off_img.shape:
        raise ValueError("Shape mismatch between input images")
    if on_img.pyrlevel != 0:
        # fix: the %d placeholder previously had no argument, so the
        # literal "%d" appeared in the log message
        logger.warning("Input images are at pyramid level %d and "
                       "registration shift will be computed for this "
                       "pyramid level" % on_img.pyrlevel)
    # TODO: compute flow via pyplis.plumespeed.OptflowFarneback and derive
    # the shift from the flow histograms
    raise NotImplementedError("Under development")
def load_stack_fits(self, file_path):
    """Load stack object (fits).

    Note
    ----
    FITS stores in Big-endian and needs to be converted into
    little-endian (see `this issue
    <https://github.com/astropy/astropy/issues/1156>`__). We follow the
    suggested fix and use::

        byteswap().newbyteorder()

    on any loaded data array.

    Parameters
    ----------
    file_path : str
        file path of stack

    Raises
    ------
    IOError
        if ``file_path`` does not exist
    """
    if not exists(file_path):
        raise IOError("ImgStack could not be loaded, path does not exist")
    hdu = fits.open(file_path)
    # primary HDU holds the image stack itself
    self.set_stack_data(hdu[0].data.byteswap().newbyteorder().
                        astype(self.dtype))
    # transfer matching header entries into the image preparation dict
    prep = Img().edit_log
    for key, val in six.iteritems(hdu[0].header):
        if key.lower() in prep.keys():
            self.img_prep[key.lower()] = val
    self.stack_id = hdu[0].header["stack_id"]
    # first extension: per-image meta data (each import is best-effort)
    try:
        times = hdu[1].data["start_acq"].byteswap().newbyteorder()
        self.start_acq = asarray([datetime.strptime(x, "%Y%m%d%H%M%S%f")
                                  for x in times])
    except BaseException:
        logger.warning("Failed to import acquisition times")
    try:
        self.texps = asarray(
            hdu[1].data["texps"].byteswap().newbyteorder())
    except BaseException:
        logger.warning("Failed to import exposure times")
    try:
        self._access_mask = asarray(hdu[1].data["_access_mask"].
                                    byteswap().newbyteorder())
    except BaseException:
        logger.warning("Failed to import data access mask")
    try:
        self.add_data = asarray(hdu[1].data["add_data"].byteswap().
                                newbyteorder())
    except BaseException:
        logger.warning("Failed to import data additional data")
    # second extension: absolute ROI coordinates
    self.roi_abs = hdu[2].data["roi_abs"].byteswap().\
        newbyteorder()
    self._format_check()
def get_line_profile(self, array, order=1, **kwargs):
    """Retrieve the line profile along pixels in input array.

    Parameters
    ----------
    array : array
        2D data array (e.g. image data). Color images are converted
        into gray scale using :func:`cv2.cvtColor`.
    order : int
        order of spline interpolation used to retrieve the values along
        input coordinates (passed to :func:`map_coordinates`)
    **kwargs
        additional keword args passed to interpolation method
        :func:`map_coordinates`

    Returns
    -------
    array
        profile (None in case of invalid input dimension)
    """
    try:
        array = array.img  # if input is Img object
    except AttributeError:  # was a bare except; only Img unwrapping intended
        pass
    if ndim(array) != 2:
        if ndim(array) != 3:
            logger.info("Error retrieving line profile, invalid dimension of "
                        "input array: %s" % (ndim(array)))
            return
        if array.shape[2] != 3:
            logger.info("Error retrieving line profile, invalid dimension of "
                        "input array: %s" % (ndim(array)))
            return
        # input in BGR, conversion into gray image (this note used to be
        # a dangling no-op string expression)
        array = cvtColor(array, COLOR_BGR2GRAY)
    # Extract the values along the line, using interpolation
    zi = map_coordinates(array, self.profile_coords, order=order, **kwargs)
    if sum(isnan(zi)) != 0:
        logger.warning("Retrieved NaN for one or more pixels along line on input "
                       "array")
    return zi
def calib_coeffs(self, val):
    """Set calibration coefficients manually.

    Input must be an iterable whose length matches the number of
    optimisation parameters of the current calibration function.
    Overwriting previously assigned coefficients is logged as warning.
    """
    try:
        iter(val)
    except BaseException:
        raise TypeError("Input is not iterable, need list, tuple or "
                        "similar, containing optimisation coefficients")
    req_num_args = self.num_optargs_fun(self.calib_fun)
    if len(val) != req_num_args:
        raise AttributeError("Number of provided coefficients does not "
                             "match the number of optimisation params "
                             "in current optimisation function. "
                             "Please check and update class attribute "
                             "calib_fun first...")
    current = self._calib_coeffs
    if current is not None and len(current) > 0:
        logger.warning("Resetting calibration coefficients manually. "
                       "This may introduce analysis errors. It is "
                       "recommended to use the method fit_calib_data "
                       "instead")
    self._calib_coeffs = val
def fit_polynomial(self, order=2):
    """Fit polynomial to data series.

    :param int order: order of polynomial
    :returns:
        - poly1d, the fitted polynomial
    :raises ValueError: if the series contains only one data point
    """
    series = self.dropna()
    num = len(series)
    if num == 1:
        raise ValueError("Could not fit polynomial to PixelMeanTimeSeries"
                         " object: only one data point available")
    if num == 2:
        # two points only support a first order fit
        logger.warning("PixelMeanTimeSeries object only contains 2 data "
                       "points, setting polyfit order to one (default is 2)")
        order = 1
    xvals = [date2num(idx) for idx in series.index]
    yvals = series.values
    fitted = poly1d(polyfit(xvals, yvals, deg=order))
    self.poly_model = fitted
    return fitted
def load_usgs_multifits_uncompr(file_path, meta=None):
    """Load image and meta data from an uncompressed USGS multi-HDU FITS file.

    Parameters
    ----------
    file_path : str
        path of the FITS file
    meta : dict, optional
        meta information dictionary; ``filter_id`` ("on" or "off")
        selects the HDU (missing key defaults to "on", i.e. HDU 0)

    Returns
    -------
    tuple
        2-element tuple containing image data and (updated) meta dict

    Raises
    ------
    IOError
        if the FITS file cannot be opened / read
    """
    if meta is None:
        meta = {}
    img = None
    if "filter_id" not in meta:
        logger.warning(
            "filter_id (i.e. on or off) in input arg meta not specified."
            "Using default filter_id=on")
        meta["filter_id"] = "on"
    try:
        f = fits.open(file_path)
        # offband data resides in the second HDU
        idx = 1 if meta["filter_id"] == "off" else 0
        hdu = f[idx]
        h = hdu.header
        try:
            meta["start_acq"] = matlab_datenum_to_datetime(h["DATETIME"])
            meta["texp"] = h["EXPTIME"] * h["NUMEXP"] / 1000
            meta["bit_depth"] = h["BITDEPTH"]
        except Exception:  # was a bare except
            print_log.warning(
                "Failed to import image specific meta information from image "
                "HDU")
        # camera position / viewing direction live in the primary header
        h = f[0].header
        try:
            meta["lon"] = h["LON"]
            meta["lat"] = h["LAT"]
            meta["altitude"] = h["ALT"]
            meta["elev"] = h["ELEVANGL"]
            meta["azim"] = h["AZMTANGL"]
        except Exception:  # was a bare except
            print_log.warning(
                "Failed to import camera specific meta information from "
                "primary HDU of FITS file...")
        img = hdu.data
        f.close()
    except Exception as e:
        raise IOError("Failed to import image data using custom method\n"
                      "Error message: %s" % repr(e))
    return (img, meta)
def extract_files_time_ival(self, all_paths):
    """Extract all files belonging to specified time interval.

    :param list all_paths: list of image filepaths
    :return: list of paths within ``self.start`` - ``self.stop``; the
        full input list is returned when acquisition times cannot be
        read from file names
    """
    if not self.camera._fname_access_flags["start_acq"]:
        logger.warning("Acq. time information cannot be accessed from file names")
        return all_paths
    first_acq_time = self.camera.get_img_meta_from_filename(all_paths[0])[0]
    if first_acq_time.date() == date(1900, 1, 1):
        # file names carry time-of-day only
        paths = self._find_files_ival_time_only(all_paths)
    else:
        paths = self._find_files_ival_datetime(all_paths)
    if not bool(paths):
        print_log.warning("Error: no files could be found in specified time "
                          "interval %s - %s" % (self.start, self.stop))
        self.USE_ALL_FILES = True
    else:
        logger.info("%s files of type were found in specified time interval %s "
                    "- %s" % (len(paths), self.start, self.stop))
    return paths
def identify_camera_from_filename(filepath):
    """Identify camera based on image filepath convention.

    Parameters
    ----------
    filepath : str
        valid image file path

    Returns
    -------
    str
        ID of Camera that matches best

    Raises
    ------
    IOError
        Exception is raised if no match can be found
    """
    from pyplis.camera_base_info import CameraBaseInfo
    if not os.path.exists(filepath):
        # identification works on the file name alone, so only warn
        logger.warning("Invalid file path")
    cam_id = None
    all_ids = get_cam_ids()
    max_match_num = 0
    for cid in all_ids:
        cam = CameraBaseInfo(cid)
        cam.get_img_meta_from_filename(filepath)
        matches = sum(list(cam._fname_access_flags.values()))
        if matches > max_match_num:
            max_match_num = matches
            cam_id = cid
    if max_match_num == 0:
        # fix: message previously read "...based on inputfile name" due to
        # a missing space in the implicit string concatenation
        raise IOError("Camera type could not be identified based on input "
                      "file name {}".format(os.path.basename(filepath)))
    return cam_id
def _find_files_ival_datetime(self, all_paths):
    """Extract all files belonging to specified time interval.

    This function considers the datetime stamps of ``self.start`` and
    ``self.stop``, see also :func:`_find_files_ival_time_only` which
    only uses the actual time to find valid files.

    :param list all_paths: list of image filepaths
    """
    get_meta = self.camera.get_img_meta_from_filename
    t_start, t_stop = self.start, self.stop
    paths = [p for p in all_paths
             if t_start <= get_meta(p)[0] <= t_stop]
    if not bool(paths):
        logger.warning("Error: no files could be found in specified time "
                       "interval %s - %s" % (self.start, self.stop))
    else:
        logger.info("%s files of type were found in specified time interval %s "
                    "- %s" % (len(paths), self.start, self.stop))
    return paths
def get_icon(name, color=None):
    """Try to find icon in lib icon folder.

    :param str name: name of icon (i.e. filename is <name>.png)
    :param color (None): color of the icon ("r", "k", "g")

    Returns icon image filepath if valid, False otherwise
    """
    # import propagates ImportError to the caller (the previous
    # try/except that simply re-raised was a no-op)
    from pyplis import _LIBDIR
    subfolders = ["axialis", "myIcons"]
    for subf in subfolders:
        base_path = join(_LIBDIR, "data", "icons", subf)
        if color is not None:
            base_path = join(base_path, color)
        for fname_full in listdir(base_path):
            fname = basename(fname_full).split(".")[0]
            if fname == name:
                # fix: "base_path + file" concatenated directory and file
                # name without a path separator, yielding an invalid path
                return join(base_path, fname_full)
    logger.warning("Failed to load icon at: " + _LIBDIR)
    return False
def closest_index(time_stamp, time_stamps):
    """Find index of time stamp in array to other time stamp.

    Parameters
    ----------
    time_stamp : datetime
        time stamp for which closest match is searched
    time_stamps : iterable
        ordered list of time stamps to be searched (i.e. first index is
        earliest, last is latest)

    Returns
    -------
    int
        index of best match (clipped to the first / last index when the
        input lies outside the covered range, with a warning)
    """
    if time_stamp < time_stamps[0]:
        logger.warning("Time stamp is earlier than first time stamp in array")
        return 0
    if time_stamp > time_stamps[-1]:
        logger.warning("Time stamp is later than last time stamp in array")
        return len(time_stamps) - 1
    deltas = [abs((time_stamp - t).total_seconds()) for t in time_stamps]
    return argmin(deltas)
def check_filename_info_access(self, filepath):
    """Check which information can be accessed from file name.

    The access test is performed based on the filename access
    information specified in the :class:`Camera` object of the
    measurement setup

    Parameters
    ----------
    filepath : str
        valid file path of an example image

    Returns
    -------
    dict
        Dictionary containing information about which meta information
        could be identified from the image file path based on the
        current camera
    """
    # index 4 of the returned tuple holds the error messages
    error_msgs = self.camera.get_img_meta_from_filename(filepath)[4]
    for msg in error_msgs:
        logger.warning(msg)
    return self.camera._fname_access_flags