def load_hdu(self, hdu, fobj=None, naxispath=None, inherit_primary_header=None):
    if self.io is None:
        # need image loader for the fromHDU() call below
        raise ImageError("No IO loader defined")

    self.clear_metadata()

    # collect HDU header
    ahdr = self.get_header()
    self.io.fromHDU(hdu, ahdr)

    # Set PRIMARY header
    if inherit_primary_header is None:
        inherit_primary_header = self.inherit_primary_header
    else:
        # This ensures get_header() is consistent
        self.inherit_primary_header = inherit_primary_header

    save_primary_header = (self.save_primary_header or
                           inherit_primary_header)

    if save_primary_header and (fobj is not None):
        if self._primary_hdr is None:
            self._primary_hdr = AstroHeader()
        self.io.fromHDU(fobj[0], self._primary_hdr)

    self.setup_data(hdu.data, naxispath=naxispath)

    # Try to make a wcs object on the header
    if hasattr(self, 'wcs') and self.wcs is not None:
        self.wcs.load_header(hdu.header, fobj=fobj)
def imresize(self, data, new_wd, new_ht, method='bilinear'):
    """Scale an image in numpy array _data_ to the specified width and
    height.  A smooth scaling is preferred.
    """
    old_ht, old_wd = data.shape[:2]
    start_time = time.time()

    if have_pilutil:
        means = 'PIL'
        zoom_x = float(new_wd) / float(old_wd)
        zoom_y = float(new_ht) / float(old_ht)
        if (old_wd >= new_wd) or (old_ht >= new_ht):
            # data size is bigger, skip pixels
            zoom = max(zoom_x, zoom_y)
        else:
            zoom = min(zoom_x, zoom_y)

        # this calls the module-level imresize() function, not this method
        newdata = imresize(data, zoom, interp=method)

    else:
        from ginga.BaseImage import ImageError
        raise ImageError("No way to scale image smoothly")

    end_time = time.time()
    self.logger.debug("scaling (%s) time %.4f sec" % (
        means, end_time - start_time))

    return newdata
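
# Hedged illustration (not part of the method above): a minimal standalone
# sketch of the same kind of smooth resize using Pillow directly, assuming
# Pillow is installed.  The function name `resize_rgb` and the random test
# array are made up for this example only.
import numpy as np
from PIL import Image as PILimage

def resize_rgb(data, new_wd, new_ht):
    # PIL's resize() takes (width, height); BICUBIC resampling gives a
    # smooth result (also available as PILimage.Resampling.BICUBIC on
    # newer Pillow versions)
    img = PILimage.fromarray(data)
    img = img.resize((new_wd, new_ht), PILimage.BICUBIC)
    return np.array(img)

# usage sketch: upscale a small random RGB array
_arr = (np.random.rand(100, 150, 3) * 255).astype(np.uint8)
_out = resize_rgb(_arr, 300, 200)
assert _out.shape[:2] == (200, 300)   # numpy shape is (height, width, ...)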
def save_file_as(self, filepath, data_np, header):
    # TODO: save keyword metadata!
    if not have_pil:
        raise ImageError("Install 'pillow' to be able "
                         "to save images")

    img = PILimage.fromarray(data_np)
    img.save(filepath)
def save_file_as(self, filepath, data_np, header):
    if not have_pil:
        from ginga.BaseImage import ImageError
        raise ImageError("Install PIL to be able to save images")

    # TODO: save keyword metadata!
    imsave(filepath, data_np)
def _imload(self, filepath, kwds):
    if not have_opencv:
        raise ImageError("Install 'opencv' to be able "
                         "to load images")

    # First choice is OpenCv, because it supports high-bit depth
    # multiband images
    data_np = cv2.imread(filepath,
                         cv2.IMREAD_ANYDEPTH + cv2.IMREAD_ANYCOLOR)

    # funky indexing because opencv returns BGR images,
    # whereas PIL and others return RGB
    if len(data_np.shape) >= 3 and data_np.shape[2] >= 3:
        data_np = data_np[..., ::-1]

    # OpenCv doesn't "do" image metadata, so we punt to piexif
    # library (if installed)
    self.piexif_getexif(filepath, kwds)

    # OpenCv added a feature to do auto-orientation when loading
    # (see https://github.com/opencv/opencv/issues/4344)
    # So reset these values to prevent auto-orientation from
    # happening later
    kwds['Orientation'] = 1
    kwds['Image Orientation'] = 1

    # convert to working color profile, if can
    if self.clr_mgr.can_profile():
        data_np = self.clr_mgr.profile_to_working_numpy(data_np, kwds)

    return data_np
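
# Hedged illustration (not part of the loader above): a tiny, pure-numpy
# check of the BGR -> RGB channel flip that `data_np[..., ::-1]` performs.
# The pixel values are made up for this example.
import numpy as np

_bgr = np.zeros((1, 1, 3), dtype=np.uint8)
_bgr[0, 0] = (255, 0, 0)                  # pure blue in BGR channel order
_rgb = _bgr[..., ::-1]                    # reverse the last (channel) axis
assert tuple(_rgb[0, 0]) == (0, 0, 255)   # same pixel, now blue in RGB order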
def imresize(self, data, new_wd, new_ht, method='bilinear'):
    """Scale an image in numpy array _data_ to the specified width and
    height.  A smooth scaling is preferred.
    """
    # TODO: take into account the method parameter
    old_ht, old_wd = data.shape[:2]
    start_time = time.time()

    if have_opencv:
        # First choice is OpenCv, because it supports high-bit depth
        # multiband images
        means = 'opencv'
        newdata = cv2.resize(data, dsize=(new_wd, new_ht),
                             interpolation=cv2.INTER_CUBIC)

    elif have_pil:
        means = 'PIL'
        img = PILimage.fromarray(data)
        img = img.resize((new_wd, new_ht), PILimage.BICUBIC)
        newdata = np.array(img)

    else:
        raise ImageError("Install 'pillow' or 'opencv' to be able "
                         "to resize RGB images")

    end_time = time.time()
    self.logger.debug("scaling (%s) time %.4f sec" % (
        means, end_time - start_time))

    return newdata
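
# Hedged illustration (not part of the method above): cv2.resize takes
# dsize as (width, height), while the resulting numpy array is indexed
# (height, width, channels).  The test array is made up for this example.
import numpy as np
import cv2

_arr = (np.random.rand(100, 150, 3) * 255).astype(np.uint8)
_out = cv2.resize(_arr, dsize=(300, 200), interpolation=cv2.INTER_CUBIC)
assert _out.shape[:2] == (200, 300)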
def _imload(self, filepath, metadata):
    if not have_pil:
        raise ImageError("Install 'pillow' to be able "
                         "to load RGB images")

    image = PILimage.open(filepath)

    kwds = metadata.get('header', None)
    if kwds is None:
        kwds = Header()
        metadata['header'] = kwds

    try:
        self._get_header(image, kwds)
    except Exception as e:
        self.logger.warning("Failed to get image metadata: {!r}".format(e))

    # convert to working color profile, if can
    if self.clr_mgr.can_profile():
        image = self.clr_mgr.profile_to_working_pil(image, kwds)

    # convert from PIL to numpy
    data_np = np.array(image)
    metadata['order'] = image.mode

    return data_np
def save_file_as(self, filepath, data_np, header):
    # TODO: save keyword metadata!
    if not have_opencv:
        raise ImageError("Install 'opencv' to be able "
                         "to save images")

    # First choice is OpenCv, because it supports high-bit depth
    # multiband images
    cv2.imwrite(filepath, data_np)
def load_file(self, filepath, numhdu=None, naxispath=None):
    self.logger.debug("Loading file '%s' ..." % (filepath))
    self.set(path=filepath)
    fits_f = pyfits.open(filepath, 'readonly')

    # this seems to be necessary now for some fits files...
    try:
        fits_f.verify('fix')
    except Exception as e:
        raise ImageError("Error loading fits file '%s': %s" % (
            filepath, str(e)))
def load_hdu(self, hdu, fobj=None, naxispath=None, inherit_primary_header=None):
    if self.io is None:
        # need image loader for the fromHDU() call below
        raise ImageError("No IO loader defined")

    self.clear_metadata()

    # collect HDU header
    ahdr = self.get_header()
    self.io.fromHDU(hdu, ahdr)

    # Set PRIMARY header
    if inherit_primary_header is None:
        inherit_primary_header = self.inherit_primary_header

    if inherit_primary_header and (fobj is not None):
        if self._primary_hdr is None:
            self._primary_hdr = AstroHeader()
        self.io.fromHDU(fobj[0], self._primary_hdr)

    data = hdu.data
    if data is None:
        data = numpy.zeros((0, 0))
    elif not isinstance(data, numpy.ndarray):
        data = numpy.zeros((0, 0))
    elif 0 in data.shape:
        data = numpy.zeros((0, 0))
    elif len(data.shape) < 2:
        # Expand 1D arrays into 1xN array
        data = data.reshape((1, data.shape[0]))

    # this is a handle to the full data array
    self._md_data = data

    # this will get reset in set_naxispath() if array is
    # multidimensional
    self._data = data

    if naxispath is None:
        naxispath = []

    # Set naxispath to drill down to 2D data slice
    if len(naxispath) == 0:
        naxispath = ([0] * (len(data.shape) - 2))

    self.set_naxispath(naxispath)

    # Try to make a wcs object on the header
    self.wcs.load_header(hdu.header, fobj=fobj)
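
# Hedged usage sketch (not part of the method above): exercising load_hdu()
# with an in-memory HDU.  Assumptions: astropy and ginga are installed, the
# import path ginga.AstroImage.AstroImage is correct for the installed
# version, and the class wires up a default FITS io loader so self.io is set.
import logging
import numpy as np
from astropy.io import fits
from ginga.AstroImage import AstroImage

_logger = logging.getLogger('example')
_hdu = fits.PrimaryHDU(data=np.arange(12.0).reshape((3, 4)))
_hdulist = fits.HDUList([_hdu])

_image = AstroImage(logger=_logger)
_image.load_hdu(_hdulist[0], fobj=_hdulist)
print(_image.get_data().shape)   # -> (3, 4)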
def _imload(self, filepath, metadata):
    if not have_opencv:
        raise ImageError("Install 'opencv' to be able "
                         "to load images")

    # First choice is OpenCv, because it supports high-bit depth
    # multiband images
    data_np = cv2.imread(filepath,
                         cv2.IMREAD_ANYDEPTH + cv2.IMREAD_ANYCOLOR)

    return self._process_opencv_array(data_np, metadata, filepath)
def _imresize(self, data, new_wd, new_ht, method='bilinear'):
    # TODO: take into account the method parameter
    if not have_opencv:
        raise ImageError("Install 'opencv' to be able "
                         "to resize RGB images")

    # First choice is OpenCv, because it supports high-bit depth
    # multiband images
    newdata = cv2.resize(data, dsize=(new_wd, new_ht),
                         interpolation=cv2.INTER_CUBIC)

    return newdata
def _imresize(self, data, new_wd, new_ht, method='bilinear'):
    # TODO: take into account the method parameter
    if not have_pil:
        raise ImageError("Install 'pillow' to be able "
                         "to resize RGB images")

    img = PILimage.fromarray(data)
    img = img.resize((new_wd, new_ht), PILimage.BICUBIC)
    newdata = np.array(img)

    return newdata
def save_file_as(self, filepath, data_np, header):
    # TODO: save keyword metadata!
    if not have_pil:
        raise ImageError("Install 'pillow' to be able "
                         "to save images")

    img = PILimage.fromarray(data_np)
    # pillow is not happy saving images to JPG with an alpha channel
    img = img.convert('RGB')
    img.save(filepath)
def save_file_as(self, filepath, data_np, header):
    # TODO: save keyword metadata!
    if have_opencv:
        # First choice is OpenCv, because it supports high-bit depth
        # multiband images
        cv2.imwrite(filepath, data_np)

    elif have_pil:
        img = PILimage.fromarray(data_np)
        img.save(filepath)

    else:
        raise ImageError("Install 'pillow' or 'opencv' to be able "
                         "to save images")
def _imload(self, filepath, kwds): """Load an image file, guessing the format, and return a numpy array containing an RGB image. If EXIF keywords can be read they are returned in the dict _kwds_. """ start_time = time.time() typ, enc = mimetypes.guess_type(filepath) if not typ: typ = 'image/jpeg' typ, subtyp = typ.split('/') self.logger.debug("MIME type is %s/%s" % (typ, subtyp)) if (typ == 'image') and (subtyp in ('x-portable-pixmap', 'x-portable-greymap')): # Special opener for PPM files, preserves high bit depth means = 'built-in' data_np = open_ppm(filepath) elif have_pil: means = 'PIL' image = PILimage.open(filepath) try: if hasattr(image, '_getexif'): info = image._getexif() if info is not None: for tag, value in info.items(): kwd = TAGS.get(tag, tag) kwds[kwd] = value except Exception as e: self.logger.warning("Failed to get image metadata: %s" % (str(e))) # convert to working color profile, if can if self.clr_mgr.can_profile(): image = self.clr_mgr.profile_to_working_pil(image, kwds) # convert from PIL to numpy data_np = np.array(image) else: from ginga.BaseImage import ImageError raise ImageError("No way to load image format '%s/%s'" % ( typ, subtyp)) end_time = time.time() self.logger.debug("loading (%s) time %.4f sec" % ( means, end_time - start_time)) return data_np
def set_naxispath(self, naxispath): """Choose a slice out of multidimensional data. """ revnaxis = list(naxispath) revnaxis.reverse() # construct slice view and extract it view = revnaxis + [slice(None), slice(None)] data = self.get_mddata()[view] if len(data.shape) != 2: raise ImageError( "naxispath does not lead to a 2D slice: {}".format(naxispath)) self.naxispath = naxispath self.revnaxis = revnaxis self.set_data(data)
def _imload(self, filepath, kwds):
    if not have_pil:
        raise ImageError("Install 'pillow' to be able "
                         "to load RGB images")

    image = PILimage.open(filepath)

    kwds = {}
    try:
        self._get_header(image, kwds)
    except Exception as e:
        self.logger.warning("Failed to get image metadata: %s" % (str(e)))

    # convert to working color profile, if can
    if self.clr_mgr.can_profile():
        image = self.clr_mgr.profile_to_working_pil(image, kwds)

    # convert from PIL to numpy
    data_np = np.array(image)

    return data_np
def set_naxispath(self, naxispath): """Choose a slice out of multidimensional data. """ revnaxis = list(naxispath) revnaxis.reverse() # construct slice view and extract it ndim = min(self.ndim, 2) view = tuple(revnaxis + [slice(None)] * ndim) data = self.get_mddata()[view] if len(data.shape) not in (1, 2): raise ImageError( "naxispath does not lead to a 1D or 2D slice: {}".format( naxispath)) self.naxispath = naxispath self.revnaxis = revnaxis self.set_data(data)
def open_file(self, filespec, **kwargs):
    info = iohelper.get_fileinfo(filespec)
    if not info.ondisk:
        raise ImageError("File does not appear to be on disk: %s" % (
            info.url))

    self.fileinfo = info
    filepath = info.filepath
    self._path = filepath

    self.rgb_f = PILimage.open(filepath)

    idx = 0
    extver_db = {}
    self.hdu_info = []
    self.hdu_db = {}

    numframes = getattr(self.rgb_f, 'n_frames', 1)
    self.logger.info("number of frames: {}".format(numframes))

    for idx in range(numframes):
        name = "frame{}".format(idx)
        extver = 0

        # prepare a record of pertinent info about the HDU for
        # lookups by numerical index or (NAME, EXTVER)
        d = Bunch.Bunch(index=idx, name=name, extver=extver,
                        dtype='uint8', htype='N/A')
        self.hdu_info.append(d)

        # different ways of accessing this HDU:
        # by numerical index
        self.hdu_db[idx] = d

        # by (hduname, extver)
        key = (name, extver)
        if key not in self.hdu_db:
            self.hdu_db[key] = d

    self.extver_db = extver_db
    return self
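
# Hedged illustration (not part of the method above): Pillow exposes the
# number of frames in a multi-frame image via the n_frames attribute,
# falling back to 1 when the attribute is absent, which is what the loop
# above iterates over.  'animation.gif' is a made-up file name.
from PIL import Image as PILimage

_im = PILimage.open('animation.gif')
print(getattr(_im, 'n_frames', 1))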
def _imresize(self, data, new_wd, new_ht, method='bilinear'): """Scale an image in numpy array _data_ to the specified width and height. A smooth scaling is preferred. """ old_ht, old_wd = data.shape[:2] start_time = time.time() if have_qtimage: # QImage method is slightly faster and gives a smoother looking # result than PIL means = 'QImage' qimage = numpy2qimage(data) qimage = qimage.scaled( new_wd, new_ht, transformMode=QtCore.Qt.SmoothTransformation) newdata = qimage2numpy(qimage) elif have_pilutil: means = 'PIL' zoom_x = float(new_wd) / float(old_wd) zoom_y = float(new_ht) / float(old_ht) if (old_wd >= new_wd) or (old_ht >= new_ht): # data size is bigger, skip pixels zoom = max(zoom_x, zoom_y) else: zoom = min(zoom_x, zoom_y) newdata = pilutil.imresize(data, zoom, interp=method) else: raise ImageError("No way to scale image smoothly") end_time = time.time() self.logger.debug("scaling (%s) time %.4f sec" % (means, end_time - start_time)) return newdata
        except Exception as e:
            self.logger.error(
                "Error converting from embedded color profile: %s" % (str(e)))
            self.logger.warn("Leaving image unprofiled.")

        data_np = numpy.array(image)

    elif have_qtimage:
        means = 'QImage'
        qimage = QImage()
        qimage.load(filepath)
        data_np = qimage2numpy(qimage)

    else:
        raise ImageError("No way to load image format '%s/%s'" % (
            typ, subtyp))

    end_time = time.time()
    self.logger.debug("loading (%s) time %.4f sec" % (
        means, end_time - start_time))

    return data_np

def imload(self, filepath, kwds):
    return self._imload(filepath, kwds)

def _imresize(self, data, new_wd, new_ht, method='bilinear'):
    """Scale an image in numpy array _data_ to the specified width and
    height.  A smooth scaling is preferred.
    """
    old_ht, old_wd = data.shape[:2]
    start_time = time.time()
def _imload(self, filepath, kwds): """Load an image file, guessing the format, and return a numpy array containing an RGB image. If EXIF keywords can be read they are returned in the dict _kwds_. """ start_time = time.time() typ, enc = mimetypes.guess_type(filepath) if not typ: typ = 'image/jpeg' typ, subtyp = typ.split('/') self.logger.debug("MIME type is %s/%s" % (typ, subtyp)) data_loaded = False if have_opencv and subtyp not in ['gif']: # First choice is OpenCv, because it supports high-bit depth # multiband images means = 'opencv' data_np = cv2.imread(filepath, cv2.IMREAD_ANYDEPTH + cv2.IMREAD_ANYCOLOR) if data_np is not None: data_loaded = True # funky indexing because opencv returns BGR images, # whereas PIL and others return RGB if len(data_np.shape) >= 3 and data_np.shape[2] >= 3: data_np = data_np[..., ::-1] # OpenCv doesn't "do" image metadata, so we punt to piexif # library (if installed) self.piexif_getexif(filepath, kwds) # OpenCv added a feature to do auto-orientation when loading # (see https://github.com/opencv/opencv/issues/4344) # So reset these values to prevent auto-orientation from # happening later kwds['Orientation'] = 1 kwds['Image Orientation'] = 1 # convert to working color profile, if can if self.clr_mgr.can_profile(): data_np = self.clr_mgr.profile_to_working_numpy( data_np, kwds) if not data_loaded and have_pil: means = 'PIL' image = PILimage.open(filepath) try: if hasattr(image, '_getexif'): info = image._getexif() if info is not None: for tag, value in info.items(): kwd = TAGS.get(tag, tag) kwds[kwd] = value elif have_exif: self.piexif_getexif(image.info["exif"], kwds) else: self.logger.warning( "Please install 'piexif' module to get image metadata") except Exception as e: self.logger.warning("Failed to get image metadata: %s" % (str(e))) # convert to working color profile, if can if self.clr_mgr.can_profile(): image = self.clr_mgr.profile_to_working_pil(image, kwds) # convert from PIL to numpy data_np = np.array(image) if data_np is not None: data_loaded = True if (not data_loaded and (typ == 'image') and (subtyp in ('x-portable-pixmap', 'x-portable-greymap'))): # Special opener for PPM files, preserves high bit depth means = 'built-in' data_np = open_ppm(filepath) if data_np is not None: data_loaded = True if not data_loaded: raise ImageError("No way to load image format '%s/%s'" % (typ, subtyp)) end_time = time.time() self.logger.debug("loading (%s) time %.4f sec" % (means, end_time - start_time)) return data_np
def load_file(self, filespec, **kwargs):
    if self.io is None:
        raise ImageError("No IO loader defined")

    self.io.load_file(filespec, dstobj=self, **kwargs)
class AstroImage(BaseImage):
    """
    Abstraction of an astronomical data (image).

    NOTE: this module is NOT thread-safe!
    """

    def __init__(self, data_np=None, metadata=None, wcsclass=None,
                 logger=None):
        if not wcsclass:
            wcsclass = wcs.WCS
        self.wcs = wcsclass()

        BaseImage.__init__(self, data_np=data_np, metadata=metadata,
                           logger=logger)

        self.iqcalc = iqcalc.IQCalc(logger=logger)

    def load_hdu(self, hdu, fobj=None, naxispath=None):
        data = hdu.data
        if len(data.shape) < 2:
            # Expand 1D arrays into 1xN array
            data = data.reshape((1, data.shape[0]))
        else:
            if not naxispath:
                naxispath = ([0] * (len(data.shape) - 2))

            for idx in naxispath:
                data = data[idx]

        self.set_data(data)

        # Load in FITS header
        self.update_keywords(hdu.header)
        # Preserve the ordering of the FITS keywords in the FITS file
        keyorder = [key for key, val in hdu.header.items()]
        self.set(keyorder=keyorder)

        # Try to make a wcs object on the header
        self.wcs.load_header(hdu.header, fobj=fobj)

    def load_file(self, filepath, numhdu=None, naxispath=None):
        self.logger.debug("Loading file '%s' ..." % (filepath))
        self.set(path=filepath)
        fits_f = pyfits.open(filepath, 'readonly')

        # this seems to be necessary now for some fits files...
        try:
            fits_f.verify('fix')
        except Exception as e:
            raise ImageError("Error loading fits file '%s': %s" % (
                filepath, str(e)))

        if numhdu is None:
            found_valid_hdu = False
            for i in range(len(fits_f)):
                hdu = fits_f[i]
                if hdu.data is None:
                    # compressed FITS file or non-pixel data hdu?
                    continue
                if not isinstance(hdu.data, numpy.ndarray):
                    # We need to open a numpy array
                    continue
                # print("data type is %s" % hdu.data.dtype.kind)
                # Looks good, let's try it
                found_valid_hdu = True
                break

            if not found_valid_hdu:
                raise ImageError(
                    "No data HDU found that Ginga can open in '%s'" % (
                        filepath))
        else:
            hdu = fits_f[numhdu]

        self.load_hdu(hdu, fobj=fits_f, naxispath=naxispath)

        # Set the name to the filename (minus extension) if no name
        # currently exists for this image
        name = self.get('name', None)
        if name is None:
            dirpath, filename = os.path.split(filepath)
            name, ext = os.path.splitext(filename)
            self.set(name=name)

        fits_f.close()
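
# Hedged usage sketch (not part of the class above): write a small FITS file
# with astropy and load it through load_file().  The file name and logger are
# made up, and the class's own module-level imports (pyfits, numpy, os, wcs,
# iqcalc) are assumed to be available.
import logging
import numpy
from astropy.io import fits

fits.writeto('example.fits', numpy.arange(100.0).reshape((10, 10)),
             overwrite=True)

image = AstroImage(logger=logging.getLogger('example'))
image.load_file('example.fits')
print(image.get('name'), image.get_data().shape)   # -> example (10, 10)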
def get_data_xy(self, x, y):
    data = self.get_data()
    assert (x >= 0) and (y >= 0), \
        ImageError("Indexes out of range: (x=%d, y=%d)" % (x, y))
    return data[y, x]
def save_file_as(self, filepath):
    if not have_pil:
        raise ImageError("Install PIL to be able to save images")

    data = self.get_data()
    imsave(filepath, data)