def setImage(self, lut, colStart, colEnd, cmapInv):
    """Render self.spec (a piece of spectrogram) into this button's image.

    The colormap levels are [colStart, colEnd], reversed when cmapInv is
    set.  Cluster buttons get a fixed 200x150 thumbnail; otherwise the
    image is scaled to width 500 and the unbuffered-segment marker lines
    are positioned in the scaled coordinate system.
    """
    levels = [colEnd, colStart] if cmapInv else [colStart, colEnd]
    argb, alpha = fn.makeARGB(self.spec, lut=lut, levels=levels)
    qimg = fn.makeQImage(argb, alpha)
    if qimg.size().width() == 0:
        print("ERROR: button not shown, likely bad spectrogram coordinates")
        return

    # hardcode all image sizes
    if self.cluster:
        self.im1 = qimg.scaled(200, 150)
    else:
        self.specReductionFact = qimg.size().width() / 500
        self.im1 = qimg.scaled(500, qimg.size().height())
        # draw lines marking the unbuffered segment, adjusted for scaling
        startAdj = self.unbufStart / self.specReductionFact
        stopAdj = self.unbufStop / self.specReductionFact
        height = qimg.size().height()
        self.line1 = QLineF(startAdj, 0, startAdj, height)
        self.line2 = QLineF(stopAdj, 0, stopAdj, height)
def time_test(self):
    """Benchmark one makeARGB call for the injected dtype/lut/levels combo.

    `dtype`, `lut_name` and `use_levels` are benchmark parameters injected
    at module scope by the benchmark harness.
    """
    data = getattr(self, dtype + '_data')
    # BUG FIX: `use_levels and data['levels']` evaluated to False (not
    # None) when use_levels was falsy; makeARGB only treats None as
    # "no levels", so pass None explicitly.
    makeARGB(
        data['data'],
        lut=getattr(self, lut_name + '_lut', None),
        levels=data['levels'] if use_levels else None,
    )
def setup(self):
    """Allocate benchmark inputs/outputs and warm up the code paths."""
    size = (self.size, self.size)
    (self.float_data,
     self.uint16_data,
     self.uint8_data,
     self.uint16_lut,
     self.uint8_lut) = self._create_data(size, np)
    self.output = np.zeros(size + (4,), dtype=np.ubyte)
    # Warm-up calls so the timed runs exclude one-time setup cost.
    makeARGB(self.uint16_data["data"])  # prime the cpu
    if cp:
        self.cupy_output = cp.zeros(size + (4,), dtype=cp.ubyte)
        makeARGB(cp.asarray(self.uint16_data["data"]))  # prime the gpu
def paintGL(self):
    """Draw the cached image as a full-viewport textured quad.

    Lazily converts the stored (img, args, kwds) opts into an RGBA image
    and uploads it as a texture on first paint.
    """
    if self.image is None:
        if self.opts is None:
            return
        img, args, kwds = self.opts
        kwds['useRGBA'] = True
        self.image, alpha = fn.makeARGB(img, *args, **kwds)
    if not self.uploaded:
        self.uploadTexture()

    glViewport(0, 0, self.width(), self.height())
    glEnable(GL_TEXTURE_2D)
    glBindTexture(GL_TEXTURE_2D, self.texture)
    glColor4f(1, 1, 1, 1)
    glBegin(GL_QUADS)
    glTexCoord2f(0, 0)
    glVertex3f(-1, -1, 0)
    glTexCoord2f(1, 0)
    glVertex3f(1, -1, 0)
    glTexCoord2f(1, 1)
    glVertex3f(1, 1, 0)
    glTexCoord2f(0, 1)
    glVertex3f(-1, 1, 0)
    glEnd()
    # BUG FIX: was glDisable(GL_TEXTURE_3D), which never disabled the
    # GL_TEXTURE_2D target enabled above and left 2D texturing on.
    glDisable(GL_TEXTURE_2D)
def paintEvent(self, ev):
    """Paint the cached image, building it lazily from stored opts.

    When self.scaled is set, the image is drawn into the largest
    aspect-ratio-preserving rectangle that fits the widget; otherwise it
    is drawn unscaled at the origin.
    """
    if self.opts is None:
        return
    if self.image is None:
        argb, alpha = fn.makeARGB(self.opts[0], *self.opts[1], **self.opts[2])
        self.image = fn.makeQImage(argb, alpha)
        self.opts = ()
    painter = QtGui.QPainter(self)
    if not self.scaled:
        painter.drawImage(QtCore.QPointF(), self.image)
    else:
        target = self.rect()
        widget_ratio = target.width() / float(target.height())
        image_ratio = self.image.width() / float(self.image.height())
        # Shrink whichever dimension overshoots the image's aspect ratio.
        if widget_ratio > image_ratio:
            target.setWidth(int(target.width() * image_ratio / widget_ratio))
        else:
            target.setHeight(int(target.height() * widget_ratio / image_ratio))
        painter.drawImage(target, self.image)
    painter.end()
def _draw_ndi():
    # Placeholder 2x2 image filled with -inf (the "no data" sentinel
    # used elsewhere in this renderer).
    # BUG FIX: np.NINF was removed in NumPy 2.0; -np.inf is the exact
    # same value and works on all NumPy versions.
    img_data = -np.inf * np.ones((2, 2))
    argb, alpha = pgfuncs.makeARGB(img_data,
                                   lut=self._lut,
                                   levels=self._lut_levels,
                                   )
    self._raw_image = pgfuncs.makeQImage(argb, alpha, transpose=False)
def time_test(self):
    """Benchmark repeated makeARGB calls into a preallocated output buffer.

    `dtype`, `lut_name`, `use_levels` and `use_cupy` are benchmark
    parameters injected at module scope by the benchmark harness.
    """
    data = getattr(self, dtype + "_data")
    levels = data["levels"] if use_levels else None
    lut = getattr(self, lut_name + "_lut", None) if lut_name is not None else None
    for _ in range(10):
        src = data["data"]
        dest = self.output
        if use_cupy:
            # Move input and output to the GPU for the cupy variant.
            src = cp.asarray(src)
            dest = self.cupy_output
        makeARGB(src, lut=lut, levels=levels, output=dest)
        if use_cupy:
            # Copy the GPU result back so the transfer cost is timed too.
            dest.get(out=self.output)
def render(self):
    """Convert self.image into self.qimage using the current lut/levels.

    A callable lut is evaluated against the image first; no-op when there
    is no image.
    """
    prof = debug.Profiler('ImageItem.render', disabled=True)
    if self.image is None:
        return
    lut = self.lut(self.image) if callable(self.lut) else self.lut
    argb, alpha = fn.makeARGB(self.image, lut=lut, levels=self.levels)
    self.qimage = fn.makeQImage(argb, alpha)
    prof.finish()
def render(self):
    """Convert self.image into self.qimage using the current lut/levels."""
    prof = debug.Profiler('ImageItem.render', disabled=True)
    if self.image is None:
        return
    # BUG FIX: isinstance(x, collections.Callable) relied on an alias
    # removed in Python 3.10 (use collections.abc / builtin callable()).
    # Builtin callable() matches the sibling render() implementations.
    if callable(self.lut):
        lut = self.lut(self.image)
    else:
        lut = self.lut
    argb, alpha = fn.makeARGB(self.image, lut=lut, levels=self.levels)
    self.qimage = fn.makeQImage(argb, alpha)
    prof.finish()
def render(self):
    """Rebuild self.qimage from self.image with the active lut and levels."""
    prof = debug.Profiler('ImageItem.render', disabled=True)
    if self.image is None:
        return
    # Evaluate a callable lut against the image data.
    if callable(self.lut):
        table = self.lut(self.image)
    else:
        table = self.lut
    rgba, has_alpha = fn.makeARGB(self.image, lut=table, levels=self.levels)
    self.qimage = fn.makeQImage(rgba, has_alpha)
    prof.finish()
def _create_image(self, img_data):
    """This is the new image handler in the render pipeline.

    Regenerates a completely new image, given the new img_data, and
    (re)initializes the doubled-up ring buffer that backs scrolling.
    Must run on the render thread; all state mutation happens under
    self._mutex.
    """
    assert threading.current_thread().name == self._render_thread_name
    assert img_data.ndim == 2
    dlog("_create_image called and re-assigning raw image dimensions")
    with self._mutex:
        #self._raw_image_height is dictated by the widget height, but the
        #raw img width comes from the underlying data model...
        self._raw_image_width = img_data.shape[1]
        #only keep a record of those rows that have data...
        # - something is backwards here... we should be able to have the
        #   real data instead of going backwards from img data.
        # - FIXME
        # BUG FIX: np.NINF was removed in NumPy 2.0; -np.inf is the same
        # sentinel value (rows whose first element is -inf are "empty").
        populated_row_filter = img_data[:, 0] != -np.inf
        populated_img_data = img_data[populated_row_filter, :]
        self._src_data = collections.deque(populated_img_data)

        argb, alpha = pgfuncs.makeARGB(img_data,
                                       lut=self._lut,
                                       levels=self._lut_levels,
                                       )
        tmp_img = pgfuncs.makeQImage(argb, alpha, transpose=False)

        # We now have a fully generated tmp_image. We need to prep the
        # doubled-up ring buffer by setting up two copies and initializing
        # our current frame pointer offset...
        self.__ring_buffer = np.vstack((tmp_img.data, tmp_img.data))
        dlog("ring buffer size set to %r" % (self.__ring_buffer.shape, ))
        self.__cur_buffer_row = self._raw_image_height
        #set our _qimage to point at the proper location in the ring buffer...
        self._point_raw_image_at_cur_offset()
        self._image_ready = True
def paintEvent(self, ev):
    """Draw the cached image; build it on first paint from stored opts."""
    if self.opts is None:
        return
    if self.image is None:
        # First paint: convert the queued (data, args, kwds) into a QImage.
        argb, alpha = fn.makeARGB(self.opts[0], *self.opts[1], **self.opts[2])
        self.image = fn.makeQImage(argb, alpha)
        self.opts = ()
    p = QtGui.QPainter(self)
    if self.scaled:
        rect = self.rect()
        # Letterbox: fit the image inside the widget without distortion.
        ar = rect.width() / float(rect.height())
        imar = self.image.width() / float(self.image.height())
        if ar > imar:
            rect.setWidth(int(rect.width() * imar / ar))
        else:
            rect.setHeight(int(rect.height() * ar / imar))
        p.drawImage(rect, self.image)
    else:
        p.drawImage(QtCore.QPointF(), self.image)
    p.end()
def produceARGB(self):
    """Worker-thread slot: render the stored image to a QImage and emit it.

    Snapshots the current image/lut/levels/view state, optionally renders
    only the visible sub-region, and emits the result via qimageProduced.
    The _numQueuedImages counter (guarded by _numQueuedImagesMutex) tracks
    pending requests; the decrement in `finally` must run on every path,
    including the frame-skip early return.
    """
    try:
        if self._numQueuedImages > 1:
            return  # Skip this frame in order to catch up
        # Snapshot state in one assignment so a concurrent update cannot
        # mix fields from two different requests.
        image, lut, levels, viewBounds, onlyRenderVisible = (
            self._image, self._lut, self._levels, self._viewBounds,
            self._onlyRenderVisible)
        if onlyRenderVisible:
            # Only render the part of the image that is visible
            originalImageShape = image.shape
            # viewBounds is ((x0, x1), (y0, y1)); rows come from index 1,
            # columns from index 0, expanded outward to whole pixels.
            renderBounds = (math.floor(viewBounds[1][0]),
                            math.ceil(viewBounds[1][1]),
                            math.floor(viewBounds[0][0]),
                            math.ceil(viewBounds[0][1]))
            image = image[renderBounds[0]:renderBounds[1],
                          renderBounds[2]:renderBounds[3]]
        argb, alpha = fn.makeARGB(image, lut=lut, levels=levels)
        if onlyRenderVisible:
            # Paste the rendered patch back into a full-size (zeroed) frame
            # so the emitted QImage keeps the original geometry.
            argbFull = np.zeros((*originalImageShape, argb.shape[2]),
                                dtype=argb.dtype)
            argbFull[renderBounds[0]:renderBounds[1],
                     renderBounds[2]:renderBounds[3]] = argb
        else:
            argbFull = argb
        qimage = fn.makeQImage(argbFull, alpha, transpose=False)
        self.qimageProduced.emit(qimage)
    finally:
        # Always release our slot in the queue, even on skip or error.
        self._numQueuedImagesMutex.lock()
        self._numQueuedImages -= 1
        self._numQueuedImagesMutex.unlock()
def render(self):
    """Convert self.image to self.qimage for display.

    Applies (in order): optional log scaling, optional auto-downsampling
    to screen resolution, folding of levels+lut into one effective lut for
    small integer dtypes, axis-order transposition, and finally
    makeARGB/makeQImage.
    """
    profile = debug.Profiler()
    if self.image is None or self.image.size == 0:
        return
    # BUG FIX: isinstance(x, collections.Callable) relied on an alias
    # removed in Python 3.10; builtin callable() is the portable spelling.
    if callable(self.lut):
        lut = self.lut(self.image)
    else:
        lut = self.lut

    if self.logScale:
        image = self.image + 1
        with np.errstate(invalid="ignore"):
            # BUG FIX: np.float was removed in NumPy 1.24; plain float is
            # the documented replacement (float64).
            image = image.astype(float)
            np.log(image, where=image >= 0, out=image)  # map to 0-255
    else:
        image = self.image

    if self.autoDownsample:
        # reduce dimensions of image based on screen resolution
        o = self.mapToDevice(QPointF(0, 0))
        x = self.mapToDevice(QPointF(1, 0))
        y = self.mapToDevice(QPointF(0, 1))
        w = Point(x - o).length()
        h = Point(y - o).length()
        if w == 0 or h == 0:
            self.qimage = None
            return
        xds = max(1, int(1.0 / w))
        yds = max(1, int(1.0 / h))
        axes = [1, 0] if self.axisOrder == "row-major" else [0, 1]
        image = fn.downsample(image, xds, axis=axes[0])
        image = fn.downsample(image, yds, axis=axes[1])
        self._lastDownsample = (xds, yds)

    # if the image data is a small int, then we can combine levels + lut
    # into a single lut for better performance
    levels = self.levels
    if levels is not None and levels.ndim == 1 and image.dtype in (
            np.ubyte, np.uint16):
        if self._effectiveLut is None:
            eflsize = 2**(image.itemsize * 8)
            ind = np.arange(eflsize)
            minlev, maxlev = levels
            levdiff = maxlev - minlev
            levdiff = 1 if levdiff == 0 else levdiff  # don't allow division by 0
            if lut is None:
                efflut = fn.rescaleData(ind,
                                        scale=255.0 / levdiff,
                                        offset=minlev,
                                        dtype=np.ubyte)
            else:
                lutdtype = np.min_scalar_type(lut.shape[0] - 1)
                efflut = fn.rescaleData(ind,
                                        scale=(lut.shape[0] - 1) / levdiff,
                                        offset=minlev,
                                        dtype=lutdtype,
                                        clip=(0, lut.shape[0] - 1))
                efflut = lut[efflut]
            self._effectiveLut = efflut
        lut = self._effectiveLut
        levels = None

    # Assume images are in column-major order for backward compatibility
    # (most images are in row-major order)
    if self.axisOrder == "col-major":
        image = image.transpose((1, 0, 2)[:image.ndim])

    if self.logScale:
        # Levels must undergo the same log transform as the image data;
        # clamp the lower level at 0 since log output starts there.
        with np.errstate(invalid="ignore"):
            levels = np.log(np.add(levels, 1))
        levels[0] = np.nanmax([levels[0], 0])

    argb, alpha = fn.makeARGB(image, lut=lut, levels=levels)
    self.qimage = fn.makeQImage(argb, alpha, transpose=False)
def render(self):
    """Convert self.image to self.qimage for display.

    Applies (in order): lut selection for single-channel data, optional
    auto-downsampling to screen resolution, folding of levels+lut into one
    effective lut for small integer dtypes, single-channel squeeze,
    axis-order transposition, makeARGB, and an optional alpha override.
    """
    profile = debug.Profiler()
    if self.image is None or self.image.size == 0:
        return
    # Request a lookup table if this image has only one channel
    if self.image.ndim == 2 or self.image.shape[2] == 1:
        # BUG FIX: isinstance(x, collections.Callable) relied on an alias
        # removed in Python 3.10; builtin callable() is the portable form.
        if callable(self.lut):
            lut = self.lut(self.image)
        else:
            lut = self.lut
    else:
        lut = None

    if self.autoDownsample:
        # reduce dimensions of image based on screen resolution
        o = self.mapToDevice(QtCore.QPointF(0, 0))
        x = self.mapToDevice(QtCore.QPointF(1, 0))
        y = self.mapToDevice(QtCore.QPointF(0, 1))
        w = Point(x - o).length()
        h = Point(y - o).length()
        if w == 0 or h == 0:
            self.qimage = None
            return
        xds = max(1, int(1.0 / w))
        yds = max(1, int(1.0 / h))
        axes = [1, 0] if self.axisOrder == 'row-major' else [0, 1]
        image = fn.downsample(self.image, xds, axis=axes[0])
        image = fn.downsample(image, yds, axis=axes[1])
        self._lastDownsample = (xds, yds)
    else:
        image = self.image

    # if the image data is a small int, then we can combine levels + lut
    # into a single lut for better performance
    levels = self.levels
    if levels is not None and levels.ndim == 1 and image.dtype in (np.ubyte,
                                                                   np.uint16):
        if self._effectiveLut is None:
            eflsize = 2**(image.itemsize * 8)
            ind = np.arange(eflsize)
            minlev, maxlev = levels
            levdiff = maxlev - minlev
            levdiff = 1 if levdiff == 0 else levdiff  # don't allow division by 0
            if lut is None:
                efflut = fn.rescaleData(ind, scale=255. / levdiff,
                                        offset=minlev, dtype=np.ubyte)
            else:
                lutdtype = np.min_scalar_type(lut.shape[0] - 1)
                efflut = fn.rescaleData(ind, scale=(lut.shape[0] - 1) / levdiff,
                                        offset=minlev, dtype=lutdtype,
                                        clip=(0, lut.shape[0] - 1))
                efflut = lut[efflut]
            self._effectiveLut = efflut
        lut = self._effectiveLut
        levels = None

    # Convert single-channel image to 2D array
    if image.ndim == 3 and image.shape[-1] == 1:
        image = image[..., 0]

    # Assume images are in column-major order for backward compatibility
    # (most images are in row-major order)
    if self.axisOrder == 'col-major':
        image = image.transpose((1, 0, 2)[:image.ndim])

    argb, alpha = fn.makeARGB(image, lut=lut, levels=levels)
    if self.alpha is not None:
        # Override the alpha channel with the externally supplied mask.
        argb[:, :, 3] = self.alpha.T
    self.qimage = fn.makeQImage(argb, True, transpose=False)