def test_viewer_at_50percent():
    v = VigraQt.QImageViewer()
    v.setImage(qimg)
    v.setZoomLevel(-1)
    assert v.zoomFactor() == 0.5
    v.resize(qimg.size() * 2)
    out = getWidgetImage(v)
    assert numpy.all(rgb_view(out)[96:-96, 96:-96] == rgb_view(reference)[::2, ::2])
def test_viewer_at_100percent():
    v = VigraQt.QImageViewer()
    v.setImage(qimg)
    assert v.zoomFactor() == 1.0
    assert v.sizeHint() == qimg.size()
    v.resize(qimg.size())
    out = getWidgetImage(v)
    assert numpy.all(rgb_view(out) == rgb_view(reference))
def test_viewer_at_400percent():
    v = VigraQt.QImageViewer()
    v.setImage(qimg)
    v.setZoomLevel(3)
    assert v.zoomFactor() == 4.0
    v.resize(qimg.size() * 2)
    out = getWidgetImage(v)
    for yo in range(4):
        for xo in range(4):
            assert numpy.all(rgb_view(out)[xo::4, yo::4] == rgb_view(reference)[32:-32, 32:-32])
def MakeDiffImage(self, input, last):
    # input is the current video frame, last is the last grabbed frame;
    # build a QImage holding the difference of these two QImages
    import numpy as np
    import qimage2ndarray
    npinput = qimage2ndarray.rgb_view(input)
    nplast = qimage2ndarray.rgb_view(last)
    # absolute per-channel difference, computed in int to avoid uint8 wrap-around
    npdiff = np.abs(npinput.astype(int) - nplast.astype(int)).astype(np.uint8)
    qImage = qimage2ndarray.array2qimage(npdiff, normalize=False)  # create QImage from ndarray
    return qImage
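# Illustration (not from the original code): round-trip check of the
# array2qimage()/rgb_view() pair used in MakeDiffImage above. For uint8 RGB
# input with normalize=False, the conversion is lossless.
def demo_array_roundtrip():
    import numpy as np
    import qimage2ndarray
    arr = np.zeros((32, 32, 3), np.uint8)
    arr[8:24, 8:24] = (255, 0, 0)  # red square
    qimg = qimage2ndarray.array2qimage(arr, normalize=False)
    back = qimage2ndarray.rgb_view(qimg)
    assert np.array_equal(back, arr)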
def test_viewer_at_200percent():
    v = VigraQt.QImageViewer()
    v.setImage(qimg)
    v.setZoomLevel(1)
    assert v.zoomFactor() == 2.0
    assert v.sizeHint() == qimg.size() * 2
    v.resize(qimg.size() * 2)
    out = getWidgetImage(v)
    for yo in (0, 1):
        for xo in (0, 1):
            assert numpy.all(rgb_view(out)[xo::2, yo::2] == rgb_view(reference))
def next(self):
    pageCount = self._doc.numPages()
    if self._pageIndex >= pageCount:
        raise StopIteration
    sys.stdout.write("\rrendering page %d / %d..." % (self._pageIndex + 1, pageCount))
    sys.stdout.flush()
    page = self._doc.page(self._pageIndex)
    assert page
    renderSize = QtCore.QSize(page.pageSize())
    if self._sizePX:
        widthPX, heightPX = self._sizePX
        renderSize.scale(widthPX, heightPX, QtCore.Qt.KeepAspectRatio)
    scale = renderSize.width() / page.pageSize().width()
    qImg = page.renderToImage(scale * 72, scale * 72)
    result = qimage2ndarray.rgb_view(qImg)
    self._pageIndex += 1
    if self._pageIndex == pageCount:
        print()  # finish the "\r..." progress line
    return result
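# Hypothetical driver for the next() method above: the class name
# PdfPageIterator and its constructor arguments are assumptions for
# illustration only -- the original code shows just the next() method.
def demo_render_all_pages():
    pages = PdfPageIterator("slides.pdf", sizePX=(1280, 1024))
    for rgb in pages:     # assumes the class also defines __iter__(self): return self
        print(rgb.shape)  # (height, width, 3) rgb_view of one rendered page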
def endDrawing(self, pos):
    has_moved = self._hasMoved  # _hasMoved will change after calling moveTo
    if has_moved:
        self.moveTo(pos)
    else:
        assert self.pos == pos
        self.moveTo(QPointF(pos.x() + 0.0001, pos.y() + 0.0001))  # move a little

    tempi = QImage(QSize(self.bb.width(), self.bb.height()),
                   QImage.Format_ARGB32_Premultiplied)  # TODO: format
    tempi.fill(0)
    painter = QPainter(tempi)
    self.scene.render(painter, target=QRectF(),
                      source=QRectF(QPointF(self.bb.x(), self.bb.y()),
                                    QSizeF(self.bb.width(), self.bb.height())))
    painter.end()

    ndarr = qimage2ndarray.rgb_view(tempi)[:, :, 0]
    labels = numpy.where(ndarr > 0, numpy.uint8(self.drawnNumber), numpy.uint8(0))
    labels = labels.swapaxes(0, 1)
    assert labels.shape[0] == self.bb.width()
    assert labels.shape[1] == self.bb.height()

    ## ensure that at least one pixel is labeled when the brush size is 1;
    ## this happens when the user just clicked without moving --
    ## in that case the line item is so tiny that it won't be rendered
    ## into a single pixel by the code above
    if not has_moved and self.brushSize <= 1 and numpy.count_nonzero(labels) == 0:
        labels[labels.shape[0] // 2, labels.shape[1] // 2] = self.drawnNumber

    self.brushStrokeAvailable.emit(QPointF(self.bb.x(), self.bb.y()), labels)
def StartJulia(self):
    import qimage2ndarray
    data = qimage2ndarray.rgb_view(self.image)
    self.ui.progressBar.setVisible(True)
    self.ui.StartButton.setEnabled(False)
    # Spawn the thread and start timing
    self.time = QtCore.QTime()
    self.time.start()
    self.ThreadJulia.Julia(data, self.ui.radioGPU.isChecked())
def endDrawing(self, pos):
    has_moved = self._hasMoved  # _hasMoved will change after calling moveTo
    if has_moved:
        self.moveTo(pos)
    else:
        assert self.pos == pos
        self.moveTo(QPointF(pos.x() + 0.0001, pos.y() + 0.0001))  # move a little

    # Qt seems to use strange rules for determining which pixels to set when
    # rendering a brush stroke to a QImage.  We seem to get better results if we:
    # 1) Slightly offset the source window, because apparently there is a small shift in the data
    # 2) Render the scene to an image that is MUCH larger than the scene resolution (4x by 4x)
    # 3) Downsample each 4x4 patch from the large image back to a single pixel in the final image,
    #    applying some threshold to determine whether the final pixel is on or off.
    tempi = QImage(QSize(4 * self.bb.width(), 4 * self.bb.height()),
                   QImage.Format_ARGB32_Premultiplied)  # TODO: format
    tempi.fill(0)
    painter = QPainter(tempi)
    # Offset the source window.  At first I thought the right offset was 0.5, because
    # that would seem to make sure points are rounded to pixel CENTERS, but
    # experimentation indicates that 0.25 is slightly better for some reason...
    source_rect = QRectF(QPointF(self.bb.x() + 0.25, self.bb.y() + 0.25),
                         QSizeF(self.bb.width(), self.bb.height()))
    target_rect = QRectF(QPointF(0, 0), QSizeF(4 * self.bb.width(), 4 * self.bb.height()))
    self.scene.render(painter, target=target_rect, source=source_rect)
    painter.end()

    # Now downsample: convert each 4x4 patch into a single pixel by summing and dividing
    ndarr = qimage2ndarray.rgb_view(tempi)[:, :, 0].astype(int)
    ndarr = ndarr.reshape((ndarr.shape[0],) + (ndarr.shape[1] // 4,) + (4,))
    ndarr = ndarr.sum(axis=-1)
    ndarr = ndarr.transpose()
    ndarr = ndarr.reshape((ndarr.shape[0],) + (ndarr.shape[1] // 4,) + (4,))
    ndarr = ndarr.sum(axis=-1)
    ndarr = ndarr.transpose()
    ndarr //= 4 * 4

    downsample_threshold = (7.0 / 16) * 255
    labels = numpy.where(ndarr >= downsample_threshold,
                         numpy.uint8(self.drawnNumber), numpy.uint8(0))
    labels = labels.swapaxes(0, 1)
    assert labels.shape[0] == self.bb.width()
    assert labels.shape[1] == self.bb.height()

    ## ensure that at least one pixel is labeled when the brush size is 1;
    ## this happens when the user just clicked without moving --
    ## in that case the line item is so tiny that it won't be rendered
    ## into a single pixel by the code above
    if not has_moved and self.brushSize <= 1 and numpy.count_nonzero(labels) == 0:
        labels[labels.shape[0] // 2, labels.shape[1] // 2] = self.drawnNumber

    self.brushStrokeAvailable.emit(QPointF(self.bb.x(), self.bb.y()), labels)
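# Standalone sketch (names illustrative, not from the original code) of the
# reshape/sum/transpose trick used above: integer mean over each 4x4 patch
# of a 2D array whose side lengths are multiples of 4.
def demo_block_mean_4x4():
    import numpy as np
    a = np.arange(8 * 8).reshape(8, 8)
    h, w = a.shape
    # Sum runs of 4 along the columns, transpose, then do the same for the rows.
    a = a.reshape(h, w // 4, 4).sum(axis=-1).T       # shape: (w // 4, h)
    a = a.reshape(w // 4, h // 4, 4).sum(axis=-1).T  # shape: (h // 4, w // 4)
    a //= 4 * 4
    print(a)  # 2x2 result; each entry is the mean of one 4x4 block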
def sliceImg(width, height, axisLabels, perpAxisLabel, perpAxisValue):
    print(perpAxisLabel, perpAxisValue)
    img = QImage(width, height, QImage.Format_ARGB32)
    img.fill(0)

    p = QPainter(img)
    p.setPen(QColor(255, 255, 255))
    p.setBrush(QBrush(QColor(255, 255, 255)))

    def arrow(p, From, To, label):
        p.drawLine(From, To)
        p.drawText(To, label)

    offset = 10
    arrow(p, QPoint(offset, offset), QPoint(offset, height - offset), axisLabels[1])
    arrow(p, QPoint(offset, offset), QPoint(width - offset, offset), axisLabels[0])
    p.drawText(2 * offset, 2 * offset, "%s=%d" % (perpAxisLabel, perpAxisValue))

    fm = p.fontMetrics()
    size = fm.size(Qt.TextSingleLine, "updown")
    p.drawText(
        numpy.random.randint(offset, width - offset - size.width()),
        numpy.random.randint(offset, height - offset - size.height()),
        "updown",
    )

    # Scatter 30 random gray dots on still-empty pixels, remembering their values
    dots = []
    numPixels = 0
    while numPixels < 30:
        r = numpy.random.randint(1, 255)
        rx = numpy.random.randint(offset, width - offset)
        ry = numpy.random.randint(offset, height - offset)
        if img.pixel(rx, ry) != 0:
            continue
        p.setPen(QPen(QColor(r, r, r)))
        p.drawPoint(rx, ry)
        dots.append(((rx, ry), r))
        numPixels += 1
    p.end()
    img.save("test.png")

    a = qimage2ndarray.rgb_view(img)
    a = a[:, :, 0].squeeze().swapaxes(0, 1)
    for (rx, ry), r in dots:
        assert QColor.fromRgba(img.pixel(rx, ry)).red() == r, \
            "QColor.fromRgba(img.pixel(rx,ry)).red() == %d != %d" % (
                QColor.fromRgba(img.pixel(rx, ry)).red(), r)
        assert a[rx, ry] == r, "a[%d,%d] == %d != %d" % (rx, ry, a[rx, ry], r)
    return (a, dots)
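# Illustration of the axis convention that sliceImg relies on: rgb_view()
# is indexed [y, x], so swapaxes(0, 1) yields [x, y] indexing. PyQt4-style
# imports are assumed here, matching the tests in this collection.
def demo_rgb_view_axis_order():
    from PyQt4 import QtGui
    import qimage2ndarray
    img = QtGui.QImage(4, 3, QtGui.QImage.Format_RGB32)  # width=4, height=3
    img.fill(0)
    img.setPixel(2, 1, QtGui.qRgb(200, 0, 0))            # setPixel(x, y)
    v = qimage2ndarray.rgb_view(img)
    assert v.shape == (3, 4, 3)                          # (height, width, channel)
    assert v[1, 2, 0] == 200                             # indexed [y, x, channel]
    assert v.swapaxes(0, 1)[2, 1, 0] == 200              # [x, y, channel]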
def getScene(self):
    instr = QtCore.QDataStream(self.tcpSocket)
    instr.setVersion(QtCore.QDataStream.Qt_4_0)
    while True:
        if self.blockSize == 0:
            if self.tcpSocket.bytesAvailable() < 4:
                return
            self.blockSize = instr.readUInt32()
        if self.tcpSocket.bytesAvailable() < self.blockSize:
            return
        self.blockSize = 0

        pix = QtGui.QImage()
        ba = QtCore.QByteArray()
        instr >> ba
        if self.tcpSocket.bytesAvailable() > 0:
            rospy.logdebug("Image dropped")
            continue
        ba = QtCore.qUncompress(ba)
        if not pix.loadFromData(ba):
            rospy.logerr("Failed to load image from received data")
        if not self.is_calibrated() or self.calibrating:
            return

        img = pix.convertToFormat(QtGui.QImage.Format_ARGB32)
        v = qimage2ndarray.rgb_view(img)
        # TODO: gpu
        image_np = cv2.warpPerspective(
            v, self.h_matrix, (self.width(), self.height())
        )  # flags=cv2.INTER_LINEAR
        height, width, channel = image_np.shape
        bytesPerLine = 3 * width
        # QImage wraps image_np's buffer without copying; QPixmap.fromImage then copies it
        image = QtGui.QPixmap.fromImage(
            QtGui.QImage(image_np.data, width, height, bytesPerLine,
                         QtGui.QImage.Format_RGB888)
        )
        self.pix_label.setPixmap(image)
        self.update()
        return
def start(self):
    # qimage2ndarray import(s)
    import qimage2ndarray
    self.ui.startButton.setEnabled(False)
    self.ui.stopButton.setEnabled(True)
    self.rows = numpy.int32(512)
    self.columns = numpy.int32(256)
    self.threads = 16
    image = QtGui.QImage(self.rows, self.columns, QtGui.QImage.Format_RGB32)
    self.data = qimage2ndarray.rgb_view(image)
    # Needs a contiguous buffer; rgb_view() returns a strided view
    self.data = numpy.copy(self.data)
    self.ticks = numpy.zeros(1, dtype=numpy.int32)
    self.timer.start(16.667)
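# Note on the numpy.copy() above: rgb_view() of an RGB32 image returns a
# strided view (the alpha byte of each pixel is skipped), so CUDA needs a
# copy. A sketch of the same idea with np.ascontiguousarray (illustrative,
# PyQt4-style imports assumed):
def demo_contiguous_copy():
    import numpy as np
    import qimage2ndarray
    from PyQt4 import QtGui
    image = QtGui.QImage(512, 256, QtGui.QImage.Format_RGB32)
    view = qimage2ndarray.rgb_view(image)  # shares the QImage's memory
    assert not view.flags["C_CONTIGUOUS"]
    data = np.ascontiguousarray(view)      # contiguous copy, safe to hand to CUDA
    assert data.flags["C_CONTIGUOUS"]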
def endDrawing(self, pos):
    print("BrushingModel.endDrawing(pos=%r)" % (pos,))
    self.moveTo(pos)
    tempi = QImage(QSize(self.bb.width(), self.bb.height()),
                   QImage.Format_ARGB32_Premultiplied)  # TODO: format
    tempi.fill(0)
    painter = QPainter(tempi)
    self.scene.render(painter, target=QRectF(),
                      source=QRectF(QPointF(self.bb.x(), self.bb.y()),
                                    QSizeF(self.bb.width(), self.bb.height())))
    painter.end()
    ndarr = qimage2ndarray.rgb_view(tempi)[:, :, 0]
    labels = numpy.where(ndarr > 0, numpy.uint8(self.drawnNumber), numpy.uint8(0))
    self.brushStrokeAvailable.emit(QPointF(self.bb.x(), self.bb.y()), labels)
def go(self):
    import qimage2ndarray
    image = QtGui.QImage(self.rows, self.columns, QtGui.QImage.Format_RGB32)
    self.data = qimage2ndarray.rgb_view(image)
    # Needs a contiguous buffer
    self.data = numpy.copy(self.data)
    self.spheres = numpy.array(self.CreateSpheres())

    # Init CUDA
    cuda.init()
    # Create CUDA context
    ctx = pycuda.tools.make_default_context()

    # Declare event(s) for timing
    startEvent = cuda.Event()
    stopEvent = cuda.Event()

    # Memory on device
    gpu_alloc = cuda.mem_alloc(self.data.nbytes)
    gpu_rows = cuda.mem_alloc(self.rows.nbytes)
    gpu_columns = cuda.mem_alloc(self.columns.nbytes)

    # Copy data from host to device
    cuda.memcpy_htod(gpu_rows, self.rows)
    cuda.memcpy_htod(gpu_columns, self.columns)

    # Compile the kernel and upload the sphere data to its module-level symbol
    mod = SourceModule(code)
    gpu_spheres = mod.get_global("spheres")
    cuda.memcpy_htod(gpu_spheres[0], self.spheres)

    kernel = mod.get_function("RayTracer")
    startEvent.record()
    kernel(gpu_alloc, gpu_rows, gpu_columns,
           block=(self.threads, self.threads, 1),
           grid=(int(self.rows / self.threads), int(self.columns / self.threads)))
    stopEvent.record()
    stopEvent.synchronize()
    print("Time elapsed: %fms" % startEvent.time_till(stopEvent))

    # Copy result from device to host
    cuda.memcpy_dtoh(self.data, gpu_alloc)
    ctx.pop()
    self.SetImage(self.data)
def blend_images(image_list):
    blend_img = []
    for img in image_list:
        img_q = img.qimage()
        blend_img.append(img_q)
    qimage = blend_images_max(blend_img)
    result = qimage2ndarray.rgb_view(QtGui.QImage(qimage))

    # Union of the per-image masks: any pixel covered by at least one image
    mask_img = numpy.zeros(image_list[0].array.shape)
    for img in image_list:
        mask_img += img.array
    mask_img = mask_img > 0

    # Bump covered-but-black pixels to 1 so they stay distinguishable from
    # the uncovered background, which is set to gray (64) below
    tmp_ = result.sum(2) > 0
    result[mask_img] += 1
    result[tmp_] -= 1
    result[result.sum(2) == 0] = 64
    return result
def test_rgb_view():
    qimg = QtGui.QImage(320, 240, QtGui.QImage.Format_RGB32)
    qimg.fill(23)
    v = qimage2ndarray.rgb_view(qimg)
    qimg.setPixel(12, 10, QtGui.qRgb(12, 34, 56))
    assert_equal(list(v[10, 12]), [12, 34, 56])
def imread(filename):
    filename = os.path.join(os.path.dirname(__file__), filename)
    return qimage2ndarray.rgb_view(QtGui.QImage(filename))
def fillArea(self, remove_closed_contour=False, remove_only_current_color=True):
    # Store the previous state so we can go back to it
    self._overlay_stack.append(self.mask_pixmap.copy())
    if self.direct_mask_paint:
        self._offscreen_mask_stack.append(self._offscreen_mask.copy())

    # First convert the mask to a QImage and then to an ndarray
    orig_mask = self.mask_pixmap.toImage().convertToFormat(QImage.Format_ARGB32)
    msk = alpha_view(orig_mask).copy()

    # Apply simple thresholding and invert the image
    msk[np.where((msk > 0))] = 255
    msk = 255 - msk

    msk1 = np.copy(msk)
    if remove_closed_contour:
        msk1 = 255 - msk1
        if remove_only_current_color:
            the_mask = np.ones(msk1.shape[:2], np.uint8) * 255  # initial mask
            fullmask = self.export_ndarray_noalpha()  # get the colored version
            reds, greens, blues = fullmask[:, :, 0], fullmask[:, :, 1], fullmask[:, :, 2]
            cur_col = list(self.brush_fill_color.getRgb())[:-1]  # only the current color is considered
            # So that the fill happens only for this specific color
            the_mask[np.isclose(reds, cur_col[0], atol=PIXMAP_CONV_BUG_ATOL)
                     & np.isclose(greens, cur_col[1], atol=PIXMAP_CONV_BUG_ATOL)
                     & np.isclose(blues, cur_col[2], atol=PIXMAP_CONV_BUG_ATOL)] = 0
        else:
            the_mask = np.zeros(msk1.shape[:2], np.uint8)
    else:
        the_mask = cv2.bitwise_not(np.copy(msk))

    # floodFill requires the mask to be 2 pixels larger than the image
    the_mask = cv2.copyMakeBorder(the_mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, 0)

    # Fill the contour
    seed_point = (int(self.lastCursorLocation.x()), int(self.lastCursorLocation.y()))
    cv2.floodFill(msk1, the_mask, seed_point, 0, 0, 1)

    # Paint in only the newly arrived pixels (or remove the pixels in the contour)
    if remove_closed_contour:
        paintin = msk1
    else:
        paintin = msk - msk1  # this is the fill case

    # Take the original pixmap image: it has two components, RGB and ALPHA
    new_img = np.dstack((rgb_view(orig_mask), alpha_view(orig_mask)))

    # Fill the newly created area with the current brush color
    if not remove_closed_contour:
        new_img[np.where((paintin == 255))] = list(self.brush_fill_color.getRgb())
    else:
        new_img[np.where((paintin == 0))] = (0, 0, 0, 0)  # erase

    new_qimg = array2qimage(new_img)

    # In the case of direct drawing, the offscreen mask needs updating as well
    if self.direct_mask_paint:
        omask = byte_view(self._offscreen_mask).copy()
        omask = omask.reshape(omask.shape[:-1])
        if not remove_closed_contour:
            tc = self.d_rgb2gray[self.brush_fill_color.name()]
            omask[np.where((paintin == 255))] = tc
        else:
            omask[np.where((paintin == 0))] = 0
        self._offscreen_mask = QImage(omask.data, omask.shape[1], omask.shape[0],
                                      omask.strides[0], QImage.Format_Grayscale8)

    # Finally update the on-screen pixmap
    self.mask_pixmap = QPixmap.fromImage(new_qimg)
    self._overlayHandle.setPixmap(self.mask_pixmap)
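# Standalone sketch (synthetic data, not the original mask) of the
# cv2.floodFill() masking used in fillArea above: the mask must be 2 pixels
# larger than the image in each dimension, and nonzero mask pixels block the fill.
def demo_floodfill_mask():
    import numpy as np
    import cv2
    img = np.full((8, 8), 255, np.uint8)
    img[2:6, 2:6] = 0                      # a dark square to flood
    mask = np.zeros((10, 10), np.uint8)
    mask[3, :] = 255                       # block image row 2 (mask is offset by 1)
    cv2.floodFill(img, mask, (3, 3), 128)  # seed at (x=3, y=3), inside the square
    print(img)                             # rows 3-5 of the square become 128; row 2 stays 0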
def convertToCV(self):
    self._array = qim.rgb_view(self._image, True)
    self._array = cv2.cvtColor(self._array, cv2.COLOR_RGB2BGR)
def export_ndarray(self):
    mask = self.mask_pixmap.toImage().convertToFormat(QImage.Format_ARGB32)
    return np.dstack((rgb_view(mask).copy(), alpha_view(mask).copy()))
def export_ndarray_noalpha(self):
    mask = self.mask_pixmap.toImage().convertToFormat(QImage.Format_ARGB32)
    return rgb_view(mask).copy()
def convertToCV(self):
    self._array = qim.rgb_view(self._image, True)
    self._array = cv2.cvtColor(self._array, cv2.COLOR_RGB2BGR)
    cv2.imshow("cvimage", self._array)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def convertToCV(self):
    """Convert a QImage to an array usable by OpenCV."""
    self._array = qim.rgb_view(self._image, True)
    self._array = cv2.cvtColor(self._array, cv2.COLOR_RGB2BGR)
def qimage2numpy(qimage):
    '''Convert QImage to a uint8 RGB numpy array.'''
    return q2n.rgb_view(qimage)
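# Caveat for qimage2numpy above: rgb_view() returns a view that shares the
# QImage's pixel buffer, not an independent array; take a .copy() if the
# caller must not observe later changes to the image. PyQt4-style sketch:
def demo_view_vs_copy():
    from PyQt4 import QtGui
    import qimage2ndarray
    qimg = QtGui.QImage(4, 4, QtGui.QImage.Format_RGB32)
    qimg.fill(QtGui.qRgb(0, 0, 0))
    v = qimage2ndarray.rgb_view(qimg)  # shares the pixel buffer
    snapshot = v.copy()                # independent copy, frozen at this point
    qimg.setPixel(0, 0, QtGui.qRgb(255, 0, 0))
    assert v[0, 0, 0] == 255           # the view sees the change...
    assert snapshot[0, 0, 0] == 0      # ...the copy does not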