def select_image(self):
    if self.ui.property_2.currentText() == "T1":
        array = (self.T1 - np.amin(self.T1)) * 255 / (np.amax(self.T1) - np.amin(self.T1))
        image_T1 = gray2qimage(array)  # convert the array to a QImage
        self.show_image(image_T1)
    elif self.ui.property_2.currentText() == "T2":
        array = (self.T2 - np.amin(self.T2)) * 255 / (np.amax(self.T2) - np.amin(self.T2))
        image_T2 = gray2qimage(array)
        self.show_image(image_T2)
    elif self.ui.property_2.currentText() == "Phantom":
        image_phantom = gray2qimage(self.img_array)
        self.show_image(image_phantom)
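Several snippets in this collection rescale an array to 0-255 by hand before calling gray2qimage. The function's own normalize argument covers the common cases; a minimal sketch under that assumption (the helper name and arrays are illustrative only, not taken from any snippet here):

import numpy as np
from qimage2ndarray import gray2qimage

def to_qimage(arr, window=None):
    # normalize=True stretches arr's min..max to 0..255;
    # a (min, max) tuple maps that fixed range to 0..255 instead.
    # Older qimage2ndarray releases did not clip values outside the
    # given range, which is why some snippets below call np.clip first.
    if window is None:
        return gray2qimage(arr, normalize=True)
    return gray2qimage(arr, normalize=window)

qimg = to_qimage(np.random.rand(240, 320))
qimg_fixed = to_qimage(np.random.rand(240, 320) * 4095, window=(0, 4095))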
def iterQImages(self):
    """Iterator over qimages (indexed_8) with colortable set."""
    iinfo = np.iinfo(self.image.dtype)
    ncolors = abs(iinfo.max - iinfo.min) + 1
    luts = [AtPainter.lut_from_color(QColor(c), ncolors) for c in self.colors]
    for i, color in enumerate(self.colors):
        if self.image.ndim == 2:
            image = gray2qimage(self.image, normalize=False)
        else:
            image = gray2qimage(self.image[:, :, i], normalize=False)
        image.setColorTable(luts[i])
        yield image
def test_bool2qimage_normalize():
    a = numpy.zeros((240, 320), dtype=bool)
    a[12, 10] = True
    # normalization should scale to 0/255
    # (not raise a numpy exception, see issue #17)
    qImg = qimage2ndarray.gray2qimage(a, normalize=True)
    assert not qImg.isNull()
    assert_equal(qImg.width(), 320)
    assert_equal(qImg.height(), 240)
    assert_equal(hex(qImg.pixel(10, 12)), hex(QtGui.qRgb(255, 255, 255)))
    assert_equal(hex(qImg.pixel(0, 0)), hex(QtGui.qRgb(0, 0, 0)))

    a[:] = True
    qImg = qimage2ndarray.gray2qimage(a, normalize=True)
    # for boolean arrays, I would assume True should always map to 255
    assert_equal(hex(qImg.pixel(0, 0)), hex(QtGui.qRgb(255, 255, 255)))
def run(self):
    qimage = qimage2ndarray.gray2qimage(pyfits.getdata(self.i), normalize=True).scaledToWidth(166)
    for n in range(256):
        qimage.setColor(n, qRgb(*self.colormap[n][:3]))
    mutex.lock()
    thumbs[self.i] = qimage
    mutex.unlock()
def paintEvent(self, e):
    super().paintEvent(e)
    # raw image pixel dimensions
    default_w = 512
    default_h = 512
    img = qnd.gray2qimage(self.activeframe, normalize=(0, self.maxintens))
    pix = QtGui.QPixmap.fromImage(img)
    qp = QtGui.QPainter(pix)
    pen = QPen(Qt.red, 12)
    qp.setPen(pen)
    if self.trap_positions is not None:
        for i in np.arange(len(self.trap_positions)):
            pos = QPoint((self.trap_positions[i][1] - 10), (self.trap_positions[i][0] - 15))
            qp.drawText(pos, str(self.labels[i]))
        pen = QPen(Qt.white, 2)
        qp.setPen(pen)
        for i in np.arange(len(self.trap_positions)):
            qp.drawRect(self.trap_positions[i][1] - 10, self.trap_positions[i][0] - 15, 30, 30)
    qp.end()
    self.setPixmap(pix.scaled(self.size(), Qt.KeepAspectRatio))
def toImage(self):
    a = self._arrayreq.getResult()
    assert a.ndim == 2, "GrayscaleImageRequest.toImage(): result has shape %r, which is not 2-D" % (a.shape,)
    normalize = self._normalize
    img = gray2qimage(a, normalize)
    return img.convertToFormat(QImage.Format_ARGB32_Premultiplied)
def test(d):
    normalize = [17, 216]

    t1 = time.time()
    img1 = QImage(d.shape[1], d.shape[0], QImage.Format_ARGB32_Premultiplied)
    Converters.array2gray(d, img1, normalize[0], normalize[1])
    t1 = time.time() - t1

    t2 = time.time()
    a = numpy.clip(d, *normalize)
    img2 = qimage2ndarray.gray2qimage(a, normalize)
    img2 = img2.convertToFormat(QImage.Format_ARGB32_Premultiplied)
    t2 = time.time() - t2

    print img2.width(), img2.height()
    print "t1: %f msec (new)" % (1000.0*t1,)
    print "t2: %f msec" % (1000.0*t2,)
    print "speedup:", t2/t1
    img1.save("/tmp/img1.png")
    img2.save("/tmp/img2.png")

    if d.dtype == numpy.float32:
        Converters.array2alphamodulated(d, img2, 1.0, 0.0, 0.0, normalize[0], normalize[1])
        img2.save("/tmp/img3.png")
        Converters.array2alphamodulated(d, img2, 0.0, 1.0, 0.0, normalize[0], normalize[1])
        img2.save("/tmp/img4.png")
        Converters.array2alphamodulated(d, img2, 0.0, 0.0, 1.0, normalize[0], normalize[1])
        img2.save("/tmp/img5.png")
        Converters.array2alphamodulated(d, img2, 1.0, 1.0, 0.0, normalize[0], normalize[1])
        img2.save("/tmp/img5.png")
        Converters.array2alphamodulated(d, img2, 255.0/255.0, 132.0/255.0, 241/255.0, normalize[0], normalize[1])
        img2.save("/tmp/img6.png")
def clampChanged(self, min, max):
    #print "clambis", min, max
    self.qimage = qimage2ndarray.gray2qimage(self.data, normalize=(min, max))
    self.colorMapChanged(str(self.colormapComboBox.currentText()))
    pixmap = QPixmap.fromImage(self.qimage)
    self.imageLabel.setPixmap(pixmap)
    self.redrawImage()
def wait(self):
    a = self._arrayreq.wait()
    a = a.squeeze()
    img = gray2qimage(a)
    img.setColorTable(self._colorTable)
    # = img.convertToFormat(QImage.Format_ARGB32_Premultiplied, self._colorTable)
    img = img.convertToFormat(QImage.Format_ARGB32_Premultiplied)
    return img
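gray2qimage returns an 8-bit indexed QImage, so a 256-entry colour table can be attached before converting to an RGB format, as the snippet above does. A hedged sketch of building such a table with qRgb (the PyQt5 import and the blue-to-red ramp are assumptions for illustration, not taken from the snippets):

from PyQt5.QtGui import qRgb
from qimage2ndarray import gray2qimage

def false_color_qimage(arr):
    # 256-entry lookup table for the indexed image: a simple blue-to-red ramp.
    lut = [qRgb(i, 0, 255 - i) for i in range(256)]
    img = gray2qimage(arr, normalize=True)  # Format_Indexed8
    img.setColorTable(lut)                  # recolour via the table
    return img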
def toImage(self):
    t = time.time()

    tWAIT = time.time()
    self._arrayreq.wait()
    tWAIT = 1000.0 * (time.time() - tWAIT)

    tAR = time.time()
    a = self._arrayreq.getResult()
    tAR = 1000.0 * (time.time() - tAR)

    assert a.ndim == 2, "GrayscaleImageRequest.toImage(): result has shape %r, which is not 2-D" % (a.shape,)

    normalize = self._normalize
    if not normalize:
        normalize = [0, 255]

    # FIXME: It is obviously wrong to truncate like this (right?)
    if a.dtype == np.uint64 or a.dtype == np.int64:
        warnings.warn("Truncating 64-bit pixels for display")
        if a.dtype == np.uint64:
            a = a.astype(np.uint32)
        elif a.dtype == np.int64:
            a = a.astype(np.int32)

    #
    # new conversion
    #
    tImg = None
    if _has_vigra and hasattr(vigra.colors, 'gray2qimage_ARGB32Premultiplied'):
        if self._normalize is None or \
           self._normalize[0] >= self._normalize[1] or \
           self._normalize == [0, 0]:
            # FIXME: fix volumina conventions
            n = np.asarray([0, 255], dtype=a.dtype)
        else:
            n = np.asarray(self._normalize, dtype=a.dtype)
        tImg = time.time()
        img = QImage(a.shape[1], a.shape[0], QImage.Format_ARGB32_Premultiplied)
        if not a.flags['C_CONTIGUOUS']:
            a = a.copy()
        vigra.colors.gray2qimage_ARGB32Premultiplied(a, byte_view(img), n)
        tImg = 1000.0 * (time.time() - tImg)
    else:
        self.logger.warning("using slow image creation function")
        tImg = time.time()
        if self._normalize:
            # clipping has been implemented in this commit,
            # but it is not yet available in the packages obtained via easy_install
            # http://www.informatik.uni-hamburg.de/~meine/hg/qimage2ndarray/diff/fcddc70a6dea/qimage2ndarray/__init__.py
            a = np.clip(a, *self._normalize)
        img = gray2qimage(a, self._normalize)
        ret = img.convertToFormat(QImage.Format_ARGB32_Premultiplied)
        tImg = 1000.0 * (time.time() - tImg)

    if self.logger.getEffectiveLevel() >= logging.DEBUG:
        tTOT = 1000.0 * (time.time() - t)
        self.logger.debug("toImage (%dx%d, normalize=%r) took %f msec. (array req: %f, wait: %f, img: %f)"
                          % (img.width(), img.height(), normalize, tTOT, tAR, tWAIT, tImg))

    return img
def update_display(self):
    frame = mmc.getLastImage()
    self.image = qnd.gray2qimage(frame, normalize=True)
    self.videobox.setPixmap(QtGui.QPixmap.fromImage(self.image))
def display_nii(mainwindow, data, x, y, z, volume, min, max,
                label_axial, label_coronal, label_sagittal, index_lan=0):
    # Display the slices of `data` that pass through the given point.
    # slice_x = data[x,:,:]
    # slice_y = data[:,y,:]
    # slice_z = data[:,:,z]
    slice_x = np.rot90(data[-1 - y, :, :])
    slice_y = np.rot90(data[:, -1 - x, :])
    slice_z = data[:, :, -1 - z]
    # It is not obvious why the loaded data has to be reindexed this way,
    # but this orientation displays correctly.
    img_x = q2n.gray2qimage(slice_x)
    img_y = q2n.gray2qimage(slice_y)
    img_z = q2n.gray2qimage(slice_z)
    # Convert the gray values into a QImage that Qt can handle
    # (using the qimage2ndarray package).
    img_x = QPixmap.fromImage(QImage.mirrored(img_x, True, False))
    img_y = QPixmap.fromImage(QImage.mirrored(img_y, True, False))
    img_z = QPixmap.fromImage(QImage.mirrored(img_z, True, False))
    # Convert each QImage to a QPixmap, mirroring it horizontally at the same time.

    # # data = np.rot90(data, -1, (2, 1))
    # # data = np.rot90(data, 1, (0, 2))
    # # data = np.rot90(data, 1, (0, 1))
    # # data = np.transpose(data)
    # slice_x = data[y, :, :]
    # slice_y = data[:, x, :]
    # slice_z = data[:, :, z]
    # img_x = QPixmap.fromImage(q2n.gray2qimage(slice_x))
    # img_y = QPixmap.fromImage(q2n.gray2qimage(slice_y))
    # img_z = QPixmap.fromImage(q2n.gray2qimage(slice_z))

    label_axial.clear()
    label_coronal.clear()
    label_sagittal.clear()
    # label_axial.setPixmap(img_x)
    # label_coronal.setPixmap(img_y)
    # label_sagittal.setPixmap(img_z)
    # Show each of the three slices in its corresponding label.
    return img_x, img_y, img_z
def toImage( self ): t = time.time() tWAIT = time.time() self._arrayreq.wait() tWAIT = 1000.0*(time.time()-tWAIT) tAR = time.time() a = self._arrayreq.getResult() tAR = 1000.0*(time.time()-tAR) assert a.ndim == 2, "GrayscaleImageRequest.toImage(): result has shape %r, which is not 2-D" % (a.shape,) normalize = self._normalize if not normalize: normalize = [0,255] # FIXME: It is obviously wrong to truncate like this (right?) if a.dtype == np.uint64 or a.dtype == np.int64: warnings.warn("Truncating 64-bit pixels for display") if a.dtype == np.uint64: a = a.astype( np.uint32 ) elif a.dtype == np.int64: a = a.astype( np.int32 ) # # new conversion # tImg = None if _has_vigra and hasattr(vigra.colors, 'gray2qimage_ARGB32Premultiplied'): if self._normalize is None or \ self._normalize[0] >= self._normalize[1] or \ self._normalize == [0, 0]: #FIXME: fix volumina conventions n = np.asarray([0, 255], dtype=a.dtype) else: n = np.asarray(self._normalize, dtype=a.dtype) tImg = time.time() img = QImage(a.shape[1], a.shape[0], QImage.Format_ARGB32_Premultiplied) vigra.colors.gray2qimage_ARGB32Premultiplied(a, byte_view(img), n) tImg = 1000.0*(time.time()-tImg) else: self.logger.warning("using slow image creation function") tImg = time.time() if self._normalize: #clipping has been implemented in this commit, #but it is not yet available in the packages obtained via easy_install #http://www.informatik.uni-hamburg.de/~meine/hg/qimage2ndarray/diff/fcddc70a6dea/qimage2ndarray/__init__.py a = np.clip(a, *self._normalize) img = gray2qimage(a, self._normalize) ret = img.convertToFormat(QImage.Format_ARGB32_Premultiplied) tImg = 1000.0*(time.time()-tImg) if self.logger.getEffectiveLevel() >= logging.DEBUG: tTOT = 1000.0*(time.time()-t) self.logger.debug("toImage (%dx%d, normalize=%r) took %f msec. (array req: %f, wait: %f, img: %f)" % (img.width(), img.height(), normalize, tTOT, tAR, tWAIT, tImg)) return img
def __init__(self, image, maxWidth, maxHeight, spacing): super().__init__() print('loaded') self.setWindowTitle("DotBot") self.setFixedSize(int(user32.GetSystemMetrics(0)*.75), int(user32.GetSystemMetrics(1)*.75)) self.central_widget = QWidget() self.setCentralWidget(self.central_widget) lay = QVBoxLayout(self.central_widget) self.resizedImage = resizeImage(image, int(maxWidth), int(maxHeight), int(spacing)) self.grayImage = cv.cvtColor(self.resizedImage, cv.COLOR_RGB2GRAY) self.ditheredImageGray = grayScaleFloydSteinberg(self.grayImage) self.coords = getCoordsGray(self.ditheredImageGray, int(spacing)) label = QLabel(self) yourQImage = qimage2ndarray.gray2qimage(self.ditheredImageGray) img = QPixmap(yourQImage) img2 = img.scaled(int(user32.GetSystemMetrics(1)*.75), int(user32.GetSystemMetrics(1)*.75), Qt.KeepAspectRatio, Qt.FastTransformation) label.setPixmap(img2) lay.addWidget(label) self.array = coordStringArrayCreation(self.coords, 50) self.COMLabel = QLabel(self) self.COMLabel.setText('COM Port (COM17):') self.COMLabel.move(int((user32.GetSystemMetrics(0)*.45)), int((user32.GetSystemMetrics(1)*.36)-(.075*(user32.GetSystemMetrics(1))))) self.COMLabel.adjustSize() self.COMPort = QLineEdit(self) self.COMPort.move(int((user32.GetSystemMetrics(0)*.533)), int((user32.GetSystemMetrics(1)*.36)-(.075*(user32.GetSystemMetrics(1))))) self.numDots = QLabel(self) self.numDots.setText("Number of Dots: {}".format(str(len(self.coords)))) self.numDots.move(int((user32.GetSystemMetrics(0)*.45)), int((user32.GetSystemMetrics(1)*.36)-(.025*(user32.GetSystemMetrics(1))))) self.numDots.adjustSize() self.printTime = QLabel(self) self.printTime.setText("Approximate Print Time: {} Hrs".format(round(len(self.coords)/7200, 2))) self.printTime.move(int((user32.GetSystemMetrics(0)*.45)), int((user32.GetSystemMetrics(1)*.36)+(.025*(user32.GetSystemMetrics(1))))) self.printTime.adjustSize() self.imageDims = QLabel(self) self.imageDims.setText('Final dimensions: {}mm, {}mm ({}in, {}in)'.format(self.resizedImage.shape[0], self.resizedImage.shape[1], round(self.resizedImage.shape[0]/2.54, 2), round(self.resizedImage.shape[1]/2.54, 2))) self.imageDims.move(int((user32.GetSystemMetrics(0)*.45)), int((user32.GetSystemMetrics(1)*.36)+(.075*(user32.GetSystemMetrics(1))))) self.imageDims.adjustSize() self.pushButtonPrint = QPushButton("Print", self) self.pushButtonPrint.move(int(user32.GetSystemMetrics(0)*.65), int(user32.GetSystemMetrics(1)*.65)) self.pushButtonPrint.adjustSize() self.pushButtonPrint.clicked.connect(self.clickMethod) label.show()
def drawPreview(self): currentItem = self.treeWidget.currentItem() if self.pixmapImage is not None: self.grscene.removeItem(self.pixmapImage) item = currentItem.item if isinstance(currentItem, OverlayTreeWidgetItem): itemdata = imageArray = currentItem.item._data[0, self.sliceValue, :, :, currentItem.item.channel] if item.getColorTab() is not None: if item.dtype != 'uint8': """ if the item is larger we take the values module 256 since QImage supports only 8Bit Indexed images """ olditemdata = itemdata itemdata = numpy.ndarray(olditemdata.shape, 'uint8') if olditemdata.dtype == 'uint32': itemdata[:] = numpy.right_shift(numpy.left_shift(olditemdata,24),24)[:] elif olditemdata.dtype == 'uint64': itemdata[:] = numpy.right_shift(numpy.left_shift(olditemdata,56),56)[:] elif olditemdata.dtype == 'int32': itemdata[:] = numpy.right_shift(numpy.left_shift(olditemdata,24),24)[:] elif olditemdata.dtype == 'int64': itemdata[:] = numpy.right_shift(numpy.left_shift(olditemdata,56),56)[:] elif olditemdata.dtype == 'uint16': itemdata[:] = numpy.right_shift(numpy.left_shift(olditemdata,8),8)[:] else: raise TypeError(str(olditemdata.dtype) + ' <- unsupported image _data type (in the rendering thread, you know) ') if len(itemdata.shape) > 2 and itemdata.shape[2] > 1: image0 = qimage2ndarray.array2qimage(itemdata, normalize=False) else: image0 = qimage2ndarray.gray2qimage(itemdata, normalize=False) image0.setColorTable(item.getColorTab() [:]) self.pixmapImage = self.grscene.addPixmap(QtGui.QPixmap.fromImage(image0)) else: if currentItem.item.min is not None: self.pixmapImage = self.grscene.addPixmap(QtGui.QPixmap(qimage2ndarray.gray2qimage(imageArray, normalize = (currentItem.item.min, currentItem.item.max)))) else: self.pixmapImage = self.grscene.addPixmap(QtGui.QPixmap(qimage2ndarray.gray2qimage(imageArray))) self.grview.setScene(self.grscene)
def _qimfac(self, cropped: np.ndarray) -> qtg.QImage:
    """Return a QImage instance based on a cropped ndarray."""
    if len(cropped.shape) == 2:
        return qim2nd.gray2qimage(cropped)
    elif len(cropped.shape) == 3:
        return qim2nd.array2qimage(cropped)
    raise Exception('Unsupported image shape')
def iterQImages(self, all_channels=True, normalize=True):
    """Iterator over to-qimage-converted images, one qimage per channel."""
    if all_channels:
        channels = range(self._reader.channels)
    else:
        channels = self.channels.keys()
    zprojection = self.params.values()[0].zprojection
    for ci in channels:
        image = zProjection(self.image[:, :, :, ci], zprojection, self.zslice)
        yield gray2qimage(image, normalize=normalize)
def onOutReady(self, output):
    self.out = output.__deepcopy__(output)
    self.saveAct.setEnabled(True)
    if output.ndim == 2:
        qim = qimage2ndarray.gray2qimage(output)
    else:
        qim = qimage2ndarray.array2qimage(output)
    self.outputImage.setPixmap(
        QtGui.QPixmap(qim).scaled(self.outputImage.width(), self.outputImage.height()))
    self.startBt.setEnabled(True)
    self.centralwidget.setEnabled(True)
def toImage(self):
    a = self._arrayreq.getResult()
    assert a.ndim == 2, "GrayscaleImageRequest.toImage(): result has shape %r, which is not 2-D" % (a.shape,)
    normalize = self._normalize
    if normalize:
        # clipping has been implemented in this commit,
        # but it is not yet available in the packages obtained via easy_install
        # http://www.informatik.uni-hamburg.de/~meine/hg/qimage2ndarray/diff/fcddc70a6dea/qimage2ndarray/__init__.py
        a = np.clip(a, *normalize)
    img = gray2qimage(a, normalize)
    return img.convertToFormat(QImage.Format_ARGB32_Premultiplied)
def SSFP(self, theta, row, col, te, tr, image_shape0, image_shape1, T2, T1, phantom, Gy, Start_up): Kspace_ssfp = np.zeros((image_shape0, image_shape1), dtype=np.complex_) if (Start_up == True): phantom = Fast.startup_cycle(phantom, theta / 2, te, tr, T2, T1, row, col, 1) phantom = Fast.startup_cycle(phantom, theta, te, tr, T2, T1, row, col, 15) for r in range(Kspace_ssfp.shape[0]): #rows theta = -theta phantom = Fast.rotate_decay(phantom, theta, te, T2, row, col) for c in range(Kspace_ssfp.shape[1]): Gx_step = ((2 * math.pi) / row) * r Gy_step = (Gy / col) * c for ph_row in range(row): for ph_col in range(col): Toltal_theta = (Gx_step * ph_row) + (Gy_step * ph_col) Mag = math.sqrt(((phantom[ph_row, ph_col, 0]) * (phantom[ph_row, ph_col, 0])) + ((phantom[ph_row, ph_col, 1]) * (phantom[ph_row, ph_col, 1]))) Kspace_ssfp[r, c] = Kspace_ssfp[r, c] + ( Mag * np.exp(-1j * Toltal_theta)) QApplication.processEvents() QApplication.processEvents() print(theta) for ph_rowtr in range(row): for ph_coltr in range(col): self.phantom[ph_rowtr, ph_coltr, 0] = 0 self.phantom[ph_rowtr, ph_coltr, 1] = 0 self.phantom[ph_rowtr, ph_coltr, 2] = ( (phantom[ph_rowtr, ph_coltr, 2]) * np.exp(-tr / T1[ph_rowtr, ph_coltr])) + ( 1 - np.exp(-tr / T1[ph_rowtr, ph_coltr])) QApplication.processEvents() iff = np.fft.ifft2(Kspace_ssfp) #print(iff) inverse_array = np.abs(iff) inverse_array = (inverse_array - np.amin(inverse_array)) * 255 / ( np.amax(inverse_array) - np.amin(inverse_array)) inverse_img = gray2qimage(inverse_array) imgreconstruction = inverse_img return imgreconstruction
def toImage(self):
    a = self._arrayreq.getResult()
    assert a.ndim == 2, "GrayscaleImageRequest.toImage(): result has shape %r, which is not 2-D" % (a.shape,)
    normalize = self._normalize
    if normalize:
        # clipping has been implemented in this commit,
        # but it is not yet available in the packages obtained via easy_install
        # http://www.informatik.uni-hamburg.de/~meine/hg/qimage2ndarray/diff/fcddc70a6dea/qimage2ndarray/__init__.py
        a = np.clip(a, *normalize)
    img = gray2qimage(a, normalize)
    return img.convertToFormat(QImage.Format_ARGB32_Premultiplied)
def test_gray2qimage():
    a = numpy.zeros((240, 320), dtype=float)
    a[12, 10] = 42.42
    a[13, 10] = -10
    qImg = qimage2ndarray.gray2qimage(a)
    assert not qImg.isNull()
    assert_equal(qImg.width(), 320)
    assert_equal(qImg.height(), 240)
    assert_equal(qImg.format(), QtGui.QImage.Format_Indexed8)
    assert_equal(a.nbytes, numBytes(qImg) * a.itemsize)
    assert_equal(numColors(qImg), 256)
    assert_equal(hex(qImg.pixel(10, 12)), hex(QtGui.qRgb(42, 42, 42)))
    assert_equal(hex(qImg.pixel(10, 14)), hex(QtGui.qRgb(0, 0, 0)))
    assert_equal(hex(qImg.pixel(10, 13)), hex(QtGui.qRgb(0, 0, 0)))
def paintEvent(self, e):
    super().paintEvent(e)
    if self.activeframe is not None:
        maxintens = np.max(self.activeframe)
        img = qnd.gray2qimage(self.activeframe, normalize=(0, maxintens))
        pix = QtGui.QPixmap.fromImage(img)
        pix = pix.scaled(self.size(), Qt.KeepAspectRatio)
        self.setPixmap(pix)
    else:
        self.setText('No video displayed')
def paintEvent(self, QPaintEvent):
    super(DicomBasicImageViewer, self).paintEvent(QPaintEvent)
    if self.__displayImg is not None:
        disImg = self.__displayImg
        QImg = qimage2ndarray.gray2qimage(disImg)
        x = self.__imgGeo['centerX'] - self.__imgGeo['width'] / 2
        y = self.__imgGeo['centerY'] - self.__imgGeo['height'] / 2
        pos = QPoint(x, y)
        source = QRect(0, 0, self.__imgGeo['width'], self.__imgGeo['height'])
        painter = QPainter(self)
        painter.drawPixmap(pos, QPixmap.fromImage(QImg), source)
def SSFP(self): self.flip_angle=self.flipangle row=self.size_image col=self.size_image theta=np.radians(self.flip_angle) TE=int(self.ui.TE_Edit.text()) TR=int(self.ui.TR_Edit.text()) Kspace_ssfp=np.zeros((self.img_array.shape[0],self.img_array.shape[1]),dtype=np.complex_) if (self.Start_up==True): self.phantom=self.startup_cycle(self.phantom,theta/2,TE,TR,self.T2,self.T1,row,col,15) self.phantom=self.rotate_decay(self.phantom,theta/2,TE,self.T2,row,col) for ph_rowtr in range(row): for ph_coltr in range(col): self.phantom[ph_rowtr,ph_coltr,2]=((self.phantom[ph_rowtr,ph_coltr,2])*np.exp(-TR/self.T1[ph_rowtr,ph_coltr]))+(1-np.exp(-TR/self.T1[ph_rowtr,ph_coltr])) #phantom=self.startup_cycle(phantom,theta,TE,TR,self.T2,self.T1,row,col,15) for r in range(Kspace_ssfp.shape[0]): #rows self.phantom=self.rotate_decay(self.phantom,theta,TE,self.T2,row,col) for c in range(Kspace_ssfp.shape[1]): Gx_step=((2*math.pi)/row)*r Gy_step=(self.Gy/col)*c for ph_row in range(row): for ph_col in range(col): Toltal_theta=(Gx_step*ph_row)+(Gy_step*ph_col) Mag=math.sqrt(((self.phantom[ph_row,ph_col,0])*(self.phantom[ph_row,ph_col,0]))+((self.phantom[ph_row,ph_col,1])*(self.phantom[ph_row,ph_col,1]))) Kspace_ssfp[r,c]=Kspace_ssfp[r,c]+(Mag*np.exp(-1j*Toltal_theta)) QApplication.processEvents() QApplication.processEvents() theta=-theta print(theta) for ph_rowtr in range(row): for ph_coltr in range(col): self.phantom[ph_rowtr,ph_coltr,2]=((self.phantom[ph_rowtr,ph_coltr,2])*np.exp(-TR/self.T1[ph_rowtr,ph_coltr]))+(1-np.exp(-TR/self.T1[ph_rowtr,ph_coltr])) QApplication.processEvents() iff= np.fft.ifft2(Kspace_ssfp) #print(iff) inverse_array=np.abs(iff) inverse_array = (inverse_array - np.amin(inverse_array)) * 255/ (np.amax(inverse_array) - np.amin(inverse_array)) inverse_img=gray2qimage(inverse_array) imgreconstruction = QPixmap(inverse_img)#piexel of image self.viewer2.setPhoto(QPixmap(imgreconstruction))
def spin_Echo(self, row, col, te, tr, image_shape0, image_shape1, T2, T1, phantom, Gy, Start_up): theta = np.radians(90) Kspace_SE = np.zeros( (self.img_array.shape[0], self.img_array.shape[1]), dtype=np.complex_) if (Start_up == True): phantom = self.startup_cycle(phantom, theta, te, tr, T2, T1, row, col, 15) for r in range(Kspace_SE.shape[0]): #rows phantom = self.rotate_decay(phantom, np.radians(90), te / 2, T2, row, col) phantom = self.recovery(phantom, row, col, te / 2, self.T1) phantom = self.rotate_decay(phantom, np.radians(180), te / 2, T2, row, col) for c in range(Kspace_SE.shape[1]): Gx_step = ((2 * math.pi) / row) * r Gy_step = (Gy / col) * c for ph_row in range(row): for ph_col in range(col): Toltal_theta = (Gx_step * ph_row) + (Gy_step * ph_col) Mag = math.sqrt(((phantom[ph_row, ph_col, 0]) * (phantom[ph_row, ph_col, 0])) + ((phantom[ph_row, ph_col, 1]) * (phantom[ph_row, ph_col, 1]))) Kspace_SE[r, c] = Kspace_SE[r, c] + ( Mag * np.exp(-1j * Toltal_theta)) QApplication.processEvents() QApplication.processEvents() phantom = self.recovery(phantom, row, col, tr, T1) QApplication.processEvents() iff = np.fft.ifft2(Kspace_SE) inverse_array = np.abs(iff) inverse_array = (inverse_array - np.amin(inverse_array)) * 255 / ( np.amax(inverse_array) - np.amin(inverse_array)) inverse_img = gray2qimage(inverse_array) imgreconstruction = inverse_img return imgreconstruction
def updateArray(self):
    self.viewData = self.map.getViewData()  # should be a tuple
    if not self.liCT:
        self.liCT = getColorTables(self.map.getColors())
    if self.viewData is None:
        self.viewData = (self.map.getData(),)
    self.array = self.viewData[0]
    self.images = []
    for i in range(len(self.viewData)):
        data = self.viewData[i]
        img = qimage2ndarray.gray2qimage(data, (0, np.max(data)))
        img.setColorTable(self.liCT[i])
        self.images.append(img)
def displayEdgeImg(self, oriImg):
    # Handling of the edge-display area.
    if self.isMeasuring == 0:
        # The system is on but no measurement has been selected:
        # the edge-display area also shows the original image.
        img = q2n.gray2qimage(oriImg)
        img = img.scaled(self.IMG_WIDTH / 2, self.ROIRANGE / 2, Qt.KeepAspectRatio)
        self.label_canny.setPixmap(QPixmap.fromImage(img))  # write the image into the original-image display area
        return
    elif self.isMeasuring == 2:
        # Demonstration only: show the edge image without doing any further processing.
        ip = ImageProcessing.ImageProcessing(oriImg)
        img = cv2.resize(ip.cannyImg, (self.IMG_WIDTH / 2, self.ROIRANGE / 2), cv2.INTER_NEAREST)
        img = q2n.gray2qimage(img)
        # This display method enlarges the edges, but the data actually processed is unchanged.
        self.label_canny.setPixmap(QPixmap.fromImage(img))  # write the image into the edge-display area
        return

    # The case self.isMeasuring == 1, after the "automatic measurement" button was clicked.
    ip = ImageProcessing.ImageProcessing(oriImg)
    topCurve, bottomCurve = dp.getEdgeList(ip.cannyImg)
    # waveHeightByPeak, waveLengthByPeak = dp.getWaveParaByPeak(topCurve, bottomCurve)
    waveHeightBySin, waveLengthBySin = dp.getWaveParaBySin(topCurve, bottomCurve)

    # Show the measured wave height of the wire.
    waveShow = "%0.1f" % (waveHeightBySin * 6.6188 - 7.356)
    waveShow += "um"
    self.label_wave_height.setText(QString(str(waveShow)))

    img = cv2.resize(ip.cannyImg, (self.IMG_WIDTH / 2, self.ROIRANGE / 2), cv2.INTER_NEAREST)
    img = q2n.gray2qimage(img)
    # This display method enlarges the edges, but the data actually processed is unchanged.
    self.label_canny.setPixmap(QPixmap.fromImage(img))  # write the image into the edge-display area

    self.cntImgMeasuring += 1
    self.progressBar.setValue(self.cntImgMeasuring)  # update the progress bar
    self.waveHeightList.append(waveHeightBySin)
    self.waveLengthList.append(waveLengthBySin)
    # Counting and data recording.
    if self.cntImgMeasuring > self.rotatePeriod:
        # The measuring time exceeded the configured value:
        self.completeMeasure()  # finish the measurement and do the corresponding reset and clean-up work
def test_gray2qimage_normalize_onlymax():
    a = numpy.zeros((240, 320), dtype=float)
    a[12, 10] = 42.42
    a[13, 10] = -10
    qImg = qimage2ndarray.gray2qimage(a, normalize=80)
    assert not qImg.isNull()
    assert_equal(qImg.width(), 320)
    assert_equal(qImg.height(), 240)
    assert_equal(qImg.format(), QtGui.QImage.Format_Indexed8)
    assert_equal(a.nbytes, qImg.numBytes() * a.itemsize)
    assert_equal(qImg.numColors(), 256)
    x = int(255 * 42.42 / 80.0)
    assert_equal(hex(qImg.pixel(10, 12)), hex(QtGui.qRgb(x, x, x)))
    assert_equal(hex(qImg.pixel(10, 13)), hex(QtGui.qRgb(0, 0, 0)))
    assert_equal(hex(qImg.pixel(10, 14)), hex(QtGui.qRgb(0, 0, 0)))
def __init__(self, mmc, qnd, initial_frame):
    QWidget.__init__(self)
    self.image = qnd.gray2qimage(initial_frame, normalize=True)
    self.videobox = QLabel()
    self.videobox.setPixmap(QtGui.QPixmap.fromImage(self.image))
    self.lyt = QGridLayout()
    self.lyt.addWidget(self.videobox, 0, 0)
    self.setLayout(self.lyt)
    self.show()
def npToQImage2(array, minArray, maxArray):
    """Transform the array into a QImage with a color map; return the QImage."""
    if len(array.shape) == 3 and array.shape[2] == 3:
        qImage = qimage2ndarray.array2qimage(array).rgbSwapped()
    else:
        if minArray == maxArray:
            max_a = 10e-10
        else:
            max_a = max(abs(minArray), abs(maxArray))
        qImage = qimage2ndarray.gray2qimage(array, (-max_a, max_a))
        qImage.setColorTable(__cm)
    return qImage
def updateArray(self):
    self.array = self.map.getData()
    self.min = np.min(self.array)
    self.max = np.max(self.array)
    # Potentials are B&W
    self.imgList = []
    img = plotArrayQt.npToQImage(self.array)
    self.imgList.append(img)
    colorMaps = self.map.getColors()
    for i in range(1, colorMaps.shape[2]):
        a = colorMaps[:, :, i]
        img = qimage2ndarray.gray2qimage(a, (0, np.max(a)))
        img.setColorTable(self.liCT[i])
        self.imgList.append(img)
def display(self):
    self.fitler_FIL = Image.fromarray(np.uint8(self.filter_array), mode='L')
    # self.fitler_FIL = ImageEnhance.Brightness(self.fitler_FIL)
    # brightness = 10
    # self.fitler_FIL = self.fitler_FIL.enhance(brightness)
    self.origin_FIL = Image.fromarray(self.origin_array, mode='L')
    self.oring_img = ImageQt.ImageQt(self.origin_FIL)
    self.filter_img = ImageQt.ImageQt(self.fitler_FIL)
    p2 = gray2qimage(np.uint8(self.slices[self.index].pixel_array), normalize=(0, 255))
    self.oring_pixmap = QPixmap(QImage(self.oring_img))
    self.filter_pixmap = QPixmap(QImage(self.filter_img))
    self.label.setPixmap(self.oring_pixmap)
    self.label_2.setPixmap(self.filter_pixmap)
def paintEvent(self, e):
    super().paintEvent(e)
    qp = QtGui.QPainter(self)
    maxintens = np.max(self.activeframe)
    img = qnd.gray2qimage(self.activeframe, normalize=(0, maxintens))
    size = self.size()
    pix = QtGui.QPixmap.fromImage(img).scaled(size, Qt.KeepAspectRatio)
    # draw the pixmap from the top-left point of the QLabel
    pos = QPoint(0, 0)
    qp.drawPixmap(pos, pix)
    qp.end()
def updateArray(self):
    self.array = self.map.getData()
    self.min = np.min(self.array)
    self.max = np.max(self.array)
    # Potentials are B&W
    img = plotArrayQt.npToQImage(self.array)
    self.imgList = [img]
    colorMaps = self.map.getColors()
    flat = self.flattenColorLayers(colorMaps)
    #print flat[30:50,30:50]
    imgFlat = qimage2ndarray.gray2qimage(flat)
    imgFlat.setColorTable(self.ctFlat)
    self.imgList.append(imgFlat)
def button_browse(self): # try: self.size = int(self.ui.size_2.currentText()) self.filename, _filter = QFileDialog.getOpenFileName( self, "open file", " ", "Image File(*.png *.jpg *.jpeg *.bmp)") if self.filename: imagePath = self.filename imagePath = cv2.imread(self.filename, 0) self.size_image = (len(imagePath)) if (self.size == self.size_image): self.plotWindow1.clear() self.plotWindow2.clear() self.ui.image.clear() imagePath = gray2qimage(imagePath) #b7wel el array le image self.Image_in_graphic_veiw(imagePath) self.show_image(imagePath) self.ui.image.setToolTip("Click to select the pixel") self.stop = True self.count = 0 self.check = False self.Start_up = True self.img_array = cv2.imread(self.filename, 0) ###???? self.T1 = np.zeros((self.size_image, self.size_image)) self.T2 = np.zeros((self.size_image, self.size_image)) self.PD = np.copy(self.img_array) ####??????? n = self.size_image for i in range(n): for j in range(n): if (self.img_array[i, j] > 0 and self.img_array[i, j] < 20): self.T1[i, j] = 500 self.T2[i, j] = 100 elif (self.img_array[i, j] > 20 and self.img_array[i, j] < 180): self.T1[i, j] = 1000 self.T2[i, j] = 120 elif (self.img_array[i, j] > 180 and self.img_array[i, j] < 255): self.T1[i, j] = 1500 self.T2[i, j] = 150 else: print("size doesn't match") QMessageBox.warning(self, "Message", "size doesn't match")
def paintEvent(self, e):
    super().paintEvent(e)
    qp = QtGui.QPainter(self)
    if self.activeframe is not None:
        img = qnd.gray2qimage(self.activeframe, normalize=(0, self.maxintens))
        self.size = img.size()
        pix = QtGui.QPixmap.fromImage(img).scaled(self.h, self.w, Qt.KeepAspectRatio)
        self.pixsize = pix.size()
        pos = QtCore.QPoint(0, 0)
        qp.drawPixmap(pos, pix)
    else:
        pos = QtCore.QPoint(0.5, 0.5)
        qp.drawText(pos, "Load Video to Display")
def npToQImage(array):
    """Transform the array into a QImage with a color map; return the QImage."""
    if len(array.shape) == 3 and array.shape[2] == 3:
        qImage = qimage2ndarray.array2qimage(array).rgbSwapped()
    else:
        if len(array.shape) == 3:
            array = array[..., 0]
        max_a = max(abs(np.min(array)), np.max(array))
        qImage = qimage2ndarray.gray2qimage(array, (-max_a, max_a))
        qImage.setColorTable(__cm)
    return qImage
def test_gray2qimage_masked():
    a = numpy.zeros((240, 320), dtype=float)
    a[12, 10] = 42.42
    a[13, 10] = -10
    a[:, 160:] = 100
    a = numpy.ma.masked_greater(a, 99)
    qImg = qimage2ndarray.gray2qimage(a, normalize=True)
    assert not qImg.isNull()
    assert_equal(qImg.width(), 320)
    assert_equal(qImg.height(), 240)
    assert_equal(qImg.format(), QtGui.QImage.Format_Indexed8)
    assert_equal(a.nbytes, numBytes(qImg) * a.itemsize)
    assert_equal(numColors(qImg), 256)
    assert_equal(hex(qImg.pixel(10, 12)), hex(QtGui.qRgb(255, 255, 255)))
    assert_equal(hex(qImg.pixel(10, 13)), hex(QtGui.qRgb(0, 0, 0)))
    x = int(255 * 10.0 / 52.42)
    assert_equal(hex(qImg.pixel(10, 14)), hex(QtGui.qRgb(x, x, x)))
    assert_equal(QtGui.qAlpha(qImg.pixel(0, 10)), 255)
    assert_equal(QtGui.qAlpha(qImg.pixel(200, 10)), 0)
def img_cv2qt(img_cv):
    '''Convert an OpenCV-format image into a QImage.'''
    # Number of image dimensions
    dimen = len(img_cv.shape)
    if dimen > 3 or dimen < 2:
        return None
    # Image data format
    if img_cv.dtype != numpy.dtype(numpy.uint8):
        return None
    # Conversion
    if dimen == 3:
        qimg = qimage2ndarray.array2qimage(img_cv)
        qimg = qimg.convertToFormat(QImage.Format_RGB32)
    elif dimen == 2:
        qimg = qimage2ndarray.gray2qimage(img_cv)
        qimg = qimg.convertToFormat(QImage.Format_Grayscale8)
    else:
        qimg = None
    return qimg
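One caveat when feeding colour OpenCV images to array2qimage: cv2.imread returns channels in BGR order, while array2qimage interprets the last dimension as RGB, so red and blue end up swapped unless the channels are converted first. A hedged sketch of one way to handle this (the helper name is an assumption for illustration):

import cv2
import qimage2ndarray

def bgr_cv2qt(img_bgr):
    # Convert BGR (OpenCV's default) to RGB before building the QImage.
    img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
    return qimage2ndarray.array2qimage(img_rgb)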
def paintEvent(self, e):
    super().paintEvent(e)
    qp = QtGui.QPainter(self)
    img = qnd.gray2qimage(self.activeframe, normalize=(0, self.maxintens))
    self.size = img.size()
    pix = QtGui.QPixmap.fromImage(img).scaled(self.h, self.w, Qt.KeepAspectRatio)
    self.pixsize = pix.size()
    pos = QPoint(0, 0)
    qp.drawPixmap(pos, pix)
    pen = QPen(Qt.red, 2)
    qp.setPen(pen)
    if self.switch and self.activecoord is not None:
        qp.drawRect(10 * (self.activecoord[1] - 4), 10 * (self.activecoord[0] - 4), 10 * 8, 10 * 8)
    qp.end()
def GRE(self, theta, row, col, te, tr, image_shape0, image_shape1, T2, T1, phantom, Gy, Start_up):
    Kspace = np.zeros((image_shape0, image_shape1), dtype=np.complex_)
    if Start_up == True:
        phantom = Fast.startup_cycle(phantom, theta, te, tr, T2, T1, row, col, 15)
    for r in range(Kspace.shape[0]):  # rows
        phantom = Fast.rotate_decay(phantom, theta, te, T2, row, col)
        for c in range(Kspace.shape[1]):  # columns
            Gx_step = ((2 * math.pi) / row) * r
            Gy_step = (Gy / col) * c
            for ph_row in range(row):
                for ph_col in range(col):
                    Toltal_theta = (Gx_step * ph_row) + (Gy_step * ph_col)
                    Mag = math.sqrt((phantom[ph_row, ph_col, 0] * phantom[ph_row, ph_col, 0])
                                    + (phantom[ph_row, ph_col, 1] * phantom[ph_row, ph_col, 1]))
                    Kspace[r, c] = Kspace[r, c] + (Mag * np.exp(-1j * Toltal_theta))
            QApplication.processEvents()
        QApplication.processEvents()
        phantom = Fast.recovery(phantom, row, col, tr, T1)
        QApplication.processEvents()
    iff = np.fft.ifft2(Kspace)
    inverse_array = np.abs(iff)
    inverse_array = (inverse_array - np.amin(inverse_array)) * 255 / (np.amax(inverse_array) - np.amin(inverse_array))
    inverse_img = gray2qimage(inverse_array)
    imgreconstruction = inverse_img
    return imgreconstruction
def changeImage(self, pathToImage): #Allows user to cycle through various beam setups if pathToImage.endswith('planewave.npy'): #converting any non-default array into an image phiout = planewave.construct() converted_image = q2.gray2qimage(phiout, normalize = True) elif pathToImage.endswith('lens.npy'): #converting any non-default array into an image phiout = lens.construct() converted_image = q2.gray2qimage(phiout, normalize = True) elif pathToImage.endswith('besselbeam.npy'): amp, phiout = besselbeam.construct(10) converted_image = q2.gray2qimage(phiout, normalize = True) elif pathToImage.endswith('conveyorarray.npy'): #converting any non-default array into an image x = np.linspace(0,2*np.pi,20) for i in range(0,10): for u in x: phiout = projectconveyor.construct(u) # testing rayleighsommerfeld 11/9 phiout = rayleighsommerfeld.rayleighsommerfeld(phiout, -10.) #phiout *= np.conjugate(phiout) #take real part from RS?? converted_image = q2.gray2qimage(phiout, normalize = True) pixmap = QtGui.QPixmap(converted_image) pixmap = pixmap.scaledToHeight(300) SLM(converted_image) self.label.setPixmap(pixmap) QtGui.QApplication.processEvents() #pauses the program to let the image buffer onto the SLM and window elif pathToImage.endswith('besselplane.npy'): amp,phiout = besselbeam.construct(1) # testing rayleighsommerfeld 11/9 phiout = rayleighsommerfeld.rayleighsommerfeld(phiout, +50) #phiout *= np.conjugate(phiout) #take real part from RS?? converted_image = q2.gray2qimage(phiout, normalize = True) pixmap = QtGui.QPixmap(converted_image) pixmap = pixmap.scaledToHeight(300) SLM(converted_image) self.label.setPixmap(pixmap) QtGui.QApplication.processEvents() elif pathToImage.endswith('vortex.npy'): #converting any non-default array into an image phiout = vortex.construct() converted_image = q2.gray2qimage(phiout, normalize = True) else: converted_image = pathToImage pixmap = QtGui.QPixmap(converted_image) pixmap = pixmap.scaledToHeight(300) SLM(converted_image) self.label.setPixmap(pixmap)
def showDialog(self): text, ok = QtGui.QInputDialog.getText(self, 'Input Dialog', 'Enter Hologram:hologram(#)+hologram(#)') if ok: #lists input on the right self.lbl.setText(text) self.lbl.move(500,125) self.lbl.adjustSize() if text == "phaseshift": for u in range(100): phiout = projectconveyor.construct(u*(2.*np.pi/100.)) #calls projectconveyer to create the optical conveyor array img = SLM(phiout) #sends the array to SLM() which projects the array onto the SLM pixmap = QtGui.QPixmap(img) pixmap = pixmap.scaledToHeight(300) self.label.setPixmap(pixmap)#sends the array to the smaller window on the main screen print(u*(2.*np.pi/100.)) QtGui.QApplication.processEvents() #pauses the program to let the image buffer onto the SLM and window elif text == "cameratest": cv.NamedWindow("camera",1) #starts up opencv capture = cv.CaptureFromCAM(1) QtGui.QApplication.processEvents() img = cv.QueryFrame(capture) cv.ShowImage("camera",img) cv.SaveImage("cameratest.jpg",img) elif text == "video": #captures video cap= cv.VideoCapture(0) fourcc = cv.cv.CV_FOURCC(*'XVID') #fourcc = cv.VideoWriter_fourcc(*'XVID') out = cv.VideoWriter('output.avi',fourcc,50.0,(640,480)) #fps and resolution while (cap.isOpened()): ret, frame = cap.read() if ret==True: frame = cv.flip(frame,0) out.write(frame) cv.imshow('frame',frame) if cv.waitKey(1) & 0xFF == ord('q'): break else: break cap.release() #out.release() cv.destroyAllWindows() elif text == "video1": #captures video inline self.playing = True cap= cv.VideoCapture(0) fourcc = cv.cv.CV_FOURCC(*'XVID') #fourcc = cv.VideoWriter_fourcc(*'XVID') out = cv.VideoWriter('output.avi',fourcc,50.0,(640,480)) #fps and resolution while self.playing: _, data = cap.read() data = cv.cvtColor(data, cv.cv.CV_BGR2RGB) qImage = QtGui.QImage(data, data.shape[1], data.shape[0], QtGui.QImage.Format_RGB888) self.label.setPixmap(QtGui.QPixmap.fromImage(qImage)) self.label.adjustSize() QtGui.qApp.processEvents() time.sleep(0.02) else: #IMPORTANT SECTION HERE ---------------ROBUST TEXT ACCEPTOR---------------------------- text = text.encode('ascii','ignore') #converts from unicode to ascii text = text.decode('ascii') hologram = scanner.scanner(text)#splits up the input into the individual holograms with their paramaters print(hologram) phiout = np.zeros((c.slm_h, c.slm_w)) for indiv in hologram: for file in os.listdir("/home/nikitas/Desktop/TractorMaster"):#searches main folder if indiv[0] in file: if file.endswith('.py'): a = __import__(indiv[0])#imports the corresponding function if indiv[1] == '': subphiout = a.construct()#default else: subphiout = a.construct(*indiv[1])#generates the array phiout += subphiout #adds the arrays, next line imports correction """fix = Image.open("/home/nikitas/Desktop/TractorMaster/correction.bmp") fix = np.array(fix) phiout += fix #adds correction map for SLM(532 nm) print("post1",fix) print("post1 shape", fix.shape)""" #phiout = phiout.astype(float) #phiout *= 200./256. #phiout = phiout % 256 print("post2", phiout) print("post2 shape",phiout.shape) converted_image = q2.gray2qimage(phiout, normalize = True) pixmap = QtGui.QPixmap(converted_image) pixmap = pixmap.scaledToHeight(300) SLM(converted_image) self.label.setPixmap(pixmap)
def qimage(self):
    qimg = qimage2ndarray.gray2qimage(self.array, normalize=(self.norm_min, self.norm_max))
    qimg.setColorTable(self.cmap.qt)
    return qimg
def show_image(self, img):
    pixmap = QtGui.QPixmap(qimage2ndarray.gray2qimage(img, self.normalize))
    scaledPixmap = pixmap.scaled(self.image_pane.size(), QtCore.Qt.KeepAspectRatio)
    self.image_pane.setPixmap(scaledPixmap)
    self.update()
def test_empty2qimage():
    a = numpy.ones((240, 320), dtype=float)
    qImg = qimage2ndarray.gray2qimage(a, normalize=True)
    assert_equal(hex(qImg.pixel(10, 13)), hex(QtGui.qRgb(0, 0, 0)))
    qImg = qimage2ndarray.array2qimage(a, normalize=True)
    assert_equal(hex(qImg.pixel(10, 13)), hex(QtGui.qRgb(0, 0, 0)))
def test_gray2qimage_normalize_dont_touch_0_255():
    a = numpy.zeros((100, 256), dtype=float)
    a[:] = numpy.arange(256)
    qImg = qimage2ndarray.gray2qimage(a, normalize=True)
    b = qimage2ndarray.raw_view(qImg)
    assert numpy.all(a == b)
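The test above uses qimage2ndarray.raw_view, which exposes the QImage's pixel buffer as a numpy array without copying. A small round-trip sketch (the gradient array is illustrative only):

import numpy
import qimage2ndarray

gradient = numpy.tile(numpy.arange(256, dtype=numpy.uint8), (100, 1))
qimg = qimage2ndarray.gray2qimage(gradient)  # Format_Indexed8, values copied as-is

view = qimage2ndarray.raw_view(qimg)         # numpy view on the QImage buffer
assert numpy.all(view == gradient)
view[:50] = 0                                # writing through the view modifies the QImage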
def show_feature_space(self, split, for_group, cut_to_percentile=None, bin_size=128): matrix = self.ca.get_data(for_group, "PCA") xmin, xmax = matrix[:,0].min(), matrix[:,0].max() ymin, ymax = matrix[:,1].min(), matrix[:,1].max() if cut_to_percentile is not None: p_l = lambda x: numpy.percentile(x, cut_to_percentile) p_h = lambda x: numpy.percentile(x, 100-cut_to_percentile) xmin, xmax = p_l(matrix[:,0]), p_h(matrix[:,0]) ymin, ymax = p_l(matrix[:,1]), p_h(matrix[:,1]) bins = (numpy.linspace(xmin, xmax, bin_size), numpy.linspace(ymin, ymax, bin_size)) def mysavefig(filename): ax.set_xlabel("Principal component 1") ax.set_ylabel("Principal component 2") ax.set_xticklabels([]) ax.set_yticklabels([]) ax.set_xlim(xmin, xmax) ax.set_ylim(ymin, ymax) ax.set_aspect(1) plt.tight_layout() plt.savefig(self.ca.output(filename), transparent=True,bbox_inches='tight') plt.clf() with open(self.ca.output("__counts_%s_%s.txt" % ("_".join(for_group), self.ca.classifier.describe())), 'wb') as fh: fh.write("%s\t%s\t%s\n" % (self.ca.name, "_".join(for_group), self.ca.classifier.describe())) fh.write('\n') class_dict = self.ca.get_object_classificaiton_dict() max_class = max(class_dict.keys()) normal_class = range(split) data_normal = self.ca.get_data(for_group, "PCA", in_classes=tuple(normal_class)) ax = plt.subplot(111) image1, image1_org = self.myscatter(ax, data_normal[:, 0], data_normal[:, 1], bins=bins) vigra.impex.writeImage(image1_org.swapaxes(1,0), self.ca.output("%s_%s_%d_%s.tif" % ("_".join(for_group), "normal", image1_org.sum(), self.ca.classifier.describe()))) qimage_normal = qimage2ndarray.gray2qimage(image1, False) qimage_normal.setColorTable(QtColorMapFromHex("#00FF00")) abnormal_class = range(split, max_class+1) data_abnormal = self.ca.get_data(for_group, "PCA", in_classes=tuple(abnormal_class)) ax = plt.subplot(111) image1, image1_org = self.myscatter(ax, data_abnormal[:, 0], data_abnormal[:, 1], bins=bins) vigra.impex.writeImage(image1_org.swapaxes(1,0), self.ca.output("%s_%s_%d_%s.tif" % ("_".join(for_group), "abnormal", image1_org.sum(), self.ca.classifier.describe()))) qimage_abnormal = qimage2ndarray.gray2qimage(image1, False) qimage_abnormal.setColorTable(QtColorMapFromHex("#FF0000")) qpixmap = blend_images_max([qimage_normal, qimage_abnormal]) qpixmap.save(self.ca.output("__sl_%s_%s.png" % ("_".join(for_group), self.ca.classifier.describe()))) fh.write("Data normal\t%d\t%f\n" % (len(data_normal), len(data_normal) / float(len(data_normal) + len(data_abnormal)) )) fh.write("Data abnormal\t%d\t%f\n" % (len(data_abnormal), len(data_abnormal) / float(len(data_normal) + len(data_abnormal)) )) fh.write('\n') ### inlier_target = self.ca.get_data(for_group, "PCA", in_classes=(1,), in_class_type="Predictions") outlier_target = self.ca.get_data(for_group, "PCA", in_classes=(-1,), in_class_type="Predictions") ax = plt.subplot(111) image_in , orig_image_in = self.myscatter(ax, inlier_target [:, 0], inlier_target[:, 1], bins=bins) image_out, orig_image_out = self.myscatter(ax, outlier_target[:, 0], outlier_target[:, 1], bins=bins) fh.write("Inlier\t%d\t%f\n" % (len(inlier_target), len(inlier_target) / float(len(inlier_target) + len(outlier_target)) )) fh.write("Outlier\t%d\t%f\n" % (len(outlier_target), len(outlier_target) / float(len(inlier_target) + len(outlier_target)) )) fh.write('\n') qimage_inlier = qimage2ndarray.gray2qimage(image_in, False) qimage_inlier.setColorTable(QtColorMapFromHex("#00FF00")) qimage_outlier = qimage2ndarray.gray2qimage(image_out, False) 
qimage_outlier.setColorTable(QtColorMapFromHex("#FF0000")) qpixmap = blend_images_max([qimage_inlier, qimage_outlier]) qpixmap.save(self.ca.output("__od_%s_%s.png" % ("_".join(for_group), self.ca.classifier.describe()))) vigra.impex.writeImage(orig_image_in.swapaxes(1,0), self.ca.output("od_inlier_%s_%d_%s.tif" % ("_".join(for_group), orig_image_in.sum(), self.ca.classifier.describe()))) vigra.impex.writeImage(orig_image_out.swapaxes(1,0), self.ca.output("od_outlier_%s_%d_%s.tif" % ("_".join(for_group), orig_image_out.sum(), self.ca.classifier.describe()))) ### cluster_all_0 = self.ca.get_data(for_group, "PCA", in_classes=(0,), in_class_type="Simple clustering") cluster_all_1 = self.ca.get_data(for_group, "PCA", in_classes=(1,), in_class_type="Simple clustering") ax = plt.subplot(111) image_sc0, orig_image_sc0 = self.myscatter(ax, cluster_all_0 [:, 0], cluster_all_0[:, 1], bins=bins) image_sc1, orig_image_sc1 = self.myscatter(ax, cluster_all_1[:, 0], cluster_all_1[:, 1], bins=bins) fh.write("All-cluster 0\t%d\t%f\n" % (len(cluster_all_0), len(cluster_all_0) / float(len(cluster_all_0) + len(cluster_all_1)) )) fh.write("All-cluster 1\t%d\t%f\n" % (len(cluster_all_1), len(cluster_all_1) / float(len(cluster_all_0) + len(cluster_all_1)) )) fh.write('\n') qimage_inlier = qimage2ndarray.gray2qimage(image_sc0, False) qimage_inlier.setColorTable(QtColorMapFromHex("#00FF00")) qimage_outlier = qimage2ndarray.gray2qimage(image_sc1, False) qimage_outlier.setColorTable(QtColorMapFromHex("#FF0000")) qpixmap = blend_images_max([qimage_inlier, qimage_outlier]) qpixmap.save(self.ca.output("__all_cluster_%s.png" % ("_".join(for_group)) )) vigra.impex.writeImage(orig_image_sc0.swapaxes(1,0), self.ca.output("all_cluster_0_%s_%d.tif" % ("_".join(for_group), orig_image_in.sum()))) vigra.impex.writeImage(orig_image_sc1.swapaxes(1,0), self.ca.output("all_cluster_1_%s_%d.tif" % ("_".join(for_group), orig_image_out.sum()))) for c, cname in class_dict.items(): data = self.ca.get_data(for_group, "PCA", in_classes=(c,)) fh.write("Class %d %s\t%d\t%f\n" % (c, cname, len(data), len(data) / float(len(data_normal) + len(data_abnormal)) )) ax = plt.subplot(111) image1, image1_org = self.myscatter(ax, data[:, 0], data[:, 1], bins=bins) vigra.impex.writeImage(image1_org.swapaxes(1,0), self.ca.output("%s_%s_%d_%s.tif" % ("_".join(for_group), "class_%d_%s" % (c, cname), image1_org.sum(), self.ca.classifier.describe()))) fh.write("\n***\nConfusion Outlierdetection\n") conf = self.get_outlier_confusion() for row in conf: fh.write("\t".join(map(str, row)) + "\n") fh.write("\n") for k, v in self.get_stats(conf, split).items(): fh.write("%s\t%f\n" % (k, v)) fh.write("\n***\nSimple Clustering 1\n") conf = self.get_cluster_confusion() for row in conf: fh.write("\t".join(map(str, row)) + "\n") fh.write("\n") for k, v in self.get_stats(conf, split).items(): fh.write("%s\t%f\n" % (k, v)) fh.write("\n***\nSimple Clustering 2\n") conf = self.get_outlier_confusion(compare_to="Simple clustering", outlier_indicator=0) for row in conf: fh.write("\t".join(map(str, row)) + "\n") fh.write("\n") for k, v in self.get_stats(conf, split).items(): fh.write("%s\t%f\n" % (k, v))
def createThumb(i, colormap):
    qimage = qimage2ndarray.gray2qimage(pyfits.getdata(i), normalize=True).scaledToWidth(166)
    for n in range(256):
        qimage.setColor(n, qRgb(*colormap[n][:3]))
    thumbs[i] = qimage
def wait(self):
    d = (np.random.random(self.shape) * 255).astype(np.uint8)
    assert d.ndim == 2
    img = gray2qimage(d)
    return img.convertToFormat(QImage.Format_ARGB32_Premultiplied)
def wait(self):
    a = self._arrayreq.wait()
    a = (a.squeeze() - self._normalize[0]) * 255 / (self._normalize[1] - self._normalize[0])
    img = gray2qimage(a)
    return img.convertToFormat(QImage.Format_ARGB32_Premultiplied)
def paintBoundaries(tgslicesFile, zSlice, d, boundaryData, mask, filename, swapCoordinates=False, colortable=None, penWidth=4.0): print "* making boundary img '%s'" % filename #b = BoundariesLayerPy(tgslicesFile=tgslicesFile, normalAxis=2, data2scene=QTransform(), swapCoordinates=True) b = BoundariesLayer(None, 2, QTransform(), True) n = b.normalAxis() axis = None if swapCoordinates: if n == 0: axis = "z" elif n == 1: axis = "y" else: axis = "x" else: if n == 0: axis = "x" elif n == 1: axis = "y" else: axis = "z" f = h5py.File(tgslicesFile, 'r') group = "%s/%d" % (axis, zSlice) serializedBoundaries = f[group].value f.close() assert d.ndim == 2 scene = QGraphicsScene() b.setSliceNumber( zSlice ) b.setBoundaries(serializedBoundaries) b.setColormap("tyr") assert boundaryData.dtype == numpy.float32 assert mask.dtype == numpy.float32 b.setBoundaryData(boundaryData, boundaryData.size, boundaryData) b.setBoundaryMask(mask, mask.size) if colortable is not None: b.setColormap(colortable) print "setting pen width to be %f" % penWidth b.setPenWidth(float(penWidth)) print "...done" mag = 4 shape = d.shape dBig = vigra.sampling.resizeImageNoInterpolation(d.astype(numpy.float32), (mag*shape[0], mag*shape[1])) #dBig = dBig.swapaxes(0,1) qimg = qimage2ndarray.gray2qimage(dBig) #qimg = qimg.mirrored(True, False) imgItm = QGraphicsPixmapItem(QPixmap(qimg)) imgItm.setScale(1.0/mag) scene.addItem(imgItm) sourceRect = QRectF(0,0,shape[1], shape[0]) targetRect = QRectF(0,0,mag*shape[1], mag*shape[0]) scene.setSceneRect(sourceRect) scene.addItem(b) img = QImage(targetRect.width(), targetRect.height(), QImage.Format_ARGB32); painter = QPainter(img); painter.setRenderHint(QPainter.Antialiasing); scene.render(painter, targetRect, sourceRect ); img.save(filename) painter.end() #print "img has size ", img.width(), img.height() img = None painter = None scene = None