def makeOAMImage(self, imgData, oam, colorTable):
    """Generate an image from OAM information.

    Input:  sprite graphics data, OAM data, color table
    Output: image data (QPixmap)

    Graphics data uses 4 bits per pixel; the smallest accessible unit is an
    8*8 px tile, which takes 32 bytes.
    """
    TILE_WIDTH = 8
    TILE_HEIGHT = 8
    TILE_DATA_SIZE = TILE_WIDTH * TILE_HEIGHT // 2  # in Python 3, // yields an integer quotient

    logger.debug("Image Width:\t" + str(oam.sizeX))
    logger.debug("Image Height:\t" + str(oam.sizeY))
    logger.debug("Flip V:\t" + str(oam.flipV))
    logger.debug("Flip H:\t" + str(oam.flipH))

    startAddr = oam.startTile * TILE_DATA_SIZE  # start address from the start tile (one 8*8 px tile = 32 bytes)
    imgData = imgData[startAddr:]               # slice out the part that is used
    tileX = oam.sizeX // TILE_WIDTH             # convert the size to a tile count
    tileY = oam.sizeY // TILE_HEIGHT
    gbaMap = CommonAction.GbaMap(imgData, tileX, tileY)
    dataImg = gbaMap.getPilImage()

    if oam.flipH == 1:
        dataImg = dataImg.transpose(Image.FLIP_LEFT_RIGHT)  # horizontal flip via PIL
    if oam.flipV == 1:
        dataImg = dataImg.transpose(Image.FLIP_TOP_BOTTOM)

    qImg = ImageQt(dataImg)                 # convert to QImage
    qImg.setColorTable(colorTable)
    pixmap = QtGui.QPixmap.fromImage(qImg)  # convert to QPixmap
    return pixmap
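A quick worked example of the tile arithmetic described in the docstring above; the OAM values used here are made up for illustration.

# Worked example of the tile-offset math used in makeOAMImage (values are illustrative).
TILE_WIDTH = TILE_HEIGHT = 8
TILE_DATA_SIZE = TILE_WIDTH * TILE_HEIGHT // 2  # 4 bpp -> 8*8/2 = 32 bytes per tile

start_tile = 5                              # hypothetical oam.startTile
start_addr = start_tile * TILE_DATA_SIZE    # 5 * 32 = 160 byte offset into imgData
size_x, size_y = 32, 16                     # hypothetical oam.sizeX / oam.sizeY in pixels
tiles_x = size_x // TILE_WIDTH              # 4 tiles across
tiles_y = size_y // TILE_HEIGHT             # 2 tiles down
assert (start_addr, tiles_x, tiles_y) == (160, 4, 2)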
def paintEvent(self, QPaintEvent):
    super(LabelerWindow, self).paintEvent(QPaintEvent)
    p = QtGui.QPainter(self)
    image = ImageQt(Image.fromarray(self.buffer))
    image = image.scaled(self.width(), self.height())
    p.drawImage(0, 0, image)

    # Now for the HUD
    # -> Draw green cross-hairs
    old_pen = p.pen()
    new_pen = QPen()
    new_pen.setColor(Qt.green)
    new_pen.setStyle(Qt.DotLine)
    new_pen.setWidth(1)
    p.setPen(new_pen)
    # integer coordinates: Qt draw calls reject floats under Python 3
    p.drawLine(0, self.height() // 2, self.width(), self.height() // 2)
    p.drawLine(self.width() // 2, 0, self.width() // 2, self.height())
    p.setPen(old_pen)

    # -> Show help for keystrokes
    help = "[X] Pos. [C] Neg. [UP] Zoom in [DN] Zoom Out [LT] Undo Last [RT] Ignore [LMB] Move "
    p.fillRect(0, 0, self.width(), int(p.fontMetrics().height() * 1.5), Qt.gray)
    p.drawText(0, p.fontMetrics().height(), help)
def __init__(self, m, c, p, path, parent=None):
    super(watcher, self).__init__(parent)
    im = PIL.Image.open(path)
    self.color = self.get_dominant_color(im)
    desktop = QtGui.QApplication.desktop()
    width = desktop.width()
    height = desktop.height()
    qim = ImageQt(im)
    self.fixLength = height - 150
    if qim.height() >= qim.width():
        _img = qim.scaled(self.fixLength, self.fixLength, Qt.Qt.KeepAspectRatio)
    else:
        _img = qim.scaled(qim.width() * self.fixLength / qim.height(),
                          qim.width() * self.fixLength / qim.height(),
                          Qt.Qt.KeepAspectRatio)
    self.setStyleSheet("background-color: rgb" + str(self.color) + ";")
    self.setFixedSize(_img.width(), _img.height() + 50)
    self.setWindowIcon(QtGui.QIcon("../pics/icon/icon.png"))
    self.move((width - self.width()) / 2, (height - self.height()) / 2 - 25)
    self.labelPhoto = QtGui.QLabel(self)
    self.labelPhoto.setPixmap(QtGui.QPixmap.fromImage(_img))
    self.labelPhoto.setGeometry(0, 0, _img.width(), _img.height())

    # display the photo name
    font = QtGui.QFont(u'微软雅黑', 15)
    name = path.split('\\')[-1]
    self.labelName = QtGui.QLabel(u'● ' + name, self)
    self.labelName.setGeometry(10, _img.height(), len(name) * 17, 50)
    self.nameColor = (int(255 - self.color[0]), int(255 - self.color[1]), int(255 - self.color[2]))
    self.labelName.setStyleSheet("color: rgb" + str(self.nameColor) + ";")
    self.labelName.setFont(font)
    self.setWindowTitle("xiaoyWatcher v1.0 " + name)

    # display the photo mood
    tipMood = [u'无', u'好', u'良', u'差']
    self.labelmood = QtGui.QLabel(self)
    self.labelmood.setPixmap(QtGui.QPixmap('../pics/watcher/moodGOOD' + str(m) + '.png'))
    self.labelmood.setGeometry(_img.width() - 100, _img.height() + 13, 34, 26)
    self.labelmood.setToolTip(tipMood[m])

    # display the photo's cloud flag
    tipCloud = [u'本地', u'云端']
    if c == 0:
        pathC = '../pics/watcher/cloudNO.png'
    else:
        pathC = '../pics/watcher/cloudYES.png'
    self.labelcloud = QtGui.QLabel(self)
    self.labelcloud.setPixmap(QtGui.QPixmap(pathC))
    self.labelcloud.setGeometry(_img.width() - 150, _img.height() + 14, 34, 21)
    self.labelcloud.setToolTip(tipCloud[c])

    # display the photo comment
    self.comment = QtGui.QLabel(self)
    self.comment.setPixmap(QtGui.QPixmap('../pics/watcher/comment.png'))
    self.comment.setGeometry(_img.width() - 50, _img.height() + 12, 34, 29)
    if p:
        self.comment.setToolTip(u'评论:' + p)
    else:
        self.comment.setToolTip(u'尚未添加评论')
def frame_changed(self, data):
    jpgdata = StringIO(data)
    im = Image.open(jpgdata)
    if self.qimage is None:
        self.qimage = ImageQt(im)
    else:
        # QImage.loadFromData() expects raw bytes, not a PIL image
        self.qimage.loadFromData(data)
    self.qpixmap = self.qpixmap.fromImage(self.qimage, Qt.MonoOnly)
    self.emit("imageReceived", self.qpixmap)
def frame_changed(self, data):
    """
    Displays frame coming from camera
    :param data:
    :return:
    """
    if self._collecting:
        jpgdata = StringIO(data)
        im = Image.open(jpgdata)
        self.qimage = ImageQt(im)
        self.graphics_camera_frame.setPixmap(
            self.qpixmap.fromImage(self.qimage, QtImport.Qt.MonoOnly)
        )
def textToImg(self, plaintext):
    text = base64.b64encode(str(plaintext).encode("utf-8"))
    #text = str(plaintext).encode("utf-8")
    length = len(text)
    m = hashlib.sha256()
    m.update(text)
    texthash = m.hexdigest()
    qrsize = 500
    div = length // qrsize  # integer division so the chunk count can feed range()
    modulus = length % qrsize
    if modulus > 0:
        div = div + 1

    # clear layout
    while not self.qrbox.isEmpty():
        item = self.qrbox.itemAt(0)
        item.widget().setParent(None)
    #self.pic.setPixmap(QtGui.QPixmap.fromImage(imq))
    #self.pic.adjustSize()

    pic = []
    for p in range(div):
        im = self.makeQRCode('bearcode:%s[%dof%d] %s' % (texthash, p + 1, div, text[p * qrsize:min(((p + 1) * qrsize), length)]))
        #imq = ImageQt(im.resize((im.size[0]/1, im.size[1]/1), Image.ANTIALIAS).convert("RGBA"))
        imq = ImageQt(im.convert("RGBA"))
        painter = QtGui.QPainter(imq)
        imqrect = imq.rect()
        painter.setFont(QtGui.QFont("Arial", 24))
        rect = painter.boundingRect(imqrect, 0x84, "%d of %d" % (p + 1, div))
        painter.fillRect(rect, QtGui.QColor(255, 255, 255))
        painter.drawText(imqrect, 0x84, "%d of %d" % (p + 1, div))
        del painter
        thispic = QtGui.QLabel()
        thispic.setPixmap(QtGui.QPixmap.fromImage(imq))
        thispic.setSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        self.qrbox.addWidget(thispic)
        pic.append(thispic)
def openBinary_I(self):
    global binary_i_val
    global last
    global last_operation
    binary_i_val = self.horizontalSlider_2.value()
    self.Binary_inverse.setText(str(binary_i_val))
    originalImage = cv2.imread(location, 0)
    _, thresh1 = cv2.threshold(originalImage, binary_i_val, 255, cv2.THRESH_BINARY_INV)
    im = Image.fromarray(np.uint8(cm.gist_earth(thresh1) * 255))
    qim = ImageQt(im)
    last = binary_i_val
    last_operation = "Binary_I"
    self.image_2.setPixmap(QtGui.QPixmap.fromImage(qim))
def icons(self, x, cb):
    URL = random_keys[x]
    with urllib.request.urlopen(URL) as url:
        f = io.BytesIO(url.read())
    # Checks if colorblind mode is on or off
    if not cb:
        img = Image.open(f)
    else:
        img = Image.open(f).convert("L")  # Changes the colour - Anna
    my_image = ImageQt(img)
    pixmap = QPixmap.fromImage(my_image)
    pixmap = pixmap.scaled(150, 150)
    icon = QIcon()
    icon.addPixmap(pixmap)
    return icon
def pil_to_qt(img_list):
    """Transform a list of pixel arrays into Qt5-readable images.

    Args:
        img_list (list): numpy arrays/lists with pixels

    Returns:
        list: PyQt image objects
    """
    qt_pix_list = []
    for item in img_list:
        imgpix = Image.fromarray(item)
        qt_img = ImageQt(imgpix)
        qt_pix_list.append(qt_img)
    return qt_pix_list
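A minimal usage sketch for pil_to_qt, assuming PyQt5 and a dummy numpy frame; the widget and variable names here are illustrative only.

import numpy as np
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QApplication, QLabel

app = QApplication([])
frames = [np.zeros((64, 64, 3), dtype=np.uint8)]  # one black 64x64 RGB frame
qt_images = pil_to_qt(frames)                     # list of ImageQt (QImage) objects
label = QLabel()
label.setPixmap(QPixmap.fromImage(qt_images[0]))  # keep qt_images alive while displayed
label.show()
app.exec_()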
def loadedImage_changed(self, img: ImageWrapper = None):
    if img is not None:
        self.image = img
        self.image.draw_image()
        self.fileWidthLabel.setText(str(img.image_element.width))
        self.fileHeightLabel.setText(str(img.image_element.height))
        self.fileNameLabel.setText(img.filename)
        self.filePathLabel.setText(img.file_path)
        self.fileLayersLabel.setText(str(len(img.channels)))

        qim = ImageQt(self.image.image_element)
        pixmap = QPixmap.fromImage(qim).scaled(self.imageLabel.width(), self.imageLabel.height(),
                                               QtCore.Qt.KeepAspectRatio)
        self.imageLabel.setPixmap(pixmap)
def create_filter_tabs(self, mode):
    label = QLabel(self)
    saturation_filtered_img = self.img_filter.filter_hsv(mode)
    pixmap = QPixmap.fromImage(
        ImageQt(saturation_filtered_img.convert("RGBA")))
    label.setPixmap(pixmap)
    label.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)

    scrollArea = QtWidgets.QScrollArea()
    scrollArea.setBackgroundRole(QtGui.QPalette.Dark)
    scrollArea.setWidget(label)
    scrollArea.setWidgetResizable(True)  # use the setter; assigning the attribute has no effect
    self.image_tab.addTab(scrollArea, mode)
    self.resize(pixmap.width(), pixmap.height())
def __init__(self, orig_image, file_name, encoding, location):
    top, right, bottom, left = location
    self.orig_image = orig_image
    self.orig_image_filename = file_name
    self.pil_patch = Image.fromarray(self.orig_image[top:bottom, left:right])
    self.image_with_box = Image.open(file_name).convert("RGBA")
    draw = ImageDraw.Draw(self.image_with_box)
    drawrect(draw, [(left, bottom), (right, top)], outline="red", width=8)
    del draw
    qim = ImageQt(self.image_with_box)
    self.pixmap_with_box = QPixmap.fromImage(qim)
    self.encoding = encoding
    self.location = location
    self.match_quality = 0.0
def __init__(self, Parent):
    QWidget.__init__(self)
    self.setupUi(self)
    self.Image = QImage(self.size(), QImage.Format_RGB32)
    self.Image.fill(Qt.white)
    self.dx = Parent.widget.x() / 2
    self.dy = Parent.widget.y() / 2
    self.readOnly = False
    self.drawing = False
    self.penSize = 2
    self.penColor = Qt.black
def on_action_open_triggered(self):
    self.fileName = QtWidgets.QFileDialog.getOpenFileName(
        self, "选择想要编辑的图片--wjsaya.top", "./",
        "常见图片格式(*.jpeg *.jpg *.png *.bmp) ;; 所有文件 (*)")
    if self.fileName[0] == '' and self.fileName[1] == '':
        # do nothing if no image was selected
        return
    self.statusBar().showMessage("已打开:" + self.fileName[0])
    #self.__no_scaled_origin_pixmap = QtGui.QPixmap(self.fileName[0])
    img = Image.open(self.fileName[0]).convert('RGBA')
    self.__no_scaled_origin_pixmap = QtGui.QPixmap.fromImage(ImageQt(img))
    self.__no_scaled_big_prev_pixmap = self.__no_scaled_origin_pixmap.copy()
    self.big_prev.setPixmap(self.__no_scaled_origin_pixmap)
    self.put_thumb_prev(label_name='thumb_origin')
def put_image_into_scene(self):
    if self._image:
        scale_factor = min(
            float(self._image_view.size().width() - 2) / self._image.size[0],
            float(self._image_view.size().height() - 2) / self._image.size[1])
        resized_image = self._image.resize(
            (int(scale_factor * self._image.size[0]),
             int(scale_factor * self._image.size[1])),
            self.quality)
        QtImage = ImageQt(resized_image)
        pixmap = QPixmap.fromImage(QtImage)
        self._scene.clear()
        self._scene.addPixmap(pixmap)
def run(self):
    dbgp(("about to run Runnable:", self.url))
    q_img = None
    if self.url in self.img_cache:
        dbgp("{} in cache !!!".format(self.url))
        q_img = self.img_cache[self.url]
    else:
        downloaded_img = Image.open(requests.get(self.url, stream=True).raw)
        # copy so the QImage is not garbage-collected along with the PIL buffer, see
        # https://stackoverflow.com/questions/61354609/pyqt5-setpixmap-crashing-when-trying-to-show-image-in-label
        q_img = QImage(ImageQt(downloaded_img).copy())
        self.img_cache[self.url] = q_img
    dbgp(("finished:", self.url))
    self.runnable_signal.done.emit(QPixmap.fromImage(q_img))
def displayImage(self, filePath):
    img = PIL.Image.open(filePath)
    imgWidth, imgHeight = img.size
    cropArea = (0, 0, imgWidth, imgHeight)
    if imgWidth > imgHeight:
        cropArea = ((imgWidth/2) - (imgHeight/2), 0, (imgWidth/2) + (imgHeight/2), imgHeight)
    elif imgWidth < imgHeight:
        cropArea = (0, (imgHeight/2) - (imgWidth/2), imgWidth, (imgHeight/2) + (imgWidth/2))
    img = img.crop(cropArea)
    img = img.resize((28, 28), Image.ANTIALIAS)
    img = ImageQt(img)
    self.imgLabel.setPixmap(QPixmap.fromImage(img).scaled(175, 175, QtCore.Qt.KeepAspectRatio,
                                                          QtCore.Qt.FastTransformation))
    self.imgLabel.resize(175, 175)
    self.imgLabel.move(277, 47)
def filter_gray(self):
    width = self.image.size[0]
    height = self.image.size[1]
    pix = self.image.load()
    for i in range(width):
        for j in range(height):
            a = pix[i, j][0]
            b = pix[i, j][1]
            c = pix[i, j][2]
            S = (a + b + c) // 3
            self.image_draw.point((i, j), (S, S, S))
    img_tmp = ImageQt(self.image.convert('RGBA'))
    return img_tmp
def showPicture(self):
    # path, _ = QFileDialog.getOpenFileName(self, '选择图片', 'D:\Python\kears-yolov3-dev\OpenCVtest',
    #                                       'Image files(*.jpg *.gif *.png)')
    # img = Image.open(path)
    # r_image = self.yolo.detect_image(img)  # r_image is in PIL image format
    image = Image.open('D:\Python\kears-yolov3-dev\OpenCVtest\huajuan1111.jpg')
    r_image = self.yolo.detect_image(image)
    qim = ImageQt(r_image)                   # convert PIL -> Pixmap
    pix = QtGui.QPixmap.fromImage(qim)
    self.PhotoLabel.setPixmap(pix)           # push the image to the UI
    self.PhotoLabel.setScaledContents(True)
def UpdateViewer(self, index):
    resampler = Image.NEAREST
    if self.CUR_BICUBIC:
        resampler = Image.BICUBIC
    cam = self.CUR_PC
    if cam is None:
        return
    for i in range(0, 30):
        self.lblPhoto[i].setStyleSheet(
            "border-top: 1px solid #adadad; border-left: 1px solid #adadad; border-bottom: 1px solid #ffffff; border-right: 1px solid #ffffff;"
        )
    if index == 0:
        self.CUR_PIC = ImageQt(
            cam.GetPicture(0).convert("RGBA").resize((256, 224), resampler))
    else:
        self.CUR_PIC = ImageQt(
            cam.GetPicture(index).convert("RGBA").resize((256, 224), resampler))
        self.lblPhoto[index - 1].setStyleSheet(
            "border: 3px solid green; padding: 1px;")
    qpixmap = QtGui.QPixmap.fromImage(self.CUR_PIC)
    self.lblPhotoViewer.setPixmap(qpixmap)
def print_report(self):
    if not self.validate_report_dates():
        return
    entries = self.model.get_records(
        self.view.report_date_from.date().toPython(),
        self.view.report_date_to.date().toPython(),
        self.view.comboBox.currentText(),
    )
    balance = self.model.get_balance(
        self.view.report_date_from.date().toPython(),
        self.view.report_date_to.date().toPython(),
    )
    if not self.create_pdf(self.model.pdf_file.name, entries, balance):
        return
    printer = QPrinter(QPrinter.HighResolution)
    dialog = QPrintDialog(printer)
    if dialog.exec_() != QPrintDialog.Accepted:
        return
    with TemporaryDirectory() as path:
        images = convert_from_path(self.model.pdf_file.name, dpi=300, output_folder=path)
        painter = QPainter()
        painter.begin(printer)
        for i, image in enumerate(images):
            if i > 0:
                printer.newPage()
            rect = painter.viewport()
            qtImage = ImageQt(image)
            qtImageScaled = qtImage.scaled(rect.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation)
            painter.drawImage(rect, qtImageScaled)
        painter.end()
def showImage(self, img):
    rgba_img = img.convert("RGBA")
    qim = ImageQt(rgba_img)
    pix = QPixmap.fromImage(qim)
    self.lbl.deleteLater()
    self.lbl = QLabel(self)
    self.lbl.setPixmap(pix)
    self.lbl.resize(pix.width(), pix.height())
    width = self.geometry().width()
    height = self.geometry().height()
    # QWidget.move() needs integer coordinates
    self.lbl.move(int(width / 2 - pix.width() / 2), int(height / 2 - pix.height() / 2))
    self.lbl.updateGeometry()
    self.lbl.update()
    self.update()
    self.lbl.show()
def get_gray(self):
    image = self.__load_image()
    draw = ImageDraw.Draw(image)
    width = image.size[0]
    height = image.size[1]
    pix = image.load()
    for i in range(width):
        for j in range(height):
            a = pix[i, j][0]
            b = pix[i, j][1]
            c = pix[i, j][2]
            S = (a + b + c) // 3  # channel average; without the division the value saturates at 255
            draw.point((i, j), (S, S, S))
    return ImageQt(image.convert('RGBA'))
def setBackground(self, intCount, cpuTemp, gpuTemp, ipTemp):
    global tempArray
    img2 = runDirectory + "/BackgroundBlack.png"
    img = runDirectory + "/Background" + str(intCount) + ".png"
    palette = QPalette()  # set up the palette
    palette.setBrush(
        QPalette.Background,
        QBrush(QPixmap.fromImage(ImageQt(CombineImage(img, img2)))))
    self.setPalette(palette)
    #self.label_Temp.setText("CPU温度:"+str(cpuTemp) +" GPU温度:"+ str(gpuTemp))
    self.label_Temp.setText("CPU:" + str(round(cpuTemp)) + " GPU:" + str(round(gpuTemp)) +
                            " " + tempArray[0] + " " + tempArray[21])
    self.label_IP.setText("IP:" + ipTemp)
def generate_image(self, x, y):
    input = [x, y]
    input = torch.Tensor(input)
    input = input.view(1, 2)
    output = self.net(input)
    array = output.view(100, 100).detach().numpy()
    array = np.array(array, dtype=np.uint8)
    new_image = Image.fromarray(array)
    new_image = new_image.resize((150, 150), Image.ANTIALIAS)
    qim = ImageQt(new_image)
    pix = QPixmap.fromImage(qim)
    return pix
def requestPixmap(self, data):
    qr = qrcode.QRCode(
        version=1,
        error_correction=qrcode.constants.ERROR_CORRECT_L,
        box_size=10,
        border=4,
    )
    qr.add_data(data)
    qr.make(fit=True)
    img = qr.make_image(fill_color="black", back_color="white")
    imgQt = ImageQt(img.convert("RGB"))  # keep a reference!
    pixmap = QPixmap.fromImage(imgQt)
    pixmap = pixmap.scaled(10, 10)
    return pixmap
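A minimal, hypothetical caller for requestPixmap; the provider instance and the QLabel are assumptions, and note that the method returns the pixmap already scaled down to 10x10.

# provider is assumed to be an instance of the class defining requestPixmap above
label = QLabel()
label.setPixmap(provider.requestPixmap("https://example.com"))
label.show()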
def generateImage(self):
    latent_space = list(latent_params.values())
    latent_space = np.reshape(latent_space, [1, 100])
    generation = self.model.predict(latent_space)  # a single forward pass is enough
    #print(generation.shape)
    generation = np.reshape(generation, [512, 512])
    im = Image.fromarray(np.uint8(generation * 255), 'L')
    qim = ImageQt(im)
    pix = QtGui.QPixmap.fromImage(qim)
    pixmap = QtGui.QPixmap(pix)
    # pix = QtGui.QPixmap.fromImage(qim)
    self.graphicLabel.setPixmap(pixmap)
    self.graphicLabel.update()
    print(list(latent_params.values()))
def showQR(self):
    from PIL.ImageQt import ImageQt
    import qrcode
    laddr = str(get_ip())
    qim = ImageQt(qrcode.make(self.url(laddr)))
    pix = QPixmap.fromImage(qim)
    d = QDialog(self)
    l = QLabel(d)
    l.setFixedSize(480, 480)
    l.setPixmap(pix)
    d.adjustSize()
    d.show()
def search_image_on_click(self):
    line_edit_value = self.my_line_edit.text()
    if line_edit_value == "":
        self.pic_title.setText(
            "<h2>Please enter search term in text box</h2>")
    else:
        try:
            global currentimage
            currentimage = getimage(line_edit_value, 0)
            qimage = ImageQt(currentimage)
            pixmap = QtGui.QPixmap.fromImage(qimage)
            self.pic.setPixmap(pixmap)
        except IndexError:
            self.pic_title.setText("<h2>No Matching Images</h2>")
def generate(self):
    if self.data is None:
        return -1
    scene = QGraphicsScene()
    qr = qrcode.QRCode(version=2,
                       error_correction=qrcode.constants.ERROR_CORRECT_M,
                       border=1)
    qr.add_data(self.data)
    qr.make()
    self.img_file = qr.make_image()
    qim = ImageQt(self.img_file)
    pixmap = QPixmap.fromImage(qim).scaled(256, 256)
    scene.addPixmap(pixmap)
    self.ui.graphicsView.setScene(scene)
def update_player_preview(self):
    try:
        image = self.assets.species().render_player(
            self.player, self.preview_armor)
        pixmap = QPixmap.fromImage(ImageQt(image))
    except (OSError, TypeError, AttributeError):
        # TODO: more specific error handling. may as well except all errors
        # at this point jeez
        logging.exception("Couldn't load species images")
        pixmap = QPixmap()
    self.ui.player_preview.setStyleSheet("background-color: %s;" % self.preview_bg)
    self.ui.player_preview.setPixmap(pixmap)
    self.window.setWindowModified(True)
def updateImage(self):
    if self.imgName is not None:
        overlay = segmentImage(self.segModelProto, self.segModelCaffe, self.imgName)
        newMasks = ImageQt(overlay)
        self.origSeg = QtGui.QPixmap.fromImage(newMasks)
        overlay = defAlpha(overlay, 100, 1)
        overlay = ImageQt(overlay)
        self.lastSeg = self.segs
        self.segs = QtGui.QPixmap.fromImage(overlay)
        self.segment.setPixmap(QtGui.QPixmap(self.segs))
        self.maskStackStart = self.segs.toImage()    # QPixmap -> QImage
        self.segs.save("LastSeg.png")
        self.origSeg.save("LastSegO.png")
        self.origStackStart = self.origSeg.toImage()
        self.maskStack = []
        self.origStack = []
        self.maskStack.append(self.maskStackStart)
        self.origStack.append(self.origStackStart)
        self.segment.show()
def setCode(self, position, x, y, w, h, text=""):
    self.text = text
    qrImg = qrcode.make(text)
    imgQt = ImageQt(qrImg.convert("RGB"))  # keep a reference!
    pixm = QtGui.QPixmap.fromImage(imgQt)
    if position == "qrboardleft":
        self.qrboardleft.setGeometry(x, y, w, h)
        self.qrboardleft.setPixmap(
            pixm.scaled(w, h, QtCore.Qt.KeepAspectRatio))
        self.qrboardleft.show()
    if position == "qrboardright":
        self.qrboardright.setGeometry(x, y, w, h)
        self.qrboardright.setPixmap(
            pixm.scaled(w, h, QtCore.Qt.KeepAspectRatio))
        self.qrboardright.show()
def get_picture(filter=None):
    try:
        fnames = QFileDialog.getOpenFileName()
        fname = fnames[0]
        image = Image.open(fname)
        image = image.resize((125, 125), Image.ANTIALIAS)
        if filter:
            image = filter(image)
        image.save('curr_image.jpg')
        img_tmp = ImageQt(image.convert('RGBA'))
        pixmap = QPixmap.fromImage(img_tmp)
        chat_window.ResImg.setPixmap(pixmap)
    except Exception as e:
        print(e)
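A sketch of a filter callable that get_picture() could accept: any function that takes and returns a PIL image works; the grayscale example below is just an illustration, not part of the original code.

from PIL import ImageOps

def to_grayscale(img):
    # any callable that takes and returns a PIL image can be passed as `filter`
    return ImageOps.grayscale(img)

get_picture(filter=to_grayscale)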
def fotoGri(self):
    foto = self.fotoOlustur()
    self.lbMesaj.setScaledContents(True)
    gri = Image.new('L', (foto.size[0], foto.size[1]))
    for i in range(foto.size[0]):
        for j in range(foto.size[1]):
            r, g, b = foto.getpixel((i, j))
            value = (r + g + b) / 3
            value = int(value)
            gri.putpixel((i, j), value)
    self.fotograg = ImageQt(gri)
    pixMap = QPixmap.fromImage(self.fotograg)
    self.lbMesaj.setPixmap(pixMap)
    self.fotoSet(gri)
def fotoOteleme(self):
    foto = self.fotoOlustur()
    yeni = Image.new('RGB', (foto.size[0], foto.size[1]))
    self.lbMesaj.setScaledContents(True)
    for i in range(0, (foto.size[0])):
        for j in range(0, (foto.size[1])):
            if i < 100 or j < 100:
                yeni.putpixel((i, j), (0, 0, 0))
            else:
                r, g, b = foto.getpixel((i - 100, j - 100))
                yeni.putpixel((i, j), (r, g, b))
    self.fotograg = ImageQt(yeni)
    pixMap = QPixmap.fromImage(self.fotograg)
    self.lbMesaj.setPixmap(pixMap)
    self.fotoSet(yeni)
class EMBLXrayImaging(QtGraphicsManager, AbstractCollect): """ Based on the collection and graphics """ def __init__(self, *args): QtGraphicsManager.__init__(self, *args) AbstractCollect.__init__(self, *args) self.ff_apply = False self.ff_ssim = None self.qimage = None self.qpixmap = None self.image_count = 0 self.image_reading_thread = None self.image_processing_thread = None self.ff_corrected_list = [] self.config_dict = {} self.collect_omega_start = 0 self.omega_start = 0 self.omega_move_enabled = False self.last_image_index = None self.image_dimension = (0, 0) self.graphics_camera_frame = None self.image_polling = None self.repeat_image_play = None self.current_image_index = None self.mouse_hold = False self.mouse_coord = [0, 0] self.centering_started = 0 self.current_dc_parameters = None self._previous_collect_status = None self._actual_collect_status = None self._failed = False self._number_of_images = 0 self._collect_frame = 0 self.printed_warnings = [] self.printed_errors = [] self.chan_collect_status = None self.chan_collect_frame = None self.chan_collect_error = None self.chan_camera_error = None self.chan_camera_warning = None self.chan_frame = None self.chan_ff_ssim = None self.cmd_collect_compression = None self.cmd_collect_detector = None self.cmd_collect_directory = None self.cmd_collect_exposure_time = None self.cmd_collect_in_queue = None self.cmd_collect_num_images = None self.cmd_collect_overlap = None self.cmd_collect_range = None self.cmd_collect_scan_type = None self.cmd_collect_shutter = None self.cmd_collect_start_angle = None self.cmd_collect_template = None self.cmd_collect_start = None self.cmd_collect_shutterless = None self.cmd_collect_abort = None self.cmd_collect_ff_num_images = None self.cmd_collect_ff_offset = None self.cmd_collect_ff_pre = None self.cmd_collect_ff_post = None self.cmd_camera_trigger = None self.cmd_camera_live_view = None self.cmd_camera_write_data = None self.cmd_camera_ff_ssim = None self.beam_focusing_hwobj = None self.session_hwobj = None def init(self): AbstractCollect.init(self) self.ready_event = gevent.event.Event() self.image_dimension = (2048, 2048) QtGraphicsManager.init(self) self.disconnect(self.camera_hwobj, "imageReceived", self.camera_image_received) self.disconnect( self.diffractometer_hwobj, "minidiffStateChanged", self.diffractometer_state_changed, ) self.disconnect( self.diffractometer_hwobj, "centringStarted", self.diffractometer_centring_started, ) self.disconnect( self.diffractometer_hwobj, "centringAccepted", self.create_centring_point ) self.disconnect( self.diffractometer_hwobj, "centringSuccessful", self.diffractometer_centring_successful, ) self.disconnect( self.diffractometer_hwobj, "centringFailed", self.diffractometer_centring_failed, ) self.disconnect( self.diffractometer_hwobj, "pixelsPerMmChanged", self.diffractometer_pixels_per_mm_changed, ) self.disconnect( self.diffractometer_hwobj, "omegaReferenceChanged", self.diffractometer_omega_reference_changed, ) self.disconnect( self.diffractometer_hwobj, "minidiffPhaseChanged", self.diffractometer_phase_changed, ) self.diffractometer_pixels_per_mm_changed((20.0, 20.0)) self.camera_hwobj = None self.graphics_scale_item.set_start_position(20, self.image_dimension[1] - 20) self.graphics_scale_item.set_custom_pen_color(Colors.BLUE) self.graphics_omega_reference_item.set_custom_pen_color(Colors.DARK_BLUE) self.graphics_measure_distance_item.set_custom_pen_color(Colors.DARK_BLUE) self.graphics_beam_item.hide() 
self.graphics_view.scene().measureItemChanged.connect(self.measure_item_changed) self.graphics_view.scene().setSceneRect( 0, 0, self.image_dimension[0], self.image_dimension[1] ) self.qimage = QtImport.QImage() self.qpixmap = QtImport.QPixmap() self.chan_frame = self.getChannelObject("chanFrame") self.chan_frame.connectSignal("update", self.frame_changed) self.chan_ff_ssim = self.getChannelObject("chanFFSSIM") self.chan_ff_ssim.connectSignal("update", self.ff_ssim_changed) self.chan_collect_status = self.getChannelObject("collectStatus") self._actual_collect_status = self.chan_collect_status.getValue() self.chan_collect_status.connectSignal("update", self.collect_status_update) self.chan_collect_frame = self.getChannelObject("chanFrameCount") self.chan_collect_frame.connectSignal("update", self.collect_frame_update) self.chan_collect_error = self.getChannelObject("collectError") self.chan_collect_error.connectSignal("update", self.collect_error_update) self.chan_camera_warning = self.getChannelObject("cameraWarning") self.chan_camera_warning.connectSignal("update", self.camera_warning_update) self.chan_camera_error = self.getChannelObject("cameraError") self.chan_camera_error.connectSignal("update", self.camera_error_update) self.cmd_collect_detector = self.getCommandObject("collectDetector") self.cmd_collect_directory = self.getCommandObject("collectDirectory") self.cmd_collect_exposure_time = self.getCommandObject("collectExposureTime") self.cmd_collect_in_queue = self.getCommandObject("collectInQueue") self.cmd_collect_num_images = self.getCommandObject("collectNumImages") self.cmd_collect_range = self.getCommandObject("collectRange") self.cmd_collect_scan_type = self.getCommandObject("collectScanType") self.cmd_collect_shutter = self.getCommandObject("collectShutter") self.cmd_collect_shutterless = self.getCommandObject("collectShutterless") self.cmd_collect_start_angle = self.getCommandObject("collectStartAngle") self.cmd_collect_template = self.getCommandObject("collectTemplate") self.cmd_collect_ff_num_images = self.getCommandObject("collectFFNumImages") self.cmd_collect_ff_offset = self.getCommandObject("collectFFOffset") self.cmd_collect_ff_pre = self.getCommandObject("collectFFPre") self.cmd_collect_ff_post = self.getCommandObject("collectFFPost") self.cmd_camera_trigger = self.getCommandObject("cameraTrigger") self.cmd_camera_live_view = self.getCommandObject("cameraLiveView") self.cmd_camera_write_data = self.getCommandObject("cameraWriteData") self.cmd_camera_ff_ssim = self.getCommandObject("cameraFFSSIM") self.cmd_collect_start = self.getCommandObject("collectStart") self.cmd_collect_abort = self.getCommandObject("collectAbort") self.beam_focusing_hwobj = self.getObjectByRole("beam_focusing") self.session_hwobj = self.getObjectByRole("session") def frame_changed(self, data): """ Displays frame comming from camera :param data: :return: """ if self._collecting: jpgdata = StringIO(data) im = Image.open(jpgdata) self.qimage = ImageQt(im) self.graphics_camera_frame.setPixmap( self.qpixmap.fromImage(self.qimage, QtImport.Qt.MonoOnly) ) def ff_ssim_changed(self, value): """ Updates ff ssim :param value: list of lists :return: """ if self._collecting: self.ff_ssim = list(value) self.ff_ssim.sort() def mouse_clicked(self, pos_x, pos_y, left_click): """ Mouse click event for centering :param pos_x: int :param pos_y: int :param left_click: boolean :return: """ QtGraphicsManager.mouse_clicked(self, pos_x, pos_y, left_click) # self.mouse_hold = True # self.mouse_coord = [pos_x, pos_y] if 
self.centering_started: self.diffractometer_hwobj.image_clicked(pos_x, pos_y) self.play_image_relative(90) # self.diffractometer_hwobj.move_omega_relative(90, timeout=5) self.centering_started -= 1 def mouse_released(self, pos_x, pos_y): """ Mouse release event :param pos_x: :param pos_y: :return: """ QtGraphicsManager.mouse_released(self, pos_x, pos_y) self.mouse_hold = False def mouse_moved(self, pos_x, pos_y): """ Mouse move event :param pos_x: :param pos_y: :return: """ QtGraphicsManager.mouse_moved(self, pos_x, pos_y) if self.mouse_hold: if self.mouse_coord[0] - pos_x > 0: index = self.current_image_index + 1 elif self.mouse_coord[0] - pos_x < 0: index = self.current_image_index - 1 else: return if index < 0: index = self.image_count - 1 elif index >= self.image_count: index = 0 self.mouse_coord[0] = pos_x self.display_image(index) def measure_item_changed(self, measured_points, measured_pix_num): """ Updates image measurement item :param measured_points: :param measured_pix_num: :return: """ start_x = measured_points[0].x() start_y = measured_points[0].y() end_x = measured_points[1].x() end_y = measured_points[1].y() if self.image_reading_thread is None: im = np.array(self.qimage.bits()).reshape( self.qimage.width(), self.qimage.height() ) else: im = self.image_reading_thread.get_raw_image(self.current_image_index) # im_slice = im[start_x:start_y,end_x,end_y] # print im_slice.size, im_slice x = np.linspace(start_x, end_x, measured_pix_num) y = np.linspace(start_y, end_y, measured_pix_num) zi = ndimage.map_coordinates(im, np.vstack((x, y))) self.emit("measureItemChanged", zi) def get_graphics_view(self): """ Returns graphics view :return: """ return self.graphics_view def set_repeate_image_play(self, value): """ Sets repeat the image play :param value: :return: """ self.repeat_image_play = value def set_graphics_scene_size(self, size, fixed): pass def pre_execute(self, data_model): """ Pre execute method :param data_model: :return: """ self._failed = False """ if self.beam_focusing_hwobj.get_focus_mode() != "imaging": self._error_msg = "Beamline is not in Imaging mode" self.emit("collectFailed", self._error_msg) logging.getLogger("GUI").error("Imaging: Error during acquisition (%s)" % self._error_msg) self.ready_event.set() self._collecting = False self._failed = True return """ self.emit("progressInit", ("Image acquisition", 100, False)) self._collect_frame = 0 self.printed_warnings = [] self.printed_errors = [] self.ff_ssim = None self.config_dict = {} path_template = data_model.acquisitions[0].path_template acq_params = data_model.acquisitions[0].acquisition_parameters im_params = data_model.xray_imaging_parameters self._number_of_images = acq_params.num_images if im_params.detector_distance: delta = im_params.detector_distance - self.detector_hwobj.get_distance() if abs(delta) > 0.0001: tine.set( "/P14/P14DetTrans/ComHorTrans", "IncrementMove.START", -0.003482 * delta, ) self.detector_hwobj.set_distance( im_params.detector_distance, wait=True, timeout=30 ) self.cmd_collect_detector("pco") self.cmd_collect_directory(str(path_template.directory)) self.cmd_collect_template(str(path_template.get_image_file_name())) self.cmd_collect_scan_type("xrimg") self.cmd_collect_exposure_time(acq_params.exp_time) self.cmd_collect_num_images(acq_params.num_images) self.cmd_collect_start_angle(acq_params.osc_start) self.cmd_collect_range(acq_params.osc_range) self.cmd_collect_in_queue(acq_params.in_queue != False) shutter_name = self.detector_hwobj.get_shutter_name() 
self.cmd_collect_shutter(shutter_name) self.cmd_collect_ff_num_images(im_params.ff_num_images) self.cmd_collect_ff_offset( [ im_params.sample_offset_a, im_params.sample_offset_b, im_params.sample_offset_c, ] ) self.cmd_collect_ff_pre(im_params.ff_pre) self.cmd_collect_ff_post(im_params.ff_post) if acq_params.osc_range == 0: self.cmd_camera_trigger(False) else: self.cmd_camera_trigger(True) self.cmd_camera_live_view(im_params.ff_apply) self.cmd_camera_write_data(im_params.camera_write_data) self.cmd_camera_ff_ssim(im_params.ff_ssim_enabled) self.set_osc_start(acq_params.osc_start) self.current_dc_parameters = qmo.to_collect_dict( data_model, self.session_hwobj, qmo.Sample() )[0] self.current_dc_parameters["status"] = "Running" self.current_dc_parameters["comments"] = "" self.store_data_collection_in_lims() def execute(self, data_model): """ Main execute method :param data_model: :return: """ if not self._failed: self._collecting = True self.ready_event.clear() gevent.spawn(self.cmd_collect_start) # self.cmd_collect_start() # if data_model.xray_imaging_parameters.camera_write_data: # self.read_images_task = gevent.spawn(self.load_images, None, None, None, data_model) self.ready_event.wait() self.ready_event.clear() def post_execute(self, data_model): """ Stores results in ispyb :param data_model: :return: """ self.emit("progressStop", ()) self._collecting = False acq_params = data_model.acquisitions[0].acquisition_parameters path_template = data_model.acquisitions[0].path_template filename_template = "%s_%d_%" + str(path_template.precision) + "d" config_filename = ( filename_template % ( path_template.base_prefix, path_template.run_number, path_template.start_num, ) + ".json" ) #config_file_path = os.path.join(path_template.directory, config_filename) archive_config_path = os.path.join( path_template.get_archive_directory(), config_filename ) self.config_dict = { "collect": acq_params.as_dict(), "path": path_template.as_dict(), "imaging": data_model.xray_imaging_parameters.as_dict(), "ff_ssim": None, } if data_model.xray_imaging_parameters.ff_pre: self.config_dict["ff_ssim"] = self.ff_ssim try: if not os.path.exists(path_template.get_archive_directory()): os.makedirs(path_template.get_archive_directory()) with open(archive_config_path, "w") as outfile: json.dump(self.config_dict, outfile) logging.getLogger("GUI").info( "Imaging: Acquisition parameters saved in %s" % archive_config_path ) except BaseException: logging.getLogger("GUI").error( "Imaging: Unable to save acquisition parameters in %s" % archive_config_path ) self.current_dc_parameters["status"] = "Data collection successful" self.update_data_collection_in_lims() # Copy first and last image to ispyb if self.image_reading_thread is not None: image_filename = ( filename_template % ( path_template.base_prefix, path_template.run_number, path_template.start_num, ) + ".jpeg" ) image_filename = os.path.join( path_template.get_archive_directory(), image_filename ) # misc.imsave(image_filename, self.image_reading_thread.get_raw_image(0)) self.store_image_in_lims(0) if acq_params.num_images > 1: image_filename = ( filename_template % ( path_template.base_prefix, path_template.run_number, acq_params.num_images - 1, ) + ".jpeg" ) image_filename = os.path.join( path_template.get_archive_directory(), image_filename ) # misc.imsave(image_filename, self.image_reading_thread.get_raw_image(0)) self.store_image_in_lims(acq_params.num_images - 1) @task def _take_crystal_snapshot(self, filename): """Saves crystal snapshot""" 
self.graphics_manager_hwobj.save_scene_snapshot(filename) def data_collection_hook(self): """ Not implemented :return: """ pass def move_motors(self, motor_position_dict): """ Not implemented :param motor_position_dict: :return: """ pass def trigger_auto_processing(self, process_event, frame_number): """ Not implemented :param process_event: :param frame_number: :return: """ pass def collect_status_update(self, status): """Status event that controls execution :param status: collection status :type status: string """ if status != self._actual_collect_status: self._previous_collect_status = self._actual_collect_status self._actual_collect_status = status if self._collecting: if self._actual_collect_status == "error": self.emit("collectFailed", self._error_msg) logging.getLogger("GUI").error( "Imaging: Error during the acquisition (%s)" % self._error_msg ) self.ready_event.set() self._collecting = False if self._previous_collect_status is None: if self._actual_collect_status == "busy": self.print_log( "GUI", "info", "Imaging: Preparing acquisition..." ) elif self._previous_collect_status == "busy": if self._actual_collect_status == "collecting": self.emit("collectStarted", (None, 1)) self.print_log("GUI", "info", "Imaging: Acquisition started") elif self._actual_collect_status == "ready": self.ready_event.set() self._collecting = False elif self._previous_collect_status == "collecting": if self._actual_collect_status == "ready": self.ready_event.set() self._collecting = False if self.ff_ssim is None: self.ff_ssim_changed(self.chan_ff_ssim.getValue()) logging.getLogger("GUI").info("Imaging: Acquisition done") elif self._actual_collect_status == "aborting": self.print_log("HWR", "info", "Imaging: Aborting...") self.ready_event.set() self._collecting = False def collect_error_update(self, error_msg): """Collect error behaviour :param error_msg: error message :type error_msg: string """ if self._collecting and len(error_msg) > 0: self._error_msg = error_msg.replace("\n", "") logging.getLogger("GUI").error( "Imaging: Error from detector server: %s" % error_msg ) def collect_frame_update(self, frame): """Image frame update :param frame: frame num :type frame: int """ if self._collecting: if self._collect_frame != frame: self._collect_frame = frame self.emit( "progressStep", (int(float(frame) / self._number_of_images * 100)) ) self.emit("collectImageTaken", frame) def camera_warning_update(self, warning_str): """ Displays camera warnings :param warning_str: :return: """ if self._collecting: if warning_str.endswith("\n"): warning_str = warning_str[:-1] if warning_str.startswith("\n"): warning_str = warning_str[1:] warning_list = warning_str.split("\n") for warning in warning_list: if warning and warning not in self.printed_warnings: logging.getLogger("GUI").warning( "Imaging: PCO camera warning: %s" % warning ) self.printed_warnings.append(warning) def camera_error_update(self, error_str): """ Displays camera errors :param error_str: :return: """ if self._collecting: if error_str.endswith("\n"): error_str = error_str[:-1] if error_str.startswith("\n"): error_str = error_str[1:] error_list = error_str.split("\n") for error in error_list: if error and error not in self.printed_errors: logging.getLogger("GUI").error( "Imaging: PCO camera error: %s" % error ) self.printed_errors.append(error) def set_ff_apply(self, state): """ Apply ff to live view :param state: :return: """ self.ff_apply = state self.display_image(self.current_image_index) def display_image(self, index): """ Displays image on the canvas 
:param index: int :return: """ angle = self.collect_omega_start + index * self.image_count / 360.0 if angle > 360: angle -= 360 elif angle < 0: angle += 360 self.graphics_omega_reference_item.set_phi_position(angle) self.current_image_index = index im = self.image_reading_thread.get_raw_image(index) if self.ff_apply and self.image_processing_thread: if self.ff_corrected_list[index] is None: im_min, im_max = self.image_processing_thread.get_im_min_max() im = self.image_reading_thread.get_raw_image(index).astype(float) ff_image = self.image_reading_thread.get_ff_image(index).astype(float) ff_corrected_image = np.divide( im, ff_image, out=np.ones_like(im), where=ff_image != 0 ) im = 255.0 * (ff_corrected_image - im_min) / (im_max - im_min) self.ff_corrected_list[index] = im.astype(np.uint16) else: im = self.ff_corrected_list[index] # sx = ndimage.sobel(im, axis=0, mode='constant') # sy = ndimage.sobel(im, axis=1, mode='constant') # im = np.hypot(sx, sy) if im is not None: self.qimage = QtImport.QImage( im.astype(np.uint8), im.shape[1], im.shape[0], im.shape[1], QtImport.QImage.Format_Indexed8, ) self.graphics_camera_frame.setPixmap(self.qpixmap.fromImage(self.qimage)) self.emit("imageLoaded", index) def display_image_relative(self, relative_index): """ Displays relative image :param relative_index: :return: """ self.display_image(self.current_image_index + relative_index) def play_image_relative(self, relative_angle): """ Starts image video :param relative_angle: :return: """ self.play_images(0.04, relative_angle, False) def set_osc_start(self, osc_start): """ Defines osc start :param osc_start: float :return: """ self.collect_omega_start = osc_start def set_omega_move_enabled(self, state): """ Move omega if the image has been displayed :param state: :return: """ self.omega_move_enabled = state def load_images( self, data_path=None, flat_field_path=None, config_path=None, data_model=None, load_all=True, ): """ Load and process images via threads :param data_path: str :param flat_field_path: str :param config_path: str :param data_model: :param load_all: boolean :return: """ ff_ssim = None self.config_dict = {} self.omega_start = self.diffractometer_hwobj.get_omega_position() self.image_reading_thread = None self.image_processing_thread = None ff_filename_list = [] raw_filename_list = [] if not data_model: if data_path.endswith("tiff"): ext_len = 4 else: ext_len = 3 base_name_list = os.path.splitext(os.path.basename(data_path)) prefix = base_name_list[0][: -(ext_len + 1)] # Reading config json -------------------------------------------- if config_path is None: config_path = data_path[:-ext_len] + "json" if os.path.exists(config_path): with open(config_path) as f: self.config_dict = json.load(f) ff_ssim = self.config_dict["ff_ssim"] self.set_osc_start(self.config_dict["collect"]["osc_start"]) else: logging.getLogger("user_level_log").error( "Imaging: Unable to open config file %s" % config_path ) if data_model: if data_model.xray_imaging_parameters.ff_pre: path_template = data_model.acquisitions[0].path_template for index in range(data_model.xray_imaging_parameters.ff_num_images): ff_filename_list.append( os.path.join( path_template.directory, "ff_" + path_template.get_image_file_name() % (index + 1), ) ) elif os.path.exists(flat_field_path): base_name_list = os.path.splitext(os.path.basename(data_path)) os.chdir(os.path.dirname(flat_field_path)) ff_filename_list = sorted( [ os.path.join(os.path.dirname(flat_field_path), f) for f in os.listdir(os.path.dirname(flat_field_path)) if 
f.startswith("ff_" + prefix) ] ) # Reading raw images ------------------------------------------------- if data_model: acq_params = data_model.acquisitions[0].acquisition_parameters path_template = data_model.acquisitions[0].path_template for index in range(acq_params.num_images): raw_filename_list.append( os.path.join( path_template.directory, path_template.get_image_file_name() % (index + 1), ) ) elif os.path.exists(data_path): os.chdir(os.path.dirname(data_path)) raw_filename_list = sorted( [ os.path.join(os.path.dirname(data_path), f) for f in os.listdir(os.path.dirname(data_path)) if f.startswith(prefix) ] ) else: acq_params = data_model.acquisitions[0].acquisition_parameters path_template = data_model.acquisitions[0].path_template for index in range(acq_params.num_images): raw_filename_list.append( os.path.join( path_template.directory, path_template.get_image_file_name() % (index + 1), ) ) self.image_count = len(raw_filename_list) if self.image_reading_thread is not None: self.image_reading_thread.set_stop() if self.image_processing_thread is not None: image_processing_queue.queue.clear() self.image_processing_thread.set_stop() self.ff_corrected_list = [None] * self.image_count self.image_reading_thread = ImageReadingThread( raw_filename_list, ff_filename_list, ff_ssim ) self.image_reading_thread.start() if ff_filename_list: self.image_processing_thread = ImageProcessingThread(self.image_count) self.image_processing_thread.start() self.current_image_index = 0 self.emit("imageInit", self.image_count) # gevent.sleep(5) self.last_image_index = 0 # self.display_image(0) def play_images(self, exp_time=0.04, relative_index=None, repeat=True): """ Play image video :param exp_time: :param relative_index: :param repeat: :return: """ self.image_polling = gevent.spawn( self.do_image_polling, exp_time, relative_index, repeat ) def do_image_polling(self, exp_time=0.04, relative_index=1, repeat=False): """ Image polling task :param exp_time: :param relative_index: :param repeat: :return: """ self.repeat_image_play = repeat direction = 1 if relative_index > 0 else -1 rotate_in_sec = 10 step = int(self.image_count / (rotate_in_sec / exp_time)) index = 0 while self.current_image_index < self.image_count: if index >= abs(relative_index): break self.display_image(self.current_image_index) self.current_image_index += direction * step if self.repeat_image_play and self.current_image_index >= self.image_count: self.current_image_index -= self.image_count gevent.sleep(exp_time) index += step def stop_image_play(self): """ Stop image video :return: """ self.image_polling.kill() def stop_collect(self): """ Stops image collection :return: """ self.cmd_collect_abort() def mouse_wheel_scrolled(self, delta): """ Handles mouse scroll :param delta: :return: """ if ( self.image_reading_thread is None or self.image_reading_thread.get_raw_image(self.current_image_index) is None ): return if delta > 0: self.current_image_index -= 1 if self.current_image_index < 0: self.current_image_index = self.image_count - 1 else: self.current_image_index += 1 if self.current_image_index == self.image_count: self.current_image_index = 0 self.display_image(self.current_image_index) def start_centering(self): """ Starts 3 click centering :return: """ self.centering_started = 3 self.diffractometer_hwobj.start_centring_method( self.diffractometer_hwobj.CENTRING_METHOD_IMAGING ) def start_n_centering(self): """ Starts n click centering :return: """ self.centering_started = 100 self.diffractometer_hwobj.start_centring_method( 
self.diffractometer_hwobj.CENTRING_METHOD_IMAGING_N ) def move_omega(self, image_index): """ Rotates omega :param image_index: :return: """ if image_index != self.last_image_index: if self.config_dict: omega_relative = self.config_dict["collect"]["osc_range"] * image_index else: omega_relative = self.image_count / 360.0 * image_index if self.last_image_index > image_index: omega_relative *= -1 self.diffractometer_hwobj.move_omega_relative(omega_relative) self.last_image_index = image_index def move_omega_relative(self, relative_index): """ Rotates omega relative :param relative_index: :return: """ self.move_omega(self.last_image_index + relative_index)
def pil_to_pixmap(im):
    QtImage1 = ImageQt(im)
    QtImage2 = QtImage1.copy()
    return QtGui.QPixmap.fromImage(QtImage2)
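A minimal usage sketch of pil_to_pixmap, assuming a PIL image created on the spot and a hypothetical QLabel; the copy() above detaches the QImage from the PIL buffer, so the returned pixmap stays valid after the source image is garbage-collected.

from PIL import Image

img = Image.new("RGB", (120, 80), "steelblue")  # placeholder image
pixmap = pil_to_pixmap(img)
some_label.setPixmap(pixmap)                    # some_label is a hypothetical QLabel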
class EMBLXrayImaging(HardwareObject): def __init__(self, name): HardwareObject.__init__(self, name) self.display_image_list = [] self.raw_image_arr = None self.ff_image_arr = None self.ff_corrected_image_arr = None self.image = None self.qimage = None self.qpixmap = None self.graphics_scene = None self.graphics_camera_frame = None self.image_polling = None self.repeat_image_play = None self.current_image_index = None self.mouse_hold = False self.mouse_coord = [0, 0] def init(self): self.graphics_view = GraphicsView() self.graphics_camera_frame = GraphicsCameraFrame() self.graphics_view.scene().addItem(self.graphics_camera_frame) self.graphics_view.wheelSignal.connect(self.mouse_wheel_scrolled) self.graphics_view.mouseClickedSignal.connect(self.mouse_clicked) self.graphics_view.mouseReleasedSignal.connect(self.mouse_released) self.graphics_view.mouseMovedSignal.connect(self.mouse_moved) # self.qimage = ImageQt() self.qpixmap = QPixmap() self.chan_frame = self.getChannelObject("chanFrame") if self.chan_frame is not None: self.chan_frame.connectSignal("update", self.frame_changed) def get_image_dimensions(self): return 2048, 2048 def getHeight(self): return 2048 def getWidth(self): return 2048 def start_camera(self): pass def frame_changed(self, data): jpgdata = StringIO(data) # im = Image.open(jpgdata) # self.qimage = ImageQt(im) if self.qimage is None: self.qimage = ImageQt(im) else: self.qimage.loadFromData(Image.open(jpgdata)) self.qpixmap.fromImage(self.qimage, Qt.MonoOnly) self.emit("imageReceived", self.qpixmap) def mouse_clicked(self, pos_x, pos_y, left_click): self.mouse_hold = True self.mouse_coord = [pos_x, pos_y] def mouse_released(self, pos_x, pos_y): self.mouse_hold = False def mouse_moved(self, pos_x, pos_y): if self.mouse_hold: if self.mouse_coord[0] - pos_x > 0: index = self.current_image_index + 1 elif self.mouse_coord[0] - pos_x < 0: index = self.current_image_index - 1 else: return if index < 0: index = len(self.display_image_list) - 1 elif index >= len(self.display_image_list): index = 0 self.mouse_coord[0] = pos_x self.display_image(index) def get_graphics_view(self): return self.graphics_view def set_repeate_image_play(self, value): self.repeat_image_play = value def start_imaging(self, data_model): print(data_model) def set_graphics_scene_size(self, size, fixed): pass def display_image(self, index): self.current_image_index = index # x = self.display_image_list[index] # h,w = x.shape # COLORTABLE = [~((i + (i<<8) + (i<<16))) for i in range(255,-1,-1)] # image = QImage(x.data, w, h, QImage.Format_ARGB32) # im_np = np.transpose(self.display_image_list[index], (1,0,2)) # qimage = QImage(im_np, # im_np.shape[1], # im_np.shape[0], # QImage.Format_RGB888) self.graphics_camera_frame.setPixmap(self.display_image_list[index]) # self.emit("imageLoaded", index, self.display_image_list[index][0]) def load_images(self, data_path, flat_field_path): fileformat = "ppm", "PPM", "tiff", "TIFF", "tif", "TIF", "png", "PNG", "raw" cut = False self.display_image_list = [] base_name_list = os.path.splitext(os.path.basename(data_path)) prefix = base_name_list[0].split("_")[0] suffix = base_name_list[1][1:] os.chdir(os.path.dirname(data_path)) imfiles = os.listdir(os.path.dirname(data_path)) imlist = sorted( [filename for filename in imfiles if filename.endswith(fileformat)] ) self.emit("imageInit", len(imlist)) logging.getLogger("HWR").debug("Start") for image in imlist: self.display_image_list.append(QPixmap(image)) logging.getLogger("HWR").debug("end") if self.display_image_list is not 
None: self.graphics_view.setFixedSize(1024, 1024) # self.graphics_view.setFixedSize(self.display_image_list.shape[1], # self.display_image_list.shape[0]) self.display_image(0) # gevent.spawn_later(1, self.convert_images, data_path, flat_field_path) def convert_images(self, raw_data_path, flat_field_path): cut = False fileformat = "ppm", "PPM", "tiff", "TIFF", "tif", "TIF", "png", "PNG", "raw" a, b, c, d = 0, 0, 100, 100 os.chdir(os.path.dirname(raw_data_path)) imfiles = os.listdir(os.path.dirname(raw_data_path)) imlist = sorted( [filename for filename in imfiles if filename.endswith(fileformat)] ) if cut: image_arr = dxchange.reader.read_tiff_stack( imlist[0], range(len(imlist)), slc=((b, d, 1), (a, c, 1)) ).astype("float32") else: image_arr = dxchange.reader.read_tiff_stack( imlist[0], range(len(imlist)) ).astype("float32") self.raw_image_arr = image_arr.transpose(1, 2, 0) os.chdir(os.path.dirname(flat_field_path)) flfiles = os.listdir(os.path.dirname(flat_field_path)) fllist = sorted( [filename for filename in flfiles if filename.endswith(fileformat)] ) if cut: ff_arr = dxchange.reader.read_tiff_stack( fllist[0], range(len(fllist)), slc=((b, d, 1), (a, c, 1)) ).astype("float32") else: ff_arr = dxchange.reader.read_tiff_stack( fllist[0], range(len(fllist)) ).astype("float32") self.ff_image_arr = ff_arr.transpose(1, 2, 0) num_cores = multiprocessing.cpu_count() filtered = Parallel(n_jobs=num_cores)( delayed(self.find_flat)(self.raw_image_arr[:, :, i], self.ff_image_arr) for i in range(self.raw_image_arr.shape[2]) ) self.ff_corrected_image_arr = numpy.transpose( numpy.asarray(filtered), (1, 2, 0) ) def find_flat(self, image, flat): best = [0, 0] for f in range(flat.shape[2]): if cut: rms = ssim(image, flat[:, :, f]) else: rms = ssim(image[a:c, b:d], flat[a:c, b:d, f]) if rms > best[0]: best = [rms, f] arr = image / flat[:, :, best[1]] return arr def play_images(self, fps, repeat): self.image_polling = gevent.spawn(self.do_image_polling, fps, repeat) def do_image_polling(self, exp_time, repeat): self.repeat_image_play = repeat image_index = 0 while image_index < len(self.display_image_list): self.display_image(image_index) image_index += 1 if self.repeat_image_play and image_index == len(self.display_image_list): image_index = 0 gevent.sleep(exp_time) def stop_image_play(self): self.image_polling.kill() def mouse_wheel_scrolled(self, delta): if self.display_image_list is None: return if delta > 0: self.current_image_index -= 1 if self.current_image_index < 0: self.current_image_index = len(self.display_image_list) - 1 else: self.current_image_index += 1 if self.current_image_index == len(self.display_image_list): self.current_image_index = 0 self.display_image(self.current_image_index)