def run(self):
    """Capture frames from the default camera, run Haar-cascade detection,
    and emit each annotated frame as a scaled QImage via ``updateFrame``.

    Loops while ``self.status`` is truthy, then exits the process.
    """
    self.cap = cv2.VideoCapture(0)
    # Hoisted out of the loop: constructing the classifier per frame
    # re-parsed the cascade XML on every iteration for no benefit.
    cascade = cv2.CascadeClassifier(self.trained_file)
    while self.status:
        ret, frame = self.cap.read()
        if not ret:
            continue
        # Reading frame in gray scale to process the pattern
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        detections = cascade.detectMultiScale(gray_frame,
                                              scaleFactor=1.1,
                                              minNeighbors=5,
                                              minSize=(30, 30))
        # Drawing green rectangle around the pattern
        for (x, y, w, h) in detections:
            pos_ori = (x, y)
            pos_end = (x + w, y + h)
            color = (0, 255, 0)
            cv2.rectangle(frame, pos_ori, pos_end, color, 2)
        # Reading the image in RGB to display it
        color_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # Creating and scaling QImage
        h, w, ch = color_frame.shape
        img = QImage(color_frame.data, w, h, ch * w, QImage.Format_RGB888)
        scaled_img = img.scaled(640, 480, Qt.KeepAspectRatio)
        # Emit signal
        self.updateFrame.emit(scaled_img)
    sys.exit(-1)
class ImageMediaView(MediaView):
    """Media view that displays a still image inside a QLabel.

    The image is loaded from ``save_dir``/``uri`` and stretched to the
    widget geometry when :meth:`play` is called.
    """

    def __init__(self, media, parent):
        super(ImageMediaView, self).__init__(media, parent)
        self.widget = QLabel(parent)
        self.widget.setGeometry(media['geometry'])
        self.img = QImage()
        self.set_default_widget_prop()

    @Slot()
    def play(self):
        """Load, scale and show the image; start the duration timer."""
        self.finished = 0
        path = '%s/%s' % (self.save_dir, self.options['uri'])
        rect = self.widget.geometry()
        self.img.load(path)
        # IgnoreAspectRatio: fill the whole widget area even if it distorts.
        self.img = self.img.scaled(rect.width(), rect.height(),
                                   Qt.IgnoreAspectRatio,
                                   Qt.SmoothTransformation)
        self.widget.setPixmap(QPixmap.fromImage(self.img))
        self.widget.show()
        self.widget.raise_()
        # A positive duration schedules the automatic stop via play_timer.
        if float(self.duration) > 0:
            self.play_timer.setInterval(int(float(self.duration) * 1000))
            self.play_timer.start()
        self.started_signal.emit()

    @Slot()
    def stop(self, delete_widget=False):
        #---- kong ----
        if not self.widget:
            return False
        # Drop the (possibly large) image and replace it with an empty one
        # so memory is released even while the widget object is kept.
        del self.img
        self.img = QImage()
        #----
        super(ImageMediaView, self).stop(delete_widget)
        return True
def draw(self, qp):
    """Paint the current video frame, an FPS readout and detection boxes.

    Args:
        qp: active QPainter for this widget.
    """
    qp.setWindow(0, 0, self.width(), self.height())  # logical window
    qp.setRenderHint(QPainter.SmoothPixmapTransform)
    # Frame background.
    qp.setBrush(QColor('#cecece'))  # background colour
    qp.setPen(Qt.NoPen)
    rect = QRect(0, 0, self.width(), self.height())
    qp.drawRect(rect)
    sw, sh = self.width(), self.height()  # display area size
    if not self.opened:
        # Placeholder icon, centred. drawPixmap needs int coordinates:
        # "/" yields floats in Python 3 and raises TypeError in Qt bindings,
        # so use integer division here.
        qp.drawPixmap(sw // 2 - 100, sh // 2 - 100, 200, 200,
                      QPixmap('img/video.svg'))
    # Draw the frame itself.
    if self.opened and self.image is not None:
        ih, iw, _ = self.image.shape
        self.scale = sw / iw if sw / iw < sh / ih else sh / ih  # fit ratio
        px = round((sw - iw * self.scale) / 2)
        py = round((sh - ih * self.scale) / 2)
        qimage = QImage(self.image.data, iw, ih, 3 * iw,
                        QImage.Format_RGB888)  # to QImage
        qpixmap = QPixmap.fromImage(
            qimage.scaled(sw, sh, Qt.KeepAspectRatio))  # to QPixmap
        pw, ph = qpixmap.width(), qpixmap.height()  # size after scaling
        qp.drawPixmap(px, py, qpixmap)
        font = QFont()
        font.setFamily('Microsoft YaHei')
        if self.fps > 0:
            font.setPointSize(14)
            qp.setFont(font)
            pen = QPen()
            pen.setColor(Qt.white)
            qp.setPen(pen)
            qp.drawText(sw - px - 130, py + 40,
                        'FPS: ' + str(round(self.fps, 2)))
        # Bounding boxes for detected objects.
        pen = QPen()
        pen.setWidth(2)  # border width
        for obj in self.objects:
            font.setPointSize(10)
            qp.setFont(font)
            rgb = [round(c) for c in obj['color']]
            pen.setColor(QColor(rgb[0], rgb[1], rgb[2]))  # border colour
            brush1 = QBrush(Qt.NoBrush)  # outline only, no fill
            qp.setBrush(brush1)
            qp.setPen(pen)
            # Object coords are fractions (0-1) of the scaled pixmap.
            ox, oy = px + round(pw * obj['x']), py + round(ph * obj['y'])
            ow, oh = round(pw * obj['w']), round(ph * obj['h'])
            obj_rect = QRect(ox, oy, ow, oh)
            qp.drawRect(obj_rect)  # rectangle
            # Class label and confidence above the box.
            qp.drawText(
                ox, oy - 5,
                str(obj['class']) + str(round(obj['confidence'], 2)))
def check_camera_ip_ui(ipaddr: str, label: QLabel):
    '''
    Checks if the given String is a valid ip-address which leads to a
    camera-/image-stream. If that is the case the downloaded image will be
    applied to the given label.

    Args:
        ipaddr (str) : The ipaddr which should be tested.
        label (QLabel) : The QLabel which should display the downloaded image.

    Returns:
        True, if an image was successfully downloaded and applied to the
        label. False otherwise.
    '''
    valid = False
    lw = label.width()
    # Connect to stream.
    cap = cv2.VideoCapture(ipaddr)
    try:
        ret, frame = cap.read()
        if ret:
            # Process image: BGR (OpenCV) -> RGB (Qt).
            rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            h, w, ch = rgbImage.shape
            bytesPerLine = ch * w
            # Convert to fit into label.
            convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine,
                                       QImage.Format_RGB888)
            p = convertToQtFormat.scaled(lw, 480, Qt.KeepAspectRatio)
            # QPixmap has no QImage constructor overload; fromImage() is the
            # correct conversion.
            label.setPixmap(QPixmap.fromImage(p))
            valid = True
    finally:
        # Release the capture device; the original leaked it.
        cap.release()
    return valid
class AppWindows(QMainWindow):
    """Main application window for the darts scorer.

    Paints a faint, centred background logo and exposes slots that open
    the various dialogs (new game, settings, match history, tournament).
    """

    def __init__(self):
        super(AppWindows, self).__init__()
        self.setWindowTitle("Darts Scorer powered by Jcigi")
        self.resize(1000, 1000)
        self.widget = QWidget()
        self.main_layout = QVBoxLayout()
        self.widget.setLayout(self.main_layout)
        self.setCentralWidget(self.widget)
        self.background_image = QImage("images/gdc_logo_uj.png")
        self.image_rect = QRect()
        # The menu entries are defined in menus.py.
        create_menus(self)

    def paintEvent(self, e):
        painter = QPainter()
        painter.begin(self)
        self.drawWidget(painter)
        painter.end()

    def drawWidget(self, painter):
        """Draw the background logo scaled to the window, watermark-style."""
        rect = self.rect()
        hatter = self.background_image.scaled(
            QSize(rect.width(), rect.height()), Qt.KeepAspectRatio,
            Qt.SmoothTransformation)
        self.image_rect.setRect(rect.x(), rect.y(), hatter.width(),
                                hatter.height())
        self.image_rect.moveCenter(rect.center())
        # Very low opacity so the logo does not distract from the content.
        painter.setOpacity(0.05)
        painter.drawImage(self.image_rect, QImage(hatter))

    @Slot()
    def exit_app(self):
        QApplication.quit()

    @Slot()
    def new_game(self):
        # Dialogs are kept as attributes so they are not garbage-collected.
        self.new_game_window = GameWindowDialog(self)
        self.settings_window = GameSettingsDialog(self)
        self.new_game_window.show()
        self.settings_window.show()

    @Slot()
    def network_settings(self):
        self.network_settings_window = NetworkSettingsDialog(self)
        self.network_settings_window.show()

    @Slot()
    def match_history(self):
        self.match_history_window = MatchStatWindow(self)
        self.match_history_window.show()

    @Slot()
    def select_torna(self):
        self.new_game_window = GameWindowDialog(self, place="network")
        self.select_merkozes_window = SelectMatchWindow(self)
        self.new_game_window.show()
        self.select_merkozes_window.show()
def display_video_stream(self):
    """Grab one frame from the capture device and show it in the label.

    Also forwards the RGB frame to the parent node instance.
    """
    ret, frame = self.capture.read()
    # Guard against a failed grab (camera unplugged / end of stream):
    # cvtColor would otherwise raise on a None frame. The original
    # discarded the status flag.
    if not ret or frame is None:
        return
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # frame = cv2.flip(frame, 1)
    image = QImage(frame, frame.shape[1], frame.shape[0],
                   frame.strides[0], QImage.Format_RGB888)
    scaled_image = image.scaled(self.video_size)
    self.image_label.setPixmap(QPixmap.fromImage(scaled_image))
    self.parent_node_instance.video_picture_updated(frame)
def draw(self, qp): qp.setWindow(0, 0, self.width(), self.height()) # 设置窗口 # 画框架背景 qp.setBrush(QColor('#cecece')) # 框架背景色 qp.setPen(Qt.NoPen) rect = QRect(0, 0, self.width(), self.height()) qp.drawRect(rect) sw, sh = self.width(), self.height() # 图像窗口宽高 pw, ph = 0, 0 # 缩放后的QPixmap大小 # 画图 yh = 0 if self.image is not None: ih, iw, _ = self.image.shape self.scale = sw / iw if sw / iw < sh / ih else sh / ih # 缩放比例 yh = round((self.height() - ih * self.scale) / 2) qimage = QImage(self.image.data, iw, ih, 3 * iw, QImage.Format_RGB888) # 转QImage qpixmap = QPixmap.fromImage( qimage.scaled(self.width(), self.height(), Qt.KeepAspectRatio)) # 转QPixmap pw, ph = qpixmap.width(), qpixmap.height() qp.drawPixmap(0, yh, qpixmap) font = QFont() font.setFamily('Microsoft YaHei') if self.fps > 0: font.setPointSize(14) qp.setFont(font) pen = QPen() pen.setColor(Qt.white) qp.setPen(pen) qp.drawText(self.width() - 150, yh + 40, 'FPS: ' + str(round(self.fps, 2))) # 画目标框 pen = QPen() pen.setWidth(2) # 边框宽度 for obj in self.objects: font.setPointSize(10) qp.setFont(font) rgb = [round(c) for c in obj['color']] pen.setColor(QColor(rgb[0], rgb[1], rgb[2])) # 边框颜色 brush1 = QBrush(Qt.NoBrush) # 内部不填充 qp.setBrush(brush1) qp.setPen(pen) # 坐标 宽高 tx, ty = round(pw * obj['x']), yh + round(ph * obj['y']) tw, th = round(pw * obj['w']), round(ph * obj['h']) obj_rect = QRect(tx, ty, tw, th) qp.drawRect(obj_rect) # 画矩形框 # 画 类别 和 置信度 qp.drawText(tx, ty - 5, str(obj['class']) + str(round(obj['confidence'], 2)))
def _read_image(self, path: Path) -> Optional[QImage]:
    """Load *path* via ``_read_file`` into a QImage, shrinking it to fit
    within 600x400 while preserving aspect ratio.

    Returns None when the file yields no data.
    """
    raw = self._read_file(path)
    if not raw:
        return None
    picture = QImage()
    # The suffix (without the leading dot) hints the decoder format.
    picture.loadFromData(raw, path.suffix[1:])
    limit = QSize(600, 400)
    current = picture.size()
    oversized = (current.width() > limit.width()
                 or current.height() > limit.height())
    if oversized:
        picture = picture.scaled(limit, Qt.AspectRatioMode.KeepAspectRatio)
    return picture
def as_image(self, tile_length=8):
    """Return this tile as a QImage scaled to ``tile_length`` pixels square,
    memoizing the result per requested size.

    Args:
        tile_length: edge length in pixels of the returned square image.
    """
    # Membership test on the dict directly; ``.keys()`` was redundant.
    if tile_length not in self.cached_tiles:
        image = QImage(self.pixels, self.WIDTH, self.HEIGHT,
                       QImage.Format_RGB888)
        # Square scale: both target dimensions are tile_length.
        self.cached_tiles[tile_length] = image.scaled(tile_length,
                                                      tile_length)
    return self.cached_tiles[tile_length]
def requestImage(self, url, size, requestedSize):
    """Image-provider hook: load a local file and return a thumbnail no
    larger than THUMBNAIL_SIZE on either edge, keeping aspect ratio."""
    source = QImage(QUrl(url).toLocalFile())
    width = source.width()
    height = source.height()
    # Report the intrinsic size back through the out-parameter.
    if size:
        size.setWidth(width)
        size.setHeight(height)
    # An explicit requested dimension overrides the intrinsic one.
    requested_w = requestedSize.width()
    requested_h = requestedSize.height()
    if requested_w > 0:
        width = requested_w
    if requested_h > 0:
        height = requested_h
    return source.scaled(min(width, THUMBNAIL_SIZE),
                         min(height, THUMBNAIL_SIZE),
                         Qt.KeepAspectRatio)
def requestImage(self, url, size, requestedSize):
    """Image-provider callback: serve a local image scaled down so neither
    edge exceeds THUMBNAIL_SIZE (aspect ratio kept)."""
    image = QImage(QUrl(url).toLocalFile())
    dims = [image.width(), image.height()]
    if size:
        # Hand the original dimensions back through the out-parameter.
        size.setWidth(dims[0])
        size.setHeight(dims[1])
    # Positive requested dimensions replace the intrinsic ones.
    for axis, wanted in enumerate((requestedSize.width(),
                                   requestedSize.height())):
        if wanted > 0:
            dims[axis] = wanted
    return image.scaled(min(dims[0], THUMBNAIL_SIZE),
                        min(dims[1], THUMBNAIL_SIZE), Qt.KeepAspectRatio)
def show_image(self, cv_image):
    """Display a BGR OpenCV image in this label, resizing the widget so
    its aspect ratio matches the image."""
    self.resize(200, 200)
    rgb_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
    h, w, ch = rgb_image.shape
    bytes_per_line = ch * w
    qt_image = QImage(rgb_image.data, w, h, bytes_per_line,
                      QImage.Format_RGB888)
    img_w = qt_image.width()
    img_h = qt_image.height()
    proportion = img_w / img_h
    # QWidget.resize() takes ints; the float product raised a TypeError
    # under strict Qt bindings.
    self.resize(int(self.width() * proportion), self.height())
    qt_image = qt_image.scaled(self.width(), self.height())
    # fromImage() is the correct QImage -> QPixmap conversion.
    self.setPixmap(QPixmap.fromImage(qt_image))
    self.parent_node_instance.update_shape()
def local_diagnosis(self):
    """Run fault diagnosis on a user-selected local .mat file.

    Disables both diagnosis buttons while running, visualises the chosen
    data, then launches the diagnosis in a background thread.
    """
    self.ui.pb_local_diagnosis.setEnabled(False)
    self.ui.pb_real_time_diagnosis.setEnabled(
        False)  # only one kind of diagnosis may run at a time
    file_path, _ = QFileDialog.getOpenFileName(self, '选择数据', '.', '(*.mat)')
    if '' == file_path:  # no file chosen, i.e. local diagnosis was cancelled
        self.ui.pb_real_time_diagnosis.setEnabled(True)
        self.ui.pb_local_diagnosis.setEnabled(True)
        return
    text = self.ui.tb_diagnosis_result.toPlainText()
    self.ui.tb_diagnosis_result.setText(text + '\n选择文件:' + file_path +
                                        '\n--------------')
    if '' == self.model_file_path:  # no model has been selected yet
        reply = QMessageBox.information(self, '提示', '你还没有选择模型哦!',
                                        QMessageBox.Yes, QMessageBox.Yes)
        if QMessageBox.Yes == reply:
            self.ui.pb_real_time_diagnosis.setEnabled(True)
            self.ui.pb_local_diagnosis.setEnabled(True)
            return
    text = self.ui.tb_diagnosis_result.toPlainText()
    self.ui.tb_diagnosis_result.setText(text +
                                        '\n本地诊断:正在读取数据...\n--------------')
    # After reading the data, visualise it automatically.
    visual_data_pic_path = visual_data(file_path, self.cache_path)
    # Load the rendered picture file and display it.
    img = QImage(visual_data_pic_path)
    img_result = img.scaled(
        self.ui.l_visual_diagnosis_data.width(),
        self.ui.l_visual_diagnosis_data.height(),  # fit picture to label size
        Qt.IgnoreAspectRatio,
        Qt.SmoothTransformation)
    self.ui.l_visual_diagnosis_data.setPixmap(QPixmap.fromImage(img_result))
    text = self.ui.tb_diagnosis_result.toPlainText()
    self.ui.tb_diagnosis_result.setText(text +
                                        '\n实时诊断:正在诊断..\n--------------')
    # Run the fault diagnosis in a worker thread.
    diagnosis_end_signal.send_msg.connect(
        self.diagnosis_end_slot)  # connect signal and slot
    diagnosis_thread = threading.Thread(target=fault_diagnosis,
                                        args=(self.model_file_path,
                                              file_path))
    diagnosis_thread.start()
def _writeFrame(self, surface: QImage):
    """Encode one frame into the MP4 stream, overlaying the mouse pointer.

    The surface is scaled to the stream size when ``self.scale`` is set,
    otherwise copied so the caller's image is never painted on.
    """
    if self.scale:
        canvas = surface.scaled(self.stream.width, self.stream.height)
    else:
        canvas = surface.copy()
    # Draw the mouse pointer. Render mouse clicks?
    painter = QPainter(canvas)
    painter.setBrush(QColor.fromRgb(255, 255, 0, 180))
    mouse_x, mouse_y = self.mouse
    painter.drawEllipse(mouse_x, mouse_y, 5, 5)
    painter.end()
    # Output frame.
    frame = av.VideoFrame.from_image(ImageQt.fromqimage(canvas))
    for packet in self.stream.encode(frame):
        if self.progress:
            self.progress()
        self.mp4.mux(packet)
def real_time_diagnosis(self):
    """Simulated real-time diagnosis: acquire data, visualise it, then run
    the fault diagnosis in a background thread."""
    self.ui.pb_real_time_diagnosis.setEnabled(False)
    self.ui.pb_local_diagnosis.setEnabled(
        False)  # only one kind of diagnosis may run at a time
    if '' == self.model_file_path:  # no model has been selected yet
        reply = QMessageBox.information(self, '提示', '你还没有选择模型哦!',
                                        QMessageBox.Yes, QMessageBox.Yes)
        if QMessageBox.Yes == reply:
            self.ui.pb_real_time_diagnosis.setEnabled(True)
            self.ui.pb_local_diagnosis.setEnabled(True)
            return
    text = self.ui.tb_diagnosis_result.toPlainText()
    self.ui.tb_diagnosis_result.setText(text +
                                        '\n实时诊断:正在采集数据...\n--------------')
    # TODO: real-time acquisition is simulated here by reading a fixed file
    # from the data folder.
    real_time_data_path = os.getcwd(
    ) + '/real_time_data/0HP/48k_Drive_End_B007_0_122.mat'
    # After reading the data, visualise it automatically.
    visual_data_pic_path = visual_data(real_time_data_path, self.cache_path)
    # Load the rendered picture file and display it.
    img = QImage(visual_data_pic_path)
    img_result = img.scaled(
        self.ui.l_visual_diagnosis_data.width(),
        self.ui.l_visual_diagnosis_data.height(),  # fit picture to label size
        Qt.IgnoreAspectRatio,
        Qt.SmoothTransformation)
    self.ui.l_visual_diagnosis_data.setPixmap(QPixmap.fromImage(img_result))
    text = self.ui.tb_diagnosis_result.toPlainText()
    self.ui.tb_diagnosis_result.setText(text +
                                        '\n实时诊断:正在诊断..\n--------------')
    # Run the fault diagnosis in a worker thread.
    diagnosis_end_signal.send_msg.connect(
        self.diagnosis_end_slot)  # connect signal and slot
    diagnosis_thread = threading.Thread(target=fault_diagnosis,
                                        args=(self.model_file_path,
                                              real_time_data_path))
    diagnosis_thread.start()
def run(self):
    """Receive JPEG-encoded frames from the footage socket, optionally
    record them, and emit each frame as a 640x480 QImage."""
    print('Listening to camera stream')
    while self.enabled:
        frame = self.footage_socket.recv()
        imgBuff = np.frombuffer(frame, dtype=np.uint8)
        img = cv2.imdecode(imgBuff, cv2.IMREAD_COLOR)
        # imdecode returns None for a corrupt/truncated buffer; skip the
        # frame instead of crashing in cvtColor / the video writer.
        if img is None:
            continue
        if self.record_output:
            self.video_writer.write(img)
        if self.enabled:
            rgbImage = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            h, w, ch = rgbImage.shape
            bytesPerLine = ch * w
            convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine,
                                       QImage.Format_RGB888)
            p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
            self.changePixmap.emit(p)
    if self.record_output:
        self.StopRecording()
def emit_qframes(frames, qframes):
    """
    Emit some number of stream frames, depending on cameras quantity
    Should do it through QFrame objects, defined above
    Don't forget to connect QFrame objects to PyQt slots in widgets!
    :param frames: dictionary of frames in format of {camera:frame}
    :param qframes: dictionary of qframes in the same format as frames
    """
    for camera, frame in frames.items():
        # BGR (OpenCV) -> RGB (Qt).
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        rows, cols, channels = rgb.shape
        # Wrap the buffer in a QImage, then scale to the target resolution.
        picture = QImage(rgb.data, cols, rows, channels * cols,
                         QImage.Format_RGB888)
        thumb = picture.scaled(width, height, Qt.KeepAspectRatio)
        # Emit the picture towards the connected widget slot.
        qframes[camera].signal.emit(thumb)
class QLayerImage(QLayer):
    """
    QLayer containing its own source image
    """

    @classmethod
    def fromImage(cls, mImg, parentImage=None, sourceImg=None):
        # Alternate constructor: wrap mImg and remember the source image.
        layer = QLayerImage(QImg=mImg, parentImage=parentImage)
        layer.parentImage = parentImage
        layer.sourceImg = sourceImg
        return layer

    def __init__(self, *args, **kwargs):
        # Default (empty) source image; fromImage() overwrites it.
        self.sourceImg = QImage()
        super().__init__(*args, **kwargs)

    def bTransformed(self, transformation, parentImage):
        """
        Applies transformation to a copy of layer and returns the copy.
        @param transformation:
        @type transformation: QTransform
        @param parentImage:
        @type parentImage: vImage
        @return: transformed layer
        @rtype: QLayerImage
        """
        tLayer = super().bTransformed(transformation, parentImage)
        # Rebind the tool (if any) to the transformed layer.
        if tLayer.tool is not None:
            tLayer.tool.layer = tLayer
            tLayer.tool.img = tLayer.parentImage
        return tLayer

    def inputImg(self, redo=True):
        """
        Overrides QLayer.inputImg()
        @return:
        @rtype: QImage
        """
        # The source image scaled to the current working size.
        return self.sourceImg.scaled(self.getCurrentImage().size())
def visual_data(self):
    """Visualise the currently selected data file and show the rendered
    picture in the l_visual_data label."""
    self.ui.pb_visual_data.setEnabled(False)
    if '' == self.data_file_path:  # no file has been selected yet
        reply = QMessageBox.information(self, '提示', '请先选择文件!',
                                        QMessageBox.Yes, QMessageBox.Yes)
        if reply == QMessageBox.Yes:
            self.ui.pb_visual_data.setEnabled(True)
            return  # bail out directly
    visual_data_pic_path = visual_data(self.data_file_path, self.cache_path)
    # Load the rendered picture file and display it.
    img = QImage(visual_data_pic_path)
    img_result = img.scaled(
        self.ui.l_visual_data.width(),
        self.ui.l_visual_data.height(),  # fit the picture to the label size
        Qt.IgnoreAspectRatio,  # stretch; do not keep the aspect ratio
        Qt.SmoothTransformation  # smooth scaling to avoid artefacts
    )
    self.ui.l_visual_data.setPixmap(QPixmap.fromImage(img_result))
    self.ui.pb_visual_data.setEnabled(True)
def setBackgroundImage(self):
    """Build the colour-wheel background image and add it to the scene.

    A 256x256 coordinate grid is mapped through ``self.invM`` to colour
    values, the result is scaled to the widget size, and the triangle
    edges plus a centre cross are drawn on top.
    """
    img = QImage(QSize(256, 256), QImage.Format_ARGB32)
    img.fill(QColor(100, 100, 100))
    a = np.arange(256)
    buf = np.meshgrid(a, a)
    # In-place view of the RGB channels (reversed: the raw buffer is BGRA).
    buf1 = QImageBuffer(img)[:, :, :3][:, :, ::-1]
    # Channels 0/1 hold the grid coordinates, channel 2 is constant 1.
    buf1[:, :, 0], buf1[:, :, 1] = buf
    buf1[:, :, 2] = 1
    # Apply the inverse colour matrix and clip into displayable range.
    buf2 = np.tensordot(buf1, self.invM, axes=(-1, -1)) * 255
    np.clip(buf2, 0, 255, out=buf2)
    buf1[...] = buf2
    img = img.scaled(self.cwSize, self.cwSize)
    qp = QPainter(img)
    # draw edges
    qp.drawLine(self.R, self.G)
    qp.drawLine(self.G, self.B)
    qp.drawLine(self.B, self.R)
    # draw center (cross at the barycentre of the three primaries)
    b = (self.B + self.R + self.G) / 3.0
    qp.drawLine(b - QPointF(10, 0), b + QPointF(10, 0))
    qp.drawLine(b - QPointF(0, 10), b + QPointF(0, 10))
    qp.end()
    self.scene().addItem(QGraphicsPixmapItem(QPixmap.fromImage(img)))
def run(self):
    """Video playback loop: read frames, convert to QImage and emit them.

    Honours ``self.pause`` (hold playback), ``self.play`` (loop condition)
    and ``self.change_frame_number_flg`` (seek requests).
    """
    while self.play:  # `while True and self.play` simplified
        # start = time.time()
        while self.pause:
            # Sleep instead of spinning: the original `pass` loop pegged a
            # CPU core for the whole pause.
            self.msleep(10)
        if self.change_frame_number_flg:
            self.cap.set(cv2.CAP_PROP_POS_FRAMES, int(self.new_frame_number))
            print("cap setting inside while")
            self.change_frame_number_flg = False
        ret, frame = self.cap.read()
        if ret:
            rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # print(rgbImage.shape)
            h, w, ch = rgbImage.shape
            bytesPerLine = ch * w
            convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine,
                                       QImage.Format_RGB888)
            p = convertToQtFormat.scaled(720, 405, Qt.KeepAspectRatio)
            self.changePixmap.emit(p)
        self.msleep(self.sleep_duration)
class Scribble(QWidget):
    """Transparent drawing overlay widget.

    Keeps an internal QImage at the unscaled ("real") resolution and maps
    mouse events into it through ``scaleFactor``, so strokes line up with
    the scaled display in the image area.
    """

    def __init__(self, parent=None, scribbleImagePath=""):
        QWidget.__init__(self, parent)
        self.show()
        self.raise_()
        # Available space to display on
        self.imageAreaWidth = 0
        self.imageAreaHeight = 0
        # Real number of pixels (unscaled)
        self.scribbleWidth = 1
        self.scribbleHeight = 1
        # Scaling factor between the real image size and the size after
        # scaling to fit the imageArea
        self.scaleFactor = 1.0
        self.scribbling = False  # if scribbling at this time and moment
        self.penMoved = False  # if the pen already moved while scribbling
        self.myPenWidth = 1
        self.myPenColor = QColor(0, 0, 255, 255)
        self.lastPoint = None
        self.scribbleImagePath = scribbleImagePath
        self.image = None

    def refreshScribble(self, width, height):
        # Resize the widget to cover the new display area.
        self.imageAreaWidth = width
        self.imageAreaHeight = height
        self.setGeometry(0, 0, self.imageAreaWidth, self.imageAreaHeight)

    def setColor(self, color):  # QColor object
        self.myPenColor = color

    def setPenSize(self, size):
        self.myPenWidth = size

    def mousePressEvent(self, event):
        if event.button() == Qt.LeftButton:
            self.lastPoint = event.pos()
            self.scribbling = True
            self.penMoved = False

    def mouseMoveEvent(self, event):
        if self.scribbling:
            self.drawLineTo(event.pos())
            self.penMoved = True

    def mouseReleaseEvent(self, event):
        if event.button() == Qt.LeftButton and self.scribbling:
            self.drawLineTo(event.pos())
            # The red pen additionally marks the release point with a circle.
            if self.myPenColor == QColor(255, 0, 0, 255):
                self.drawCircle(event.pos())
            self.scribbling = False
            self.penMoved = False

    def setupScribble(self, width, height):
        """(Re)create the backing image at the real (unscaled) size."""
        self.scribbleWidth = width
        self.scribbleHeight = height
        if self.scribbleImagePath == "":
            # Fresh, fully transparent canvas.
            self.image = QImage(self.scribbleWidth, self.scribbleHeight,
                                QImage.Format_ARGB32)
            # print(self.image.depth())  # prints 32
            self.image.fill(qRgba(0, 0, 0, 0))
        else:
            self.image = QImage(self.scribbleImagePath)
            self.image = self.image.convertToFormat(QImage.Format_ARGB32)
        self.update()

    def drawCircle(self, point):
        """Draw a small blue circle centred on *point* (widget coords)."""
        painter = QPainter(self.image)
        painter.setCompositionMode(QPainter.CompositionMode_Source)
        painter.setRenderHint(QPainter.Antialiasing)
        brush = QBrush(QColor(0, 0, 255, 255), Qt.SolidPattern)
        pen = QPen(brush, 0.2 * self.myPenWidth / self.scaleFactor,
                   Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin)
        painter.setPen(pen)
        radius = 1.5 * self.myPenWidth / self.scaleFactor
        # Map widget coordinates back to image coords via scaleFactor.
        rectangle = QRectF((point.x() / self.scaleFactor) - (radius / 2),
                           (point.y() / self.scaleFactor) - (radius / 2),
                           radius, radius)
        painter.drawEllipse(rectangle)
        self.update()

    def drawLineTo(self, endPoint):
        if not self.penMoved:
            endPoint.setX(
                endPoint.x() + 1
            )  # ensures a dot is being drawn if there was just a click and no mouse move
        painter = QPainter(self.image)
        painter.setCompositionMode(QPainter.CompositionMode_Source)
        painter.setRenderHint(QPainter.Antialiasing)
        brush = QBrush(self.myPenColor, Qt.SolidPattern)
        pen = QPen(brush, self.myPenWidth / self.scaleFactor, Qt.SolidLine,
                   Qt.RoundCap, Qt.RoundJoin)
        # pen = QPen(self.myPenColor, self.myPenWidth, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin)
        painter.setPen(pen)
        # Points are divided by scaleFactor to land in image coordinates.
        painter.drawLine(self.lastPoint / self.scaleFactor,
                         endPoint / self.scaleFactor)
        self.update()
        self.lastPoint = endPoint

    def paintEvent(self, event):
        painter = QPainter(self)
        dirtyRect = event.rect()
        # Scale the backing image to the display area; remember the factor
        # used so mouse events can be mapped back.
        scaledImage = self.image.scaled(self.imageAreaWidth,
                                        self.imageAreaHeight,
                                        Qt.KeepAspectRatio,
                                        Qt.FastTransformation)
        self.scaleFactor = float(scaledImage.width()) / float(
            self.scribbleWidth)
        painter.drawImage(dirtyRect, scaledImage, dirtyRect)
class Canvas(QWidget):
    """Scaled drawing canvas.

    Strokes are painted onto ``large_image`` (scaled resolution) and
    down-sampled into ``small_image`` (w x h), which is what callers read
    via :meth:`get_content`. Left-drag draws, right-click clears.
    """

    content_changed = Signal()

    _background_color = QColor.fromRgb(0, 0, 0)
    _foreground_color = QColor.fromRgb(255, 255, 255)

    def __init__(self, parent, w, h, pen_width, scale):
        super().__init__(parent)
        self.w = w
        self.h = h
        self.scaled_w = scale * w
        self.scaled_h = scale * h
        self.scale = scale
        # Set size
        self.setFixedSize(self.scaled_w, self.scaled_h)
        # Create image
        self.small_image = QImage(self.w, self.h, QImage.Format_RGB32)
        self.small_image.fill(self._background_color)
        self.large_image = QImage(self.scaled_w, self.scaled_h,
                                  QImage.Format_RGB32)
        self.large_image.fill(self._background_color)
        # Create pen
        self.pen = QPen()
        self.pen.setColor(self._foreground_color)
        self.pen.setJoinStyle(Qt.RoundJoin)
        self.pen.setCapStyle(Qt.RoundCap)
        self.pen.setWidthF(scale * pen_width)
        # There is currently no path
        self.currentPath = None
        self.content_changed.connect(self.repaint)

    def _get_painter(self, paintee):
        # Painter pre-configured with the canvas pen and antialiasing.
        painter = QPainter(paintee)
        painter.setPen(self.pen)
        painter.setRenderHint(QPainter.Antialiasing, True)
        return painter

    def _derive_small_image(self, large_image=None):
        if large_image is None:
            large_image = self.large_image
        # Downsample image
        self.small_image = large_image.scaled(
            self.w, self.h, mode=Qt.SmoothTransformation)
        self.content_changed.emit()

    def _current_path_updated(self, terminate_path=False):
        # Determine whether to draw on the large image directly or whether
        # to make a temporary copy (in-progress strokes must not persist).
        paintee = self.large_image if terminate_path else self.large_image.copy(
        )
        # Draw path on the large image of choice
        painter = self._get_painter(paintee)
        if self.currentPath.elementCount() != 1:
            painter.drawPath(self.currentPath)
        else:
            # A single-element path renders as a dot.
            painter.drawPoint(self.currentPath.elementAt(0))
        painter.end()
        # Optionally terminate the path
        if terminate_path:
            self.currentPath = None
        # Downsample image
        self._derive_small_image(paintee)

    def _clear_image(self):
        self.large_image.fill(self._background_color)
        self._derive_small_image()

    def get_content(self):
        # NOTE(review): assumes the QImage buffer is laid out h x w x
        # channels — verify for the image format in use.
        return np.asarray(self.small_image.constBits()).reshape(
            (self.h, self.w, -1))

    def set_content(self, image_rgb):
        # Per-pixel copy into the small image, then upscale to the large one.
        for row in range(image_rgb.shape[0]):
            for col in range(image_rgb.shape[1]):
                self.small_image.setPixel(col, row, image_rgb[row, col])
        self.large_image = self.small_image.scaled(
            self.scaled_w, self.scaled_h, mode=Qt.SmoothTransformation)
        self._derive_small_image()
        self.content_changed.emit()

    def mousePressEvent(self, event):
        if event.button() == Qt.LeftButton:
            # Create new path
            self.currentPath = QPainterPath()
            self.currentPath.moveTo(event.pos())
            self._current_path_updated()

    def mouseMoveEvent(self, event):
        if (event.buttons() & Qt.LeftButton) and self.currentPath is not None:
            # Add point to current path
            self.currentPath.lineTo(event.pos())
            self._current_path_updated()

    def mouseReleaseEvent(self, event):
        if (event.button() == Qt.LeftButton) and self.currentPath is not None:
            # Add terminal point to current path
            self.currentPath.lineTo(event.pos())
            self._current_path_updated(terminate_path=True)
        elif event.button() == Qt.RightButton:
            self._clear_image()

    def paintEvent(self, event):
        paint_rect = event.rect()
        # Only paint the surface that needs painting
        painter = self._get_painter(self)
        # Draw image
        painter.scale(self.scale, self.scale)
        painter.drawImage(paint_rect, self.small_image, paint_rect)
        painter.end()
        painter = self._get_painter(self)
        #if self.currentPath is not None:
        #    painter.drawPath(self.currentPath)

    @Slot()
    def repaint(self):
        super().repaint()
def show_result(self):
    """Display the training result chosen in the radio-button group:
    classification report, confusion matrix, or one of the saved curves."""
    if '' == self.model_name:  # no model has been trained yet
        reply = QMessageBox.information(self, '提示', '你还没有训练模型哦!',
                                        QMessageBox.Yes, QMessageBox.Yes)
        if reply == QMessageBox.Yes:
            return
    show_mode = self.ui.buttonGroup.checkedId()
    # print(show_mode)
    # TODO: these ids were found empirically; there should be another way
    # to read the selected box's content directly.
    if -2 == show_mode:  # show the classification report
        self.ui.l_train_result.setText(self.classification_report)
    elif -3 == show_mode:  # show the confusion matrix
        # Load the picture file and display it.
        img = QImage(self.cache_path + '/' + self.model_name +
                     '_confusion_matrix.png')
        img_result = img.scaled(
            self.ui.l_train_result.width(),
            self.ui.l_train_result.height(),  # fit picture to the label size
            Qt.IgnoreAspectRatio, Qt.SmoothTransformation)
        self.ui.l_train_result.setPixmap(QPixmap.fromImage(img_result))
    elif -4 == show_mode:  # show the ROC curves
        # Load the picture file and display it.
        img = QImage(self.cache_path + '/' + self.model_name +
                     '_ROC_Curves.png')
        img_result = img.scaled(
            self.ui.l_train_result.width(),
            self.ui.l_train_result.height(),  # fit picture to the label size
            Qt.IgnoreAspectRatio, Qt.SmoothTransformation)
        self.ui.l_train_result.setPixmap(QPixmap.fromImage(img_result))
    elif -5 == show_mode:  # show the precision-recall curves
        # Load the picture file and display it.
        img = QImage(self.cache_path + '/' + self.model_name +
                     '_Precision_Recall_Curves.png')
        img_result = img.scaled(
            self.ui.l_train_result.width(),
            self.ui.l_train_result.height(),  # fit picture to the label size
            Qt.IgnoreAspectRatio, Qt.SmoothTransformation)
        self.ui.l_train_result.setPixmap(QPixmap.fromImage(img_result))
    elif -6 == show_mode:  # show the loss curves
        if 'random_forest' == self.model_name:  # random forest has no loss curve
            QMessageBox.information(self, '提示', '随机森林模型没有损失曲线哦!',
                                    QMessageBox.Yes, QMessageBox.Yes)
        else:
            # Load the picture file and display it.
            img = QImage(self.cache_path + '/' + self.model_name +
                         '_train_valid_loss.png')
            img_result = img.scaled(
                self.ui.l_train_result.width(),
                self.ui.l_train_result.height(),  # fit picture to the label size
                Qt.IgnoreAspectRatio, Qt.SmoothTransformation)
            self.ui.l_train_result.setPixmap(QPixmap.fromImage(img_result))
    elif -7 == show_mode:  # show the accuracy curves
        if 'random_forest' == self.model_name:  # random forest has no accuracy curve
            QMessageBox.information(self, '提示', '随机森林模型没有正确率曲线哦!',
                                    QMessageBox.Yes, QMessageBox.Yes)
        else:
            # Load the picture file and display it.
            img = QImage(self.cache_path + '/' + self.model_name +
                         '_train_valid_acc.png')
            img_result = img.scaled(
                self.ui.l_train_result.width(),
                self.ui.l_train_result.height(),  # fit picture to the label size
                Qt.IgnoreAspectRatio, Qt.SmoothTransformation)
            self.ui.l_train_result.setPixmap(QPixmap.fromImage(img_result))
def draw(self, qp, fps, objs, x0, y0, image):
    """Paint one camera view at offset (x0, y0): the frame, an FPS readout,
    detection boxes and a helmet-wearing summary.

    Args:
        qp: active QPainter.
        fps: frames per second to display (<= 0 hides the text overlays).
        objs: detections; dicts with fractional x/y/w/h plus 'class',
            'color' and 'confidence' keys.
        x0, y0: top-left offset of this view inside the widget.
        image: frame as an ndarray, or None for no frame.
    """
    #qp.setWindow(x0, y0, self.w_,self.h_)  # set logical window
    # Frame background.
    qp.setBrush(QColor('#cecece'))  # background colour
    qp.setPen(Qt.NoPen)
    rect = QRect(x0, y0, self.w_, self.h_)
    qp.drawRect(rect)
    sw, sh = self.w_, self.h_  # display area size
    sh -= 40  # reserve a 40px strip at the top for the text overlays
    pw, ph = 0, 0  # scaled QPixmap size
    # Draw the image.
    yh = 0
    if image is not None:
        ih, iw, _ = image.shape
        self.scale = sw / iw if sw / iw < sh / ih else sh / ih  # fit ratio
        yh = round((self.h_ - 40 - ih * self.scale) / 2)
        #print(yh)
        qimage = QImage(image.data, iw, ih, 3 * iw,
                        QImage.Format_RGB888)  # to QImage
        qpixmap = QPixmap.fromImage(
            qimage.scaled(self.w_, self.h_ - 40,
                          Qt.KeepAspectRatio))  # to QPixmap
        pw, ph = qpixmap.width(), qpixmap.height()
        #print(pw,ph)
        qp.drawPixmap(0 + x0, y0 + yh + 40, qpixmap)
    font = QFont()
    font.setFamily('Microsoft YaHei')
    if fps > 0:
        font.setPointSize(14)
        qp.setFont(font)
        pen = QPen()
        pen.setColor(Qt.white)
        qp.setPen(pen)
        qp.drawText(self.w_ - 150 + x0, y0 + 20,
                    'FPS: ' + str(round(fps, 2)))
    # Draw bounding boxes.
    pen = QPen()
    pen.setWidth(2)  # border width
    person = 0
    hat = 0
    for obj in objs:
        # Tally "person" (no helmet) vs. other (helmet) detections.
        if obj["class"] == "person":
            person += 1
        else:
            hat += 1
        font.setPointSize(10)
        qp.setFont(font)
        rgb = [round(c) for c in obj['color']]
        pen.setColor(QColor(rgb[0], rgb[1], rgb[2]))  # border colour
        brush1 = QBrush(Qt.NoBrush)  # outline only, no fill
        qp.setBrush(brush1)
        qp.setPen(pen)
        # Position and size; obj values are fractions of the scaled pixmap.
        tx, ty = round(pw * obj['x']), yh + round(ph * obj['y'])
        tw, th = round(pw * obj['w']), round(ph * obj['h'])
        obj_rect = QRect(tx + x0, y0 + ty + 40, tw, th)
        qp.drawRect(obj_rect)  # rectangle
        # Class label and confidence.
        qp.drawText(x0 + tx, y0 + ty + 40 - 5,
                    str(obj['class']) + str(round(obj['confidence'], 2)))
    if fps > 0:
        pen = QPen()
        pen.setColor(Qt.red)
        font.setPointSize(14)
        qp.setFont(font)
        qp.setPen(pen)
        qp.drawText(0 + x0, y0 + 20,
                    "there are {0} person".format(person + hat))
        qp.drawText(
            x0 + 0, y0 + 40,
            "{0} people did not wear safety helmets".format(person))
class Picture(object):
    """A picture on disk plus its cached QImage, thumbnail and view scale."""

    def __init__(self, path):
        super(Picture, self).__init__()
        self.path = path
        self.name = self.path.split("/")[-1]
        self.extension = os.path.splitext(self.path)[1]
        self.scale = 1.0
        self.image = QImage(self.path)
        self.thumbnail = self._make_thumbnail()
        self.resolution = "Resolution: " + str(self.image.width()) + "x" + str(
            self.image.height()) + "px"

    def _make_thumbnail(self):
        # Single place for the thumbnail recipe; it was copy-pasted five
        # times in the original.
        return self.image.scaled(QSize(110, 110),
                                 aspectMode=Qt.KeepAspectRatio,
                                 mode=Qt.SmoothTransformation)

    def deletePicture(self):
        os.remove(self.path)

    def verticalFlip(self):
        self.image = self.image.mirrored(vertically=True, horizontally=False)
        self.thumbnail = self._make_thumbnail()
        self.image.save(self.path)

    def horizontalFlip(self):
        self.image = self.image.mirrored(horizontally=True, vertically=False)
        self.thumbnail = self._make_thumbnail()
        self.image.save(self.path)

    def zoomIn(self):
        # NOTE(review): zoomIn shrinks and zoomOut grows the factor; this
        # preserves the original behaviour but looks inverted — confirm how
        # ``scale`` is consumed by the viewer before changing it.
        self.scale = self.scale * 0.9

    def zoomOut(self):
        self.scale = self.scale * 1.1

    def _rotate(self, degrees):
        # Shared body of rotateCW / rotateCCW: rotate about the centre,
        # persist to disk and refresh the thumbnail.
        transform = QTransform()
        transform.translate(self.image.width() / 2, self.image.height() / 2)
        transform.rotate(degrees)
        self.image = self.image.transformed(transform)
        self.image.save(self.path)
        self.thumbnail = self._make_thumbnail()

    def rotateCW(self):
        self._rotate(90)

    def rotateCCW(self):
        self._rotate(-90)

    def saveImage(self):
        self.image.save(self.path)
def _resize_bitmap(source_image: QImage) -> QImage:
    """Return *source_image* scaled to a Block.SIDE_LENGTH square."""
    side = Block.SIDE_LENGTH
    return source_image.scaled(side, side)
class GalleryMiniature(QDialog):
    """Dialog that builds a round face miniature from a photo.

    Shows the source photo on the left and a circular 300x300 thumbnail on
    the right.  A face is picked either automatically (Haar cascade, cycling
    through detections via the context menu) or manually (a rectangle drawn
    on the MovableLabel).  The context menu (Russian labels: Open / Save /
    Next / Quit) drives the workflow.
    """

    def __init__(self, pid=0, filename=0):
        # pid: person id used to name the saved miniature; filename: optional
        # photo to load immediately (0 means "ask via the context menu").
        super().__init__()
        #self.resize(800,600)
        self.pid = pid
        #print(filename)
        self.filename = filename
        layout = QHBoxLayout()
        self.baseLabel = MovableLabel()  # left side: full photo, supports rect selection
        self.baseLabel.setFrameShape(QFrame.StyledPanel)
        self.baseLabel.setAlignment(Qt.AlignCenter)
        self.baseLabel.getRectCoords.connect(self.getRectCoordsSlot)
        self.miniLabel = QLabel()  # right side: round miniature preview
        self.miniLabel.setAlignment(Qt.AlignCenter)
        self.base_image = QImage()
        self.mini_image = QImage()
        self.base_image_scaled = 0  # QImage once scaled; 0 until first load
        #self.filename = 0
        self.pix = 0  # QPixmap of the current miniature; 0 until built
        self.currentIndex = 0  # which detected face is shown ("Next" cycles it)
        self.real_size = 0  # (width, height) of the unscaled photo
        layout.addWidget(self.baseLabel)
        layout.addWidget(self.miniLabel)

        def openMenu(position):
            # Context menu: Open / Save / Next / Quit (labels are Russian).
            menu = QMenu()
            openAction = menu.addAction('Открыть')
            saveAction = menu.addAction('Сохранить')
            menu.addSeparator()
            nextAction = menu.addAction('Следующий')
            menu.addSeparator()
            quitAction = menu.addAction('Выход')
            action = menu.exec_(self.mapToGlobal(position))
            if action == openAction:
                fileName = QFileDialog.getOpenFileName(
                    self, "Изображение", "photos",
                    "Фото (*.png *.jpg *.bmp *.JPG)")
                if len(fileName) > 1:
                    self.filename = fileName[0]
                    self.show_images()
            if action == saveAction:
                if self.filename:
                    print('os.path.basename(self.filename)')
                    path = os.path.basename(self.filename)
                    root_ext = os.path.splitext(path)
                    print(root_ext)
                    if self.pid:
                        # With a person id the miniature is named "<pid>_001".
                        root_ext = [str(pid) + '_001']
                    minifile = os.path.join(
                        os.path.join('photos', 'miniatures'),
                        root_ext[0] + '.png')
                    if self.pix:
                        self.pix.save(minifile, "PNG")
                    print(minifile)
            if action == nextAction:
                # Cycle to the next detected face.
                if self.base_image:
                    self.get_face_image()
            if action == quitAction:
                self.close()

        self.setContextMenuPolicy(Qt.CustomContextMenu)
        self.customContextMenuRequested.connect(openMenu)
        self.setLayout(layout)
        if self.filename:
            self.show_images()
        else:
            # No photo yet: park the dialog at quarter-screen size.
            screen = QGuiApplication.primaryScreen()
            screenSize = screen.availableSize()
            sy = int((screenSize.height() - 20) / 4)
            sx = int((screenSize.width() - 20) / 4)
            self.setGeometry(sx, sy, int(screenSize.width() / 4),
                             int(screenSize.height() / 4))

    def show_images(self):
        """Load self.filename, scale it for display and run face detection."""
        #print(self.filename)
        self.base_image.load(self.filename)
        self.real_size = (self.base_image.width(), self.base_image.height())
        screen = QGuiApplication.primaryScreen()
        screenSize = screen.availableSize()
        sy = int((screenSize.height() - 20) / 2)
        sx = int((screenSize.width() - 20) / 2)
        if not self.base_image_scaled:
            # First load: fit the photo into half the screen.
            self.base_image_scaled = self.base_image.scaled(
                sx, sy, Qt.KeepAspectRatio)
            self.setGeometry(sx - int(self.base_image_scaled.width()),
                             sy - int(self.base_image_scaled.height() / 2),
                             self.base_image_scaled.width() * 2,
                             self.base_image_scaled.height())
        else:
            # Subsequent loads: keep the previously chosen display size.
            self.base_image_scaled = self.base_image.scaled(
                int(self.base_image_scaled.width()),
                int(self.base_image_scaled.height()), Qt.KeepAspectRatio)
            self.setGeometry(sx - int(self.base_image_scaled.width()),
                             sy - int(self.base_image_scaled.height() / 2),
                             self.base_image_scaled.width() * 2,
                             self.base_image_scaled.height())
        self.baseLabel.setPixmap(QPixmap.fromImage(self.base_image_scaled))
        #self.miniLabel.setPixmap(QPixmap.fromImage(self.base_image))
        self.get_face_image()

    def getRectCoordsSlot(self, coords):
        """Rectangle drawn on the photo -> crop that region manually."""
        print(coords)
        if self.base_image:
            self.get_face_image(coords)

    def get_round_miniature(self, image):
        """Turn a BGR crop (ndarray) into a round 300x300 pixmap and show it."""
        size = (300, 300)
        img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        im = Image.fromarray(img)
        #im = image #Image.open(image.png)
        im = crop(im, size)  # external helper: center-crop to `size`
        im.putalpha(prepare_mask(size, 4))  # external helper: circular alpha mask
        #im.save(filename)
        qim = ImageQt(im)
        self.pix = QPixmap.fromImage(qim)
        self.miniLabel.setPixmap(self.pix)
        #self.miniature = QImage(filename)

    def get_face_image(self, coords=0):
        """Extract a face crop and display it as the miniature.

        With coords == 0 a Haar cascade detects faces and the crop at
        self.currentIndex is used (the index then advances, wrapping).
        Otherwise *coords* is a manually drawn rectangle in label
        coordinates, which is mapped back to the original image.
        """
        # imdecode via a raw byte buffer handles non-ASCII paths that
        # cv2.imread would choke on.
        with open(self.filename, 'rb') as f:
            chunk = f.read()
            chunk_arr = np.frombuffer(chunk, dtype=np.uint8)
            image = cv2.imdecode(chunk_arr, cv2.IMREAD_COLOR)
        if coords == 0:
            print(self.filename)
            #image = cv2.imread(self.filename)
            # NOTE: the cascade xml path must exist locally.
            # The xml files are available at
            # https://github.com/opencv/opencv/tree/master/data/haarcascades
            face_cascade = cv2.CascadeClassifier(
                os.path.join('xml', 'haarcascade_frontalface_default.xml'))
            # NOTE: parameters may need tuning — not all faces are detected.
            faces_coord = face_cascade.detectMultiScale(image,
                                                        scaleFactor=1.2,
                                                        minNeighbors=5,
                                                        minSize=(110, 110))
            if (len(faces_coord) > 0):
                for i, face in enumerate(faces_coord):
                    (x, y, w, h) = face
                    height, width, channels = image.shape
                    # Pad the detection by 10% of the smaller image side,
                    # clamped to the image borders.
                    s = int(min(width * 0.1, height * 0.1))
                    if y - s >= 0:
                        y = y - s
                        h = h + s
                    if x - s >= 0:
                        x = x - s
                        w = w + s
                    h = h + s if y + h + s < height else h
                    w = w + s if x + w + s < width else w
                    if i == self.currentIndex:
                        crop_image = image[y:y + h, x:x + w]
                        #cv2.imshow("Face", crop_image)
                        self.get_round_miniature(crop_image)
                        break
                #cv2.waitKey(0)
                #crop_image.save()
                # Advance to the next face for the "Next" menu action.
                self.currentIndex = 0 if self.currentIndex + 1 >= len(
                    faces_coord) else self.currentIndex + 1
        else:
            if self.real_size:
                #image = cv2.imread(self.filename)
                #x_scale = self.base_image.width() / self.base_image_scaled.width()
                #y_scale = self.base_image.height() / self.base_image_scaled.height()
                # Map label coordinates back to original-image pixels.
                x_scale = self.base_image.width(
                ) * self.base_image_scaled.width() / (
                    self.base_image_scaled.width() * self.baseLabel.width())
                y_scale = self.base_image.height(
                ) * self.base_image_scaled.height() / (
                    self.base_image_scaled.height() * self.baseLabel.height())
                print(
                    (int(coords[0][1] * y_scale), int(coords[1][1] * y_scale),
                     int(coords[0][0] * x_scale), int(coords[1][0] * x_scale)))
                crop_image = image[int(coords[0][1] *
                                       y_scale):int(coords[1][1] * y_scale),
                                   int(coords[0][0] *
                                       x_scale):int(coords[1][0] * x_scale)]
                self.get_round_miniature(crop_image)
class ObjectIcon(QWidget):
    """Widget that renders a level/enemy object as a drag-enabled icon."""

    MIN_SIZE = QSize(32, 32)
    MAX_SIZE = MIN_SIZE * 2

    clicked: SignalInstance = Signal()  # emitted on mouse release
    object_placed: SignalInstance = Signal()  # emitted after a completed drag

    def __init__(self, level_object: Optional[LevelObject] = None):
        super(ObjectIcon, self).__init__()
        size_policy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.setSizePolicy(size_policy)
        self.zoom = 1
        self.object = None
        self.image = QImage()
        self.set_object(level_object)
        self.draw_background_color = True
        self.max_size = self.MIN_SIZE

    def mouseMoveEvent(self, event):
        """Start a drag carrying the object's byte representation."""
        if not (event.buttons() & Qt.LeftButton):
            return super(ObjectIcon, self).mouseMoveEvent(event)
        drag = QDrag(self)
        mime_data = QMimeData()
        object_bytes = bytearray()
        # First byte tags the payload: 0 = LevelObject, 1 = anything else.
        if isinstance(self.object, LevelObject):
            object_bytes.append(0)
        else:
            object_bytes.append(1)
        object_bytes.extend(self.object.to_bytes())
        mime_data.setData("application/level-object", object_bytes)
        drag.setMimeData(mime_data)
        if drag.exec_() == Qt.MoveAction:
            self.object_placed.emit()

    def set_object(self, level_object: Optional[Union[LevelObject, EnemyObject]]):
        """Show *level_object* in the icon, or clear it when None.

        (Annotation fixed: None was always accepted and handled, so the
        hint is now Optional.)
        """
        if level_object is not None:
            self.object = get_minimal_icon_object(level_object)
            self.image = self.object.as_image()
            self.setToolTip(self.object.name)
        else:
            self.image = QImage()
            self.setToolTip("")
        self.update()

    def heightForWidth(self, width: int) -> int:
        """Height that keeps the icon's aspect ratio at *width*.

        Fixed: returns an int as the annotation (and Qt's layout contract)
        require, instead of a float, and guards against a null image whose
        width of 0 would raise ZeroDivisionError.
        """
        current_width, current_height = self.image.size().toTuple()
        if current_width == 0:  # null QImage() — no aspect ratio to keep
            return 0
        return round(current_height / current_width * width)

    def sizeHint(self):
        """Prefer double the image size when it fits, else max_size."""
        if self.object is not None and self.fits_inside(
                self.image.size() * 2, self.max_size):
            return self.image.size() * 2
        else:
            return self.max_size

    def paintEvent(self, event: QPaintEvent):
        """Paint the (optionally background-filled) centered, scaled image."""
        if self.object is not None:
            painter = QPainter(self)
            if self.draw_background_color:
                painter.fillRect(
                    event.rect(),
                    QColor(*bg_color_for_palette(self.object.palette_group)))
            scaled_image = self.image.scaled(self.size(),
                                             aspectMode=Qt.KeepAspectRatio)
            # Center the scaled image inside the widget.
            x = (self.width() - scaled_image.width()) // 2
            y = (self.height() - scaled_image.height()) // 2
            painter.drawImage(x, y, scaled_image)
        return super(ObjectIcon, self).paintEvent(event)

    def mouseReleaseEvent(self, event: QMouseEvent):
        self.clicked.emit()
        return super(ObjectIcon, self).mouseReleaseEvent(event)

    @staticmethod
    def fits_inside(size1: QSize, size2: QSize):
        """True when size1 fits inside size2 on both axes."""
        return size1.width() <= size2.width() and size1.height(
        ) <= size2.height()
def resize_qimage(self, image: QImage) -> QImage:
    """Return *image* smoothly scaled to a self.resize_to square,
    ignoring its aspect ratio."""
    side = self.resize_to
    return image.scaled(side, side, Qt.IgnoreAspectRatio,
                        Qt.SmoothTransformation)
class OrgAppWindows(QMainWindow):
    """Main window of the tournament-organizer app.

    Hosts a vertical-layout central widget, paints the club logo as a
    faint centered watermark, and exposes the slots the menu bar
    (built by ``create_menus_org``) connects to.
    """

    def __init__(self):
        super(OrgAppWindows, self).__init__()
        self.setWindowTitle("Tournament Organization powered by Jcigi")
        self.resize(1000, 1000)
        self.widget = QWidget()
        self.main_layout = QVBoxLayout()
        self.widget.setLayout(self.main_layout)
        self.setCentralWidget(self.widget)
        self.background_image = QImage("images/gdc_logo_uj.png")
        self.image_rect = QRect()  # where the watermark is drawn; updated each paint
        # menus.py defines the menu entries
        create_menus_org(self)

    def paintEvent(self, e):
        painter = QPainter()
        painter.begin(self)
        self.drawWidget(painter)
        painter.end()

    def drawWidget(self, painter):
        """Draw the logo scaled to the window, centered, at 5% opacity."""
        rect = self.rect()
        hatter = self.background_image.scaled(
            QSize(rect.width(), rect.height()), Qt.KeepAspectRatio,
            Qt.SmoothTransformation)
        self.image_rect.setRect(rect.x(), rect.y(), hatter.width(),
                                hatter.height())
        self.image_rect.moveCenter(rect.center())
        painter.setOpacity(0.05)
        # `hatter` is already a QImage — drawing it directly avoids the
        # needless deep copy the previous QImage(hatter) wrapper made on
        # every repaint.
        painter.drawImage(self.image_rect, hatter)

    @Slot()
    def exit_app(self):
        QApplication.quit()

    @Slot()
    def torna_settings(self):
        self.torna_settings_window = TornaSettingsDialog(self)
        self.torna_settings_window.show()

    @Slot()
    def torna_settings2(self):
        self.torna_settings_window = TornaSettingsDialog(
            self, 0)  # TODO: pick this from the active tournaments
        self.torna_settings_window.show()

    @Slot()
    def create_players(self):
        self.select_players_window = SelectPlayersWindow(self)
        self.select_players_window.show()

    @Slot()
    def create_boards(self):
        self.create_boards_window = CsoportTabla(self)
        self.create_boards_window.show()

    @Slot()
    def torna_status(self):
        self.torna_status_window = TornaStatuszWindow(self)
        self.torna_status_window.show()