def cvPic2Qimg(self, img):
        """
        将用 opencv 读入的图像转换成qt可以读取的图像

        ========== =====================
        序号       支持类型
        ========== =================
                 1 灰度图 Gray
                 2 三通道的图 BGR顺序
                 3 四通道的图 BGRA顺序
        ========= ===================
        """
        if (len(img.shape) == 2):
            # grayscale input
            image = array2qimage(img)
        elif (len(img.shape) == 3):
            # 3- or 4-channel input
            if (img.shape[2] == 3):
                # convert to RGB channel order
                RGBImg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                # passing bytesPerLine (RGBImg.shape[1] * RGBImg.shape[2]) avoids image distortion;
                # see https://blog.csdn.net/owen7500/article/details/50905659 for details
                image = QtGui.QImage(RGBImg, RGBImg.shape[1], RGBImg.shape[0],
                                     RGBImg.shape[1] * RGBImg.shape[2],
                                     QtGui.QImage.Format_RGB888)
            elif (img.shape[2] == 4):
                # BGRA input: convert to RGBA and let qimage2ndarray handle it
                RGBAImg = cv2.cvtColor(img, cv2.COLOR_BGRA2RGBA)
                image = array2qimage(RGBAImg)
        return image
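
A minimal usage sketch of the helper above (the attribute name self.imageLabel and the method name showCvFrame are illustrative, not part of the original snippet): the returned QImage is wrapped in a QPixmap for display.

    def showCvFrame(self, frame):
        # frame: a BGR ndarray as returned by cv2.imread() or cv2.VideoCapture.read()
        qimg = self.cvPic2Qimg(frame)
        self.imageLabel.setPixmap(QtGui.QPixmap.fromImage(qimg))
        self.imageLabel.show()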
Example #2
    def DispImg(self):
        results = captureface.detect_face(self.Image)
        if len(results) > 0:
            faceboxes = results  # the detected face bounding boxes
            for (x, y, w, h) in faceboxes:
                face = self.Image[y:y + h, x:x + w]  # crop the face region from the frame
                face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)  # convert to grayscale
                face = cv2.resize(face, (64, 64))  # resize to the model's input size
                face = face.reshape([1, 64, 64, 1])
                cnnNet = CnnNet(modelfile='./temp/train-model')  # important: this resets the computation graph; otherwise repeatedly importing the trained graph duplicates tensors
                res, pre = cnnNet.predict(test_x=face)  # run the trained model on the face
                if np.max(pre) < 0.8:  # if the highest probability is below the threshold, treat the face as unknown
                    self.name = "unknown"
                else:
                    self.name = number_name[res[0]]
                    self.flagThread.get_name(self.name)

                # cv2.putText(self.Image, self.name, (int(x), int(y) - 20), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2)
                cv2.rectangle(self.Image, (int(x), int(y)), (int(x + w), int(y + h)), (255, 0, 0), 3)  # draw the face rectangle
                img = cv2ImgAddText(self.Image, self.name, int(x+25), int(y-50))
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                qimg = qimage2ndarray.array2qimage(img)  # convert the ndarray to a QImage
                self.DispLb.setPixmap(QPixmap(qimg))  # wrap it in a QPixmap for display
                self.DispLb.show()  # show the image
                gc.collect()

        else:
            img = cv2.cvtColor(self.Image, cv2.COLOR_BGR2RGB)
            qimg = qimage2ndarray.array2qimage(img)  # convert the ndarray to a QImage
            self.DispLb.setPixmap(QPixmap(qimg))  # wrap it in a QPixmap for display
            self.DispLb.show()  # show the image
 def browse(self, label, RGB=False):
     file, _ = QFileDialog.getOpenFileName(
         self, "Browse", "Choose",
         "Filter -- All Files (*);;Image Files (*)")
     if (label == self.ui.label_hough_input):
         self.path_hough = file
         if file:
             self.imagenew = cv2.imread(file)
             self.imagenew = cv2.resize(self.imagenew, (200, 200),
                                        interpolation=cv2.INTER_AREA)
             qimg = qtimg.array2qimage(self.imagenew)
             label.setPixmap(QPixmap(qimg))
             file_name = ntpath.basename(str(file))
             return self.imagenew
     elif (label == self.ui.label_active_contours_input):
         self.path_snake = file
         if file:
             self.imagenew = cv2.imread(file)
             self.imagenew = cv2.resize(self.imagenew, (200, 200),
                                        interpolation=cv2.INTER_AREA)
             qimg = qtimg.array2qimage(self.imagenew)
             label.setPixmap(QPixmap(qimg))
             file_name = ntpath.basename(str(file))
             return self.imagenew
     elif (label == self.ui.label_canny_input):
         self.path_canny = file
    def add_noise(self):
        self.filters = str(self.ui.comboBox.currentText())

        if self.filters == "Gaussian":
            #self.input_iamge=array2qimage.qimage2ndarray(self.input_iamge)
            self.filter_img = self.im_gaussian_noise(0, 0.3)
            self.filter = np.array(self.filter_img * 200)
            self.input_iamge = qimage2ndarray.array2qimage(self.filter)
            self.output_iamge = QPixmap(self.input_iamge)
            self.ui.label_filters_output.setPixmap(self.output_iamge)
            self.ui.label_filters_output.show()

        elif self.filters == "Uniform":
            self.filter_img = self.Random_Uniform(0.3)
            self.filter = np.array(self.filter_img * 300)
            self.input_iamge = qimage2ndarray.array2qimage(self.filter)
            self.output_iamge = QPixmap(self.input_iamge)
            self.ui.label_filters_output.setPixmap(self.output_iamge)
            self.ui.label_filters_output.show()
            print("1212")

    #
        elif self.filters == "Salt-papper":
            self.filter_img = self.salt_pepper_noise(0.3)
            self.filter = np.array(self.filter_img * 200)
            self.input_iamge = qimage2ndarray.array2qimage(self.filter)
            self.output_iamge = QPixmap(self.input_iamge)
            self.ui.label_filters_output.setPixmap(self.output_iamge)
            self.ui.label_filters_output.show()
    def numpyArrayToQPixMap(self, array):
        height = None
        width = None
        color_image = False
        if len(array.shape) == 3:
            height, width, _ = array.shape
            color_image = True
        elif len(array.shape) == 2:
            height, width = array.shape

        if height and width:
            bytes_per_line = 3 * width
            qImage = None
            if array.dtype == "uint8":
                if color_image:
                    # Color image (uint8)
                    qImage = qtg.QImage(
                        array.data,
                        width,
                        height,
                        bytes_per_line,
                        qtg.QImage.Format_RGB888,
                    )
                else:
                    qImage = qimage2ndarray.array2qimage(array)
            elif array.dtype == "float64":
                qImage = qimage2ndarray.array2qimage(array)

            if qImage:
                return qtg.QPixmap(qImage)
    def grabImage(self):
        xSlice = slice(660, 1250)
        ySlice = slice(225, 825)

        ret, img_org = self.cam.read()
        img = np.rot90(img_org[ySlice, xSlice], 2)

        if self.circle:
            try:
                qi = qim2np.array2qimage(self.draw_circle(img))
            except IndexError:
                qi = qim2np.array2qimage(img)
        else:
            qi = qim2np.array2qimage(img)


        pixmap = QtGui.QPixmap()
        px = QtGui.QPixmap.fromImage(qi)

        if self.bgImg:
            self.ui.gv_preview.scene().removeItem(self.bgImg)

        self.bgImg = QtGui.QGraphicsPixmapItem(px)
        self.ui.gv_preview.scene().addItem(self.bgImg)
        self.bgImg.setZValue(-100)
        self.bgImg.setPos(0, 0)

        self.ui.gv_preview.ensureVisible(self.bgImg)
        self.ui.gv_preview.fitInView(self.ui.gv_preview.scene().itemsBoundingRect())

        return img
Example #7
def cv_to_qt(img):
    """
    将用 opencv 读入的图像转换成qt可以读取的图像
    ========== =====================
    序号       支持类型
    ========== =================
             1 灰度图 Gray
             2 三通道的图 BGR顺序
             3 四通道的图 BGRA顺序
    ========= ===================
    """
    if len(img.shape) == 2:
        # grayscale input
        image = array2qimage(img)
    elif len(img.shape) == 3:
        # 3- or 4-channel input
        if img.shape[2] == 3:
            # convert to RGB channel order
            rgb_img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
            # passing bytesPerLine (rgb_img.shape[1] * rgb_img.shape[2]) avoids image distortion;
            # see https://blog.csdn.net/owen7500/article/details/50905659 for details
            image = QImage(rgb_img, rgb_img.shape[1], rgb_img.shape[0],
                           rgb_img.shape[1] * rgb_img.shape[2], QImage.Format_RGB888)
        elif img.shape[2] == 4:
            # BGRA input: convert to RGBA and let qimage2ndarray handle it
            rgba_img = cv.cvtColor(img, cv.COLOR_BGRA2RGBA)
            image = array2qimage(rgba_img)
    return image
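
Going the other way, qimage2ndarray also provides rgb_view (used by several snippets below) to expose a 32-bit QImage's pixels as an ndarray. A small reverse helper as a sketch; the name qt_to_cv and the contiguity copy are illustrative, not from the original source.

import numpy as np
import qimage2ndarray


def qt_to_cv(qimg):
    # requires a 32-bit QImage (e.g. Format_RGB32 / Format_ARGB32)
    rgb = qimage2ndarray.rgb_view(qimg)           # H x W x 3 view in RGB channel order
    return np.ascontiguousarray(rgb[:, :, ::-1])  # reorder to OpenCV's BGR and copy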
    def update_annotator_view(self):

        # If there is no image, there's nothing to clear
        if self.current_image is None:
            return

        if self.annotation_mode is self.ANNOTATION_MODE_MARKING_DEFECTS:
            h, w = self.current_image.rect().height(), self.current_image.rect(
            ).width()

            helper = np.zeros((h, w, 4), dtype=np.uint8)
            helper[self.current_helper == 0] = list(HELPER_COLOR.getRgb())

            self.annotator.clearAndSetImageAndMask(
                self.current_image,
                self.current_defects,
                array2qimage(helper),
                aux_helper=(array2qimage(self.current_tk)
                            if self.current_tk is not None else None),
                process_gray2rgb=True,
                direct_mask_paint=True)
        else:

            # Remember, the mask must be inverted here, but saved properly
            h, w = self.current_image.rect().height(), self.current_image.rect(
            ).width()
            mask = 255 * np.zeros((h, w, 4), dtype=np.uint8)
            mask[self.current_updated_mask == 0] = list(
                MARK_COLOR_MASK.getRgb())

            self.annotator.clearAndSetImageAndMask(self.current_image, mask)
Example #9
    def updateImages(self):
        # Check if an image is loaded
        if self.imageSrc is None:
            return

        # Image Source
        pixmapSrc = QPixmap(qimage2ndarray.array2qimage(self.imageSrc))
        pixmapSrc = pixmapSrc.scaled((self.xsize / 3) - 50, self.ysize - 50,
                                     QtCore.Qt.KeepAspectRatio)

        self.imageLabelSrc.setPixmap(pixmapSrc)

        # Image Result
        self.imageRes1 = area_filter(input=self.imageSrc,
                                     threshold=max(
                                         1, self.areaThresholdSpinbox.value()),
                                     maxtree_p_s=self.maxtree)

        pixmapRes1 = QPixmap(qimage2ndarray.array2qimage(self.imageRes1))
        pixmapRes1 = pixmapRes1.scaled((self.xsize / 3) - 50, self.ysize - 50,
                                       QtCore.Qt.KeepAspectRatio)

        self.imageRes2 = contrast_filter(
            input=self.imageSrc,
            threshold=max(1, self.areaThresholdSpinbox.value()),
            maxtree_p_s=self.maxtree)

        pixmapRes2 = QPixmap(qimage2ndarray.array2qimage(self.imageRes2))
        pixmapRes2 = pixmapRes2.scaled((self.xsize / 3) - 50, self.ysize - 50,
                                       QtCore.Qt.KeepAspectRatio)

        self.imageLabelRes1.setPixmap(pixmapRes1)
        self.imageLabelRes2.setPixmap(pixmapRes2)
Example #10
 def dwt_exc(self):
     # get picture from QLabel
     image1 = self.img1.pixmap().toImage()
     image2 = self.img2.pixmap().toImage()
     # convert QImage to ndarray, otherwise MATLAB cannot work with it
     cover = QImageToCvMat(image1)
     message1 = QImageToCvMat(image2)
     height = message1.shape[0]
     width = message1.shape[1]
     cover_object = matlab.uint8(cover.tolist())
     message = matlab.uint8(message1.tolist())
     # var selects which attack function to apply
     var = self.comboBox.currentIndex() + 1
     # call MATLAB
     engine = matlab.engine.start_matlab()
     watermrkd_img, recmessage, attack_image, attack_message, PSNR, NCC, MSSIM, PSNR_a, NCC_a, MSSIM_a = engine.dwt(
         cover_object, message, height, width, var, nargout=10)
     watermrkd_img = np.array(watermrkd_img)
     recmessage = np.array(recmessage)
     attack_image = np.array(attack_image)
     attack_message = np.array(attack_message)
     # convert the MATLAB results (ndarray to QImage) and show them in the GUI
     jpg3 = qimage2ndarray.array2qimage(watermrkd_img)
     watermrkd = QtGui.QPixmap(jpg3)
     self.img3.setScaledContents(True)
     self.img3.setPixmap(watermrkd)
     jpg4 = qimage2ndarray.array2qimage(recmessage)
     rec = QtGui.QPixmap(jpg4)
     self.img4.setScaledContents(True)
     self.img4.setPixmap(rec)
     jpg5 = qimage2ndarray.array2qimage(attack_image)
     atk = QtGui.QPixmap(jpg5)
     self.img5.setScaledContents(True)
     self.img5.setPixmap(atk)
     jpg6 = qimage2ndarray.array2qimage(attack_message)
     atk_msg = QtGui.QPixmap(jpg6)
     self.img6.setScaledContents(True)
     self.img6.setPixmap(atk_msg)
     # pop up the evaluation metrics table
     self.child = ChildWindow()
     self.child.dialog.tableWidget.setItem(0, 0,
                                           QTableWidgetItem("%.8f" % PSNR))
     self.child.dialog.tableWidget.setItem(0, 1,
                                           QTableWidgetItem("%.8f" % NCC))
     self.child.dialog.tableWidget.setItem(0, 2,
                                           QTableWidgetItem("%.8f" % MSSIM))
     self.child.dialog.tableWidget.setItem(
         1, 0, QTableWidgetItem("%.8f" % PSNR_a))
     self.child.dialog.tableWidget.setItem(1, 1,
                                           QTableWidgetItem("%.8f" % NCC_a))
     self.child.dialog.tableWidget.setItem(
         1, 2, QTableWidgetItem("%.8f" % MSSIM_a))
     layout = QHBoxLayout()
     layout.addWidget(self.child.dialog.tableWidget)
     self.child.setLayout(layout)
     self.child.show()
Example #11
    def initialise_external(self, experiment):
        super().initialise_external(experiment)

        # Get background image from folder:
        if isinstance(self._background, str):
            self._qbackground = qimage2ndarray.array2qimage(
                existing_file_background(self._experiment.asset_dir + "/" +
                                         self._background))
        else:
            self._qbackground = qimage2ndarray.array2qimage(self._background)
    def principal(self):
        frame = self.capture.read(
        )  #Read frame from camera and repaint QLabel widget.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        #frame = cv2.flip(frame, 1)
        image = qimage2ndarray.array2qimage(frame)  #SOLUTION FOR MEMORY LEAK
        self.stream.setPixmap(QtGui.QPixmap(image))
        barcodes = pyzbar.decode(frame)

        for barcode in barcodes:  # loop over the recognized codes
            (x, y, w, h) = barcode.rect  # get the QR code's bounding box
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255),
                          4)  # and draw it on the frame
            barcodeData = barcode.data.decode(
                "utf-8")  # the decoded payload is bytes, convert it to text
            #text = "{}".format(barcodeData)  # turn it into a string so it can be displayed
            #print(barcodeData)  # this is the QR code's content
            #cv2.putText(frame, text, (x, y - 10),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)  # draw the code and its type on the image
            image = qimage2ndarray.array2qimage(
                frame)  #SOLUTION FOR MEMORY LEAK
            self.stream.setPixmap(QtGui.QPixmap(image))
            separar = barcodeData.split(
                "\r\n"
            )  # split the decoded data on \r\n, e.g. TESTE3\r\n1\r\n1 becomes TESTE3,1,1 --- rg = 9 digits, ra = 13 digits
            print("Separando " + str(separar))

            #try:
            nome = separar[0]
            rg = str(separar[1])  # rg
            ra = str(separar[2])  # ra
            print("rg=" + (rg))
            comando = "select nome from pessoas where rg=md5('{}') and ra=md5('{}')".format(
                rg, ra
            )  # build the query "are this rg and ra in the database?", which returns the person's name ----- possibly vulnerable to SQL injection
            mycursor.execute(comando)  # run the query
            self.myresult = mycursor.fetchall(
            )  # required after executing the command -- https://dev.mysql.com/doc/connector-python/en/connector-python-api-mysqlcursor-fetchall.html

            if str(self.myresult) == "[]":
                print("nao cadastrado, tentando cadastrar")
                self.mensageboxCadastro()

            else:
                print("usuario cadastrado")
                print(
                    "Bem Vindo {}".format(
                        str(self.myresult).replace("[('",
                                                   "").replace("',)]", ""))
                )  # print the "Bem Vindo" (welcome) message with the user's name formatted correctly
                guardando = "insert into controle (ra,datas) values ('{}',current_timestamp())".format(
                    ra)  # record in the database who entered the room
                mycursor.execute(guardando)  # run the insert
                cadastrodb.commit()  # required to commit the changes
                print("sucesso?")
                self.mensageboxBemVindo()
Example #13
    def DispImg(self):
        if self.GrayCheck.isChecked():
            qimg = qimage2ndarray.array2qimage(self.GrayImg)
            #CVimg = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)

        else:
            CVimg = cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB)
            #print(CVimg.shape)
            qimg = qimage2ndarray.array2qimage(CVimg)
        self.DispLb.setPixmap(QPixmap(qimg))
        self.DispLb.show()
Example #14
    def apply_roi(self):
        img_diff, img_frames = roi.roi_espiral(self.matrix_rgb)
        img1 = QtGui.QPixmap.fromImage(qimage2ndarray.array2qimage(self.matrix_rgb[self.ang_hor][self.ang_ver]))
        for i in range (len(img_diff)):
            img_diff[i] = QtGui.QPixmap.fromImage(qimage2ndarray.array2qimage(img_diff[i]))
            img_frames[i] = QtGui.QPixmap.fromImage(qimage2ndarray.array2qimage(img_frames[i]))

        viewframe = ViewFrame3(self)

        viewframe.initialize(img1, img_frames, img_diff)
        viewframe.show_image_roi(0)
        viewframe.show()
Example #15
 def apply_transformations(self):
     img = self.matrix_rgb[self.ang_hor][self.ang_ver]
     # factor values (brightness, contrast, sharpness, saturation)
     f_br = (self.spinBox_brilho.value()+100)/100.
     f_co = (self.spinBox_contraste.value()+100)/100.
     f_sh = (self.spinBox_nitidez.value()+100)/100.
     f_sa = (self.spinBox_saturacao.value()+100)/100.
     img, hist = im.transformations(img, f_br, f_co, f_sh, f_sa, self.radioButton_red.isChecked(), self.radioButton_green.isChecked(), self.radioButton_blue.isChecked())
     img  = qimage2ndarray.array2qimage(img)
     hist = qimage2ndarray.array2qimage(hist)
     hist = hist.copy(80, 58, 496, 370)
     return QtGui.QPixmap.fromImage(img), QtGui.QPixmap.fromImage(hist)
Example #16
    def CopyImg(self):
        ret = QRect(20, 200, 481, 20)
        if self.GrayCheck.isChecked():
            qimg = qimage2ndarray.array2qimage(self.GrayImg)
            #CVimg = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        else:
            CVimg = cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB)
            qimg = qimage2ndarray.array2qimage(CVimg)

        b = qimg.copy(ret)
        self.DispCopyImg.setPixmap(QPixmap(b))
        self.DispCopyImg.show()
Example #17
 def invert(self):
     print("invert Pressed")
     global qimg2
     imgNdArray = qimage2ndarray.rgb_view(qimg2)
     invertImage = util.invert(imgNdArray)
     qimg2 = qimage2ndarray.array2qimage(invertImage)
     pixmap2 = QtGui.QPixmap.fromImage(
         qimage2ndarray.array2qimage(invertImage))
     pixmap2 = pixmap2.scaled(self.label_2.width(), self.label_2.height(),
                              QtCore.Qt.KeepAspectRatio)
     self.label_2.setPixmap(pixmap2)
     self.label_2.setAlignment(QtCore.Qt.AlignCenter)
Example #18
    def run(self):
        try:
            self.cap = cv2.VideoCapture(0)
        except Exception as E:
            print(E)

        while self.cap.isOpened():
            global code, order_code, score

            ret, f = self.cap.read()
            f = cv2.resize(f, (800, 600), interpolation=cv2.INTER_AREA)
            gray = cv2.cvtColor(f, cv2.COLOR_BGR2GRAY)
            gray = shadow_remover(cv2, gray)
            blurred = cv2.GaussianBlur(gray, (5, 5), 0)
            edged = cv2.Canny(blurred, 65, 150)
            f, order_code = read_qrcode(cv2, f)

            roi = None
            try:
                f, roi, roi_gray = detect_roi2(cv2, f, edged, gray)
                roi = cv2.resize(roi, (260, 615), interpolation=cv2.INTER_AREA)
                roi_gray = cv2.resize(roi_gray, (260, 615),
                                      interpolation=cv2.INTER_AREA)
                if order_code != None and code != order_code:
                    code = order_code

                cv2.putText(f, "sequence = {}".format(code), (10, 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0))
                #the helping visual rectangle
                cv2.putText(f, 'aligner les rectangles', (305, 140),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                cv2.rectangle(f, (315, 155), (315 + 137, 155 + 318),
                              (255, 0, 0), 2)

                if code != None:

                    score, _ = preprocess(
                        cv2, roi, roi_gray,
                        get_ordered_answers(code, answers, nb_questions),
                        nb_questions)
                    cv2.putText(f, "score = {}".format(score), (10, 35),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0))
                    roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
                    roi = cv2.resize(roi, (150, 354),
                                     interpolation=cv2.INTER_AREA)
                    self.showmarked.emit(qimage2ndarray.array2qimage(roi))

            except Exception as E:
                #print(E)
                pass

            f = cv2.cvtColor(f, cv2.COLOR_BGR2RGB)
            self.changePixmap.emit(qimage2ndarray.array2qimage(f))
Example #19
    def apply_flyview(self):
        matrix_aux = [[0 for x in range(15)]for y in range(15)]
        for i in range(0, 15):
            for j in range (0, 15):
                img_aux = qimage2ndarray.array2qimage(self.matrix_rgb[i][j])
                img_aux = QtGui.QPixmap.fromImage(img_aux).scaled(123, 86, aspectRatioMode=1).toImage()
                matrix_aux[i][j] = qimage2ndarray.rgb_view(img_aux)

        img_mi = mw.mosca_window(matrix_aux, 0.5)
        #img_mi = qimage2ndarray.array2qimage(img_mi)

        self.to_viewframe(img_mi)
        QtGui.QPixmap.fromImage(qimage2ndarray.array2qimage(img_mi)).save("visao_de_mosca.png", "PNG")
Example #20
    def split_and_merge(self):
        self.loaded_image = cv2.imread(PROJECT_FOLDER + '/image.png')
        self.current_frame = cv2.cvtColor(self.loaded_image,
                                          cv2.COLOR_BGR2GRAY)

        sam_worker = SAM_worker.SAM(self.current_frame)
        self.cvl_image.setPixmap(
            QPixmap.fromImage(qimage2ndarray.array2qimage(sam_worker.image)))

        sam_worker.split_and_merge()

        self.cvl_image_2.setPixmap(
            QPixmap.fromImage(qimage2ndarray.array2qimage(sam_worker.debug)))
Example #21
 def DispFrame(self, cmd):
     if cmd == "DC" and (self.DCFrame.any()):
         self.DCFrame = cv2.resize(self.DCFrame, (640, 480))
         Qframe = qimage2ndarray.array2qimage(self.DCFrame)
         self.DC_Window.setPixmap(QPixmap(Qframe))
         self.DC_Window.setScaledContents(True)
         self.DC_Window.show()
     elif cmd == "IC" and (self.ICFrame.any()):
         self.ICFrame = cv2.resize(self.ICFrame, (640, 480))
         Qframe = qimage2ndarray.array2qimage(self.ICFrame)
         self.IC_Window.setPixmap(QPixmap(Qframe))
         self.IC_Window.setScaledContents(True)
         self.IC_Window.show()
Example #22
def any2pixmap(img):
    """ convert multiple inputs to pixmap """
    if isinstance(img, np.ndarray):  # numpy array
        if img.dtype == np.float64:  # np.float (an alias for Python float / float64) was removed in NumPy 1.24
            image = q2a.array2qimage(float2uint8(img))
        else:
            image = q2a.array2qimage(img)
        pixmap = QPixmap.fromImage(image)
    elif isinstance(img, str):  # path to image file
        pixmap = QPixmap(img)
    elif isinstance(img, QImage):  # QImage input
        pixmap = QPixmap.fromImage(img)
    else:
        raise ValueError('Incorrect input, must be ndarray, QImage or str')
    return pixmap
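
A usage sketch for any2pixmap covering its three accepted input types (the file name is illustrative, and a QApplication must already exist before QPixmap objects can be created):

pix_from_array = any2pixmap(np.random.rand(64, 64))     # float ndarray, converted via float2uint8
pix_from_file = any2pixmap("frame_0001.png")            # path to an image file on disk
pix_from_qimage = any2pixmap(QImage("frame_0001.png"))  # an already constructed QImage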
Example #23
 def run(self):
     cap = cv2.VideoCapture(self.media_path)
     self.flag = True
     while self.flag:
         ret, image = cap.read()
         if ret:
             frame, frm, total_p, low_risk_p, high_risk_p, safe_p = yolo(
                 image)
             qtimg = qimage2ndarray.array2qimage(
                 cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
             sqtimg = qimage2ndarray.array2qimage(
                 cv2.cvtColor(frm, cv2.COLOR_BGR2RGB))
             self.UpdateSignal.emit(qtimg, sqtimg, total_p, low_risk_p,
                                    high_risk_p, safe_p)
     cap.release()
Example #24
 def updateLabelWithSpectrogram(self, spec):
     # clrSpec = np.uint8(plt.cm.binary(spec / np.max(spec)) * 255)  # alternative colormap
     clrSpec = np.uint8(self.cm(spec / 18.0) * 255)  # to change the color, swap self.cm for another colormap (e.g. plt.cm.jet)
     clrSpec = np.rot90(clrSpec, 1)
     # clrSpec = spmisc.imresize(clrSpec, 0.25)
     qi = qim2np.array2qimage(clrSpec)#converting from numpy array to qt image
     self.setBackgroundImage(qi)
Example #25
 def _setImage(self, image, normalize):
     self.image = image
     if hasattr(image, 'qimage'):
         qImage = image.qimage(normalize)
     else:
         qImage = qimage2ndarray.array2qimage(image, normalize)
     self.viewer.setImage(qImage)
Example #26
def convert_bitmap(image, width=0, height=0):
    if isinstance(image, ImageResource):
        pix = traitsui_convert_bitmap(image)
    elif isinstance(image, (PILImage.Image,)):
        try:
            data = image.tostring('raw', 'RGBA')
        except NotImplementedError:
            data = image.tobytes('raw', 'RGBA')
        im = QImage(data, image.size[0], image.size[1], QImage.Format_ARGB32)
        pix = QPixmap.fromImage(QImage.rgbSwapped(im))
    else:
        s = image.shape
        if len(s) >= 2:
            pix = QPixmap.fromImage(array2qimage(image))
        else:
            pix = QPixmap()

    if pix:
        if width > 0 and height > 0:
            pix = pix.scaled(width, height)
        elif width > 0:
            pix = pix.scaledToWidth(width)
        if height > 0:
            pix = pix.scaledToHeight(height)

    return pix
 def showFrame(self, i=None):
     """ Display the i^th frame in the viewer.
     Also update the frame slider position and current frame text.
     """
     frame = self.getFrame(i)
     if frame is None:
         return
     # Convert frame ndarray to a QImage.
     qimage = qimage2ndarray.array2qimage(frame, normalize=True)
     self.viewer.setImage(qimage)
     self.currentFrameIndex = i
     # Update frame slider position (hide frame slider if we only have one image frame).
     numFrames = self.numFrames()
     if numFrames > 1:
         self.frameSlider.setRange(1, numFrames)
         self.frameSlider.setValue(i)
         self.frameSlider.show()
         self.prevFrameButton.show()
         self.nextFrameButton.show()
         self.currentFrameLabel.setText(str(i+1) + "/" + str(numFrames))
         self.currentFrameLabel.show()
     else:
         self.frameSlider.hide()
         self.prevFrameButton.hide()
         self.nextFrameButton.hide()
         self.currentFrameLabel.hide()
     self.frameChanged.emit()
     self.frameChanged[int].emit(i)
Example #28
 def drawStationName(self):
     """Draw station name snippet to station_name_img"""
     res = self.current_result
     name = res.station.name
     #self.station_name.setText('')
     #self.station_name.clear()
     #self.station_name.addItems(name.optional_values)
     if not self.file_list.currentItem().station is None:
             self.station_name.setText(self.file_list.currentItem().station)
     else:
         self.station_name.setText(name.value)
     #font = QFont("Consolas", 11)
     #self.station_name.lineEdit().setFont(font)
     #self.setConfidenceColor(self.station_name, name)
     img = self.cutImage(res.contrast_station_img, name)
     if self.dark_theme: 
         img = 255 - img
     processedimage = array2qimage(img)
     pix = QPixmap()
     pix.convertFromImage(processedimage)
     if self.station_name_img.height() < pix.height():
         pix = pix.scaled(self.station_name_img.size(),
                          Qt.KeepAspectRatio,
                          Qt.SmoothTransformation)
     scene = QGraphicsScene()
     scene.addPixmap(pix)
     
     self.station_name_img.setScene(scene)
     self.station_name_img.show()
 def addPreviewImage(self, color_image, parent = None):
     image = color_image
     image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     if not parent is None:
         parent.progress_bar.setValue(12)
     h, w = image.shape
     self.img_height = h
     self.ocr_areas = OCRAreasFinder(color_image)
     self.market_width = self.ocr_areas.market_width
     self.hud_color = self.ocr_areas.hud_color
     if not parent is None:
         parent.progress_bar.setValue(14)
     points = self.ocr_areas.market_table
     self.valid_market = self.ocr_areas.valid
     if self.valid_market:
         cut = image[0:points[1][1] + 20,
                     0:points[1][0] + 20]
     else:
         cut = image[:]
     processedimage = array2qimage(cut)
     if not parent is None:
         parent.progress_bar.setValue(16)
     pix = QPixmap()
     pix.convertFromImage(processedimage)
     if not parent is None:
         parent.progress_bar.setValue(18)
     return pix
Example #30
    def loadDatum(self, key):
        img = QtGui.QImage()
        img.load(key)

        rawImg = QI2A.recarray_view(img)

        background = np.zeros((rawImg.shape[0], rawImg.shape[1], 4),
                              dtype=rawImg['r'].dtype)

        background[:,:,0] = rawImg['r']
        background[:,:,1] = rawImg['g']
        background[:,:,2] = rawImg['b']
        background[:,:,3] = rawImg['a']

        # crop and rotate background image to show only one vial
        rng = slice(*self.vialROI)
        background = np.rot90(background[:, rng]).astype(np.uint32)

        h = background.shape[0]
        w = background.shape[1]

        # grey conversion
        b = background[:,:,0] * 0.2126 + \
            background[:,:,1] * 0.7152 + \
            background[:,:,2] * 0.0722
        background[:,:,0] = b
        background[:,:,1] = b
        background[:,:,2] = b

        im = QI2A.array2qimage(background)
        im = im.convertToFormat(QtGui.QImage.Format_RGB32)

        return im
Example #31
    def from_numpy(self, data):

        self._qimage = array2qimage(data)
        # keep a reference to the data so it is not garbage collected
        # do I really need this??
        self._qimage.ndarray = data
        self._update()
Example #32
 def changeindexcombo4(self):
     """méthode exécutée en cas de changement d'affichage du combo4
     """
     self.combo5.clear()
     print "combo4 cur Index",self.combo4.currentIndex()
     self.node4=self.node3[self.combo3.currentIndex()].getchildren()
     parent=self.node4[self.combo4.currentIndex()]
     # ask for its children,
     ## get the children's names
     childrenlist=self.GetChildrenName(parent)
     self.combo5.addItems(QtCore.QStringList(childrenlist))
     pathto=os.path.join(str(self.combo1.currentText()),\
                         str(self.combo2.currentText()),\
                         str(self.combo3.currentText()),\
                         str(self.combo4.currentText()),\
                         str(self.combo5.currentText()))
     self.lbl.setText(str(pathto))
     workdir="/home/simon/MFISH/"
     ndimage=imread.imread(workdir+str(pathto))
     convert=qnd.array2qimage(ndimage[::2,::2],normalize=True)
     qpix = QtGui.QPixmap(convert)
     image = QtGui.QLabel(self)
     image.setPixmap(qpix)
     #posit = QtGui.QGridLayout()
     self.posit.addWidget(image, 2, 0)
     self.setLayout(self.posit)
     self.setWindowTitle('Cascade Menus')
     self.show()
Example #33
 def wait( self ):
     for i, req in enumerate(self._requests):
         a = self._requests[i].wait()
         a = a.squeeze()
         self._data[:,:,i] = a
     img = array2qimage(self._data)
     return img.convertToFormat(QImage.Format_ARGB32_Premultiplied)        
Example #34
    def changeindexcombo5(self):
        """méthode exécutée en cas de changement d'affichage du combo5
        """
        #self.combo5.clear()#surtout pas!!
        #quel le parent?
        #recup l'item actif de combo4
        print "combo5 cur Index",self.combo5.currentIndex()
        #identifier le node correspondant4
        ## suppossons que ce soit==self.combo1.currentIndex()
        ##parent est un etree.Element, son nom devrait être l'item courant de combo1
        #parent=self.node5[self.combo5.currentIndex()]
        #demander ses enfants,
        ##demander le nom des enfants
#        childrenlist=self.GetChildrenName(parent)
#        print "from changeindex combo5",childrenlist
#        self.combo5.addItems(QtCore.QStringList(childrenlist))
        pathto=os.path.join(str(self.combo1.currentText()),\
                            str(self.combo2.currentText()),\
                            str(self.combo3.currentText()),\
                            str(self.combo4.currentText()),\
                            str(self.combo5.currentText()))
        self.lbl.setText(str(pathto))
        workdir="/home/simon/MFISH/"
        ndimage=imread.imread(workdir+str(pathto))
        convert=qnd.array2qimage(ndimage[::2,::2],normalize=True)
        qpix = QtGui.QPixmap(convert)
        image = QtGui.QLabel(self)
        image.setPixmap(qpix)
        #posit = QtGui.QGridLayout()
        self.posit.addWidget(image, 2, 0)
        self.setLayout(self.posit)
        self.setWindowTitle('Cascade Menus')
        self.show()
    def loadNifti(self):

        # Enable Segment button
        self.Segment_bott.setEnabled(True)

        # disable save jpg button until the two images are loaded(test image and segmented image)
        self.cnvJPG.setEnabled(False)

        # disable show 3d button
        # self.plt3D.setEnabled(False)

        # initialise the x, y coordinates
        self.last_x, self.last_y = None, None

        #self.flagLabelnotempty = 1

        # open browse dialog
        fileName = QFileDialog.getOpenFileName()

        # load nifti from path
        self.loadedNifti = nif.loadSimpleITK(fileName[0])
        self.loadNiftinibabel = nif.loadNifti(fileName[0])

        # clear the label each time we load an image
        self.segmetLabel.clear()
        self.imageTest.clear()

        # normalize the image to darken the nifti slice image
        #self.threshold = 500 # Adjust as needed
        #self.image_2d_scaled = (np.maximum(self.imginit, 0) / (np.amax(self.imginit) + self.threshold)) * 255.0
        #self.img=qimage2ndarray.array2qimage(self.image_2d_scaled)

        # call specific slice
        self.sliceNum, ok = QInputDialog.getInt(
            QtWidgets.QWidget(),
            "Enter slice number",
            "Slice:",
            0,
            1,
            self.loadedNifti.GetSize()[2] - 1,
            1,
        )
        if ok:
            self.imageslice = nif.getSliceITK(self.loadedNifti, self.sliceNum)
        else:
            self.msg.setWindowTitle("Warning")
            self.msg.setInformativeText('You must enter slice number')
            self.msg.exec()

        self.img = qimage2ndarray.array2qimage(self.imageslice)

        # add the image to label
        self.pixmap = QtGui.QPixmap(self.img)
        self.imageTest.setPixmap(self.pixmap)
        self.imageTest.setGeometry(
            QtCore.QRect(
                5, 40,
                self.loadedNifti.GetSize()[0],
                self.loadedNifti.GetSize()[1]))  #(x, y, width, height)
        self.imageTest.mousePressEvent = self.drawMove
Example #36
def save_averaged_back():
    print("save_averaged_back called.")
    try:
        image = qim.array2qimage(np.abs(back.values[0]), True)
        image.save("average_background.png")
    except:
        print("No background image saved.")
    def Imageshow(self, num):

        if num == 0:  # when the Load button was clicked
            self.fileName, _ = QFileDialog.getOpenFileName(
                self, "Open File", "")  # show the file dialog

        if self.fileName:
            self.image = QImage(self.fileName)  # load the image into the QImage member variable

            if self.image.isNull():
                QMessageBox.information(self, "Image Viewer",
                                        "Cannot load %s." % self.fileName)
                return

        if num == 1:  # when the Flip button was clicked
            image_array = qimage2ndarray.rgb_view(
                self.image)  # convert the QImage to a numpy array
            image_array = np.flip(image_array, 0)  # flip image_array vertically
            self.image = qimage2ndarray.array2qimage(
                image_array, normalize=False)  # convert the numpy array back to a QImage

        qPixmapVar = QPixmap.fromImage(self.image)  # convert the QImage to a QPixmap
        qPixmapVar = qPixmapVar.scaled(256, 256)  # resize to 256 x 256
        self.label.setPixmap(qPixmapVar)  # show the picture in the label area

        self.show()
Example #38
 def _initial_image_size(self):
     ''':return Original size of images.'''
     image = qimage2ndarray.array2qimage(self.frames[0])
     if image.isNull():
         image = QtGui.QImage('images\\unavailable.jpg')
     qimg = QtGui.QPixmap.fromImage(image)
     return QtGui.QPixmap(qimg).size()
Example #39
    def update_image(self, work_folder, add=True):
        if add:
            self.counter = self.counter + 1
            self.ui.cnt.setText(str(self.counter))

        # read tiff ( need save_type = "tiff") :
        # pixmap_tiff = QtGui.QPixmap(os.path.expanduser(work_folder + "/" + name_of_tiff_image))

        # read tiff ( need save_type = "jpeg") :
        # pixmap_tiff = QtGui.QPixmap(os.path.expanduser(work_folder + "/" +

        # read image in RAM ( need save_type = "no"):
        qimage_tiff = array2qimage(self.image_ref_save.stack_image,
                                   normalize=(2**16 - 1))
        pixmap_tiff = QtGui.QPixmap.fromImage(qimage_tiff)

        if pixmap_tiff.isNull():
            self.ui.log.append(_("invalid frame"))

        pixmap_tiff_resize = pixmap_tiff.scaled(
            self.ui.image_stack.frameGeometry().width(),
            self.ui.image_stack.frameGeometry().height(),
            QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
        self.ui.image_stack.setPixmap(pixmap_tiff_resize)
        self.ui.log.append(_("Updated GUI image"))
        print(_("Updated GUI image"))
Example #40
def save_holos():
    print("save_holos called.")
    options = QtWidgets.QFileDialog.Options()
    options |= QtWidgets.QFileDialog.DontUseNativeDialog

    try:
        rec_vol
    except:
        print("rec_vol ist not defined!")
        return
    global dirName
    dirName, _ = QtWidgets.QFileDialog.getSaveFileName(
        None,
        "QFileDialog.getSaveFileName()",
        "",
        "Dir of Pic Files (*.png)",
        options=options)
    if dirName:
        try:
            os.stat(dirName)
        except:
            os.mkdir(dirName)
        print("Directorty {} create".format(dirName))
        for i in range(len(rec_vol)):
            image = qim.array2qimage(np.abs(rec_vol.values[i]), True)
            image.save(dirName + '/holo_' + str(i) + '.png')
    def addTestImage(self, color_image):
        self.ocr_areas = OCRAreasFinder(color_image, self.settings["contrast"])
        self.market_width = self.ocr_areas.market_width
        self.valid_market = self.ocr_areas.valid
        if self.settings['gray_preview']:
            img = cv2.imread(unicode(self.hiddentext).encode(sys.getfilesystemencoding()), 0)
            img = array2qimage(img)
            pix = QPixmap.fromImage(img)
        else:
            pix = QPixmap(self.hiddentext)
        width = pix.width()
        height = pix.height()
        if height > 0:
            aspect_ratio = float(width)/height
            if aspect_ratio > 1.78:
                new_w = int(1.77778*height)
                rect = QRect((width-new_w)/2, 0, new_w, height)
                pix = pix.copy(rect)
            
        if self.valid_market:
            points = self.ocr_areas.market_table
            self.market_offset = (points[0][0], points[0][1])
            station = self.ocr_areas.station_name
            self.station_offset = (station[0][0], station[0][1])
            rect = QRect(0, 0, points[1][0] + 20, points[1][1] + 20)
            cut = pix.copy(rect)
            return cut
        else:
            self.market_offset = (0, 0)
            self.station_offset = (0, 0)

        return pix
 def addPreviewImage(self, color_image, parent = None):
     image = color_image
     image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     if not parent is None:
         parent.progress_bar.setValue(12)
     h, w = image.shape
     self.img_height = h
     self.ocr_areas = OCRAreasFinder(color_image, self.settings["contrast"])
     self.market_width = self.ocr_areas.market_width
     if not parent is None:
         parent.progress_bar.setValue(14)
     
     self.valid_market = self.ocr_areas.valid
     if self.valid_market:
         points = self.ocr_areas.market_table
         self.market_offset = (points[0][0], points[0][1])
         station = self.ocr_areas.station_name
         self.station_offset = (station[0][0], station[0][1])
         cut = image[0:points[1][1] + 20,
                     0:points[1][0] + 20]
     else:
         cut = image[:]
         self.market_offset = (0, 0)
         self.station_offset = (0, 0)
         
     processedimage = array2qimage(cut)
     if not parent is None:
         parent.progress_bar.setValue(16)
     pix = QPixmap()
     pix.convertFromImage(processedimage)
     if not parent is None:
         parent.progress_bar.setValue(18)
     return pix
Example #43
    def displayNext(self):
        while True:
            # no image?
            if self.current_image is None:
                # try to get next
                try:
                    self.current_image = next(self.image_iterator)
                # if not, close window
                except StopIteration:
                    return self.close()
                # get image pixel data and markers
                self.image_data = self.current_image.data8
                self.markers = (m for m in db.getMarkers(
                    image=self.current_image, type=marker_type_name))
            # get the next marker
            try:
                self.current_marker = next(self.markers)
            # or go to the next image
            except StopIteration:
                self.current_image = None
                continue
            break

        # get marker coordinates and image slice
        x = int(self.current_marker.x)
        y = int(self.current_marker.y)
        # cut out image
        if len(self.image_data.shape) == 3:  # in case of color
            data = self.image_data[y - view_o1:y + view_o2,
                                   x - view_o1:x + view_o2, :]
        else:  # in case of black and white
            data = self.image_data[y - view_o1:y + view_o2,
                                   x - view_o1:x + view_o2]
        # and feed data to pixmap, this will automatically display the data
        self.pixmapItem.setPixmap(QtGui.QPixmap(array2qimage(data)))
Example #44
def Preprocessing(image):
    image_arr = qimage2ndarray.rgb_view(image)

    #Grayscale
    gray_arr = [0.2890, 0.5870, 0.1140]
    image_gray = np.dot(image_arr, gray_arr)

    #Padding
    image_pad = np.pad(image_gray, 1, mode='constant', constant_values=0)

    #Smoothing
    kenel = np.array([[1/273,4/273,7/273,4/273,1/273],
                      [4/273,16/273,26/273,16/273,4/273],
                      [7/273,26/273,41/273,26/273,7/273],
                      [4/273,16/273,26/273,16/273,4/273],
                      [1/273,4/273,7/273,4/273,1/273]])

    a = image_pad.shape[0]-kenel.shape[0] + 1
    b = image_pad.shape[1]-kenel.shape[1] + 1
    result2 = []
    for row in range(a):
        for column in range(b):
            result1 = image_pad[ row : row + kenel.shape[0], column : column + kenel.shape[1] ] * kenel
            result2.append(np.sum(result1))
    result = np.array(result2).reshape(a,b)


    image_after = qimage2ndarray.array2qimage(result, normalize=False)

    return QPixmap.fromImage(image_after)
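
The explicit convolution loop above can be replaced by a single vectorized call; a sketch using cv2.filter2D with the same 5x5 kernel (border handling differs slightly from the manual 1-pixel zero padding, so treat it as an approximation of the snippet, not a drop-in replacement):

import cv2
import numpy as np

def smooth_with_filter2d(image_gray):
    # the same approximate-Gaussian kernel (entries sum to 273), applied by OpenCV
    kernel = np.array([[1, 4, 7, 4, 1],
                       [4, 16, 26, 16, 4],
                       [7, 26, 41, 26, 7],
                       [4, 16, 26, 16, 4],
                       [1, 4, 7, 4, 1]], dtype=np.float64) / 273.0
    return cv2.filter2D(image_gray, -1, kernel, borderType=cv2.BORDER_CONSTANT)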
 def SetImage(self, data):
     import qimage2ndarray
     image = qimage2ndarray.array2qimage(data)
     pixmap = QtGui.QPixmap(image)
     scene = QtGui.QGraphicsScene()
     scene.addPixmap(pixmap)
     self.ui.graphicsView.setScene(scene)
Example #46
 def paintEvent(self, event):
     paint = QtGui.QPainter()
     paint.begin(self)
     paint.setPen(QtGui.QColor(168, 34, 3))
     paint.setFont(QtGui.QFont('Decorative', 10))
     paint.drawText(event.rect(), QtCore.Qt.AlignCenter, self.text)
     paint.drawImage(0,0,array2qimage(self.model.dat_s, True))
     paint.end()
Example #47
def imshow(arr):
    if not isinstance(arr,vigra.VigraArray):
        arr = arr.swapaxes(0,1).view(numpy.ndarray)
    im = qimage2ndarray.array2qimage(arr)
    d = QtGui.QDialog()
    l = QtGui.QLabel(d)
    l.setPixmap(QtGui.QPixmap.fromImage(im))
    d.resize(l.sizeHint())
    d.exec_()
Example #48
    def setImage(self, name):
        assert isinstance(name, basestring)

        image = self._images[name]
        image = array2qimage(image.toArray(copy=False))
        pixmap = QtGui.QPixmap.fromImage(image)

        self.graphics.setPixmap(
            pixmap.scaled(self.size(), Qt.IgnoreAspectRatio,
                          Qt.SmoothTransformation))
 def CompletedJulia(self, data):
     import qimage2ndarray
     elapsed = self.time.elapsed()
     if elapsed > 1000000:
         self.ui.labelTime.setText("Time: %ds" % (self.time.elapsed() / 1000))
     else:
         self.ui.labelTime.setText("Time: %dms" % self.time.elapsed())
     self.ui.progressBar.setHidden(True)
     self.image = qimage2ndarray.array2qimage(data)
     self.SetImage(self.image)
Example #50
 def render_image(self, image):
     if self._show_image:
         palette = self._palettes[self._current]
         qimage = array2qimage(image)
         qimage = qimage.convertToFormat(qimage.Format_Indexed8, palette.qt)
     else:
         height, width = image.shape
         qimage = QImage(width, height, QImage.Format_ARGB32_Premultiplied)
         qimage.fill(0)
     return qimage
 def addPreviewImage(self):
     image = self.color_image
     image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     h, w = image.shape
     cut = image[0:self.settings["cal_points"][7]*h + 20,
                 0:self.settings["cal_points"][6]*w + 20]
     processedimage = array2qimage(cut)
     pix = QPixmap()
     pix.convertFromImage(processedimage)
     return pix
 def drawSnippet(self, graphicsview, snippet):
     """Draw single result item to graphicsview"""
     processedimage = array2qimage(snippet)
     pix = QPixmap()
     pix.convertFromImage(processedimage)
     pix = pix.scaled(graphicsview.width(), graphicsview.height()-1, Qt.KeepAspectRatio, Qt.SmoothTransformation)
     scene = QGraphicsScene()
     scene.addPixmap(pix)
     graphicsview.setScene(scene)
     graphicsview.show()
Example #53
 def toImage( self ):
     a = self._arrayreq.getResult()
     shape = a.shape + (4,)
     d = np.empty(shape, dtype=np.uint8)
     d[:,:,0] = a[:,:]*self._tintColor.redF()
     d[:,:,1] = a[:,:]*self._tintColor.greenF()
     d[:,:,2] = a[:,:]*self._tintColor.blueF()
     d[:,:,3] = a[:,:]
     img = array2qimage(d, self._normalize)
     return img.convertToFormat(QImage.Format_ARGB32_Premultiplied)        
Example #54
    def toImage( self ):
        a = self._arrayreq.getResult()
        assert a.ndim == 2

        #make sure that a has values in range [0, colortable_length)
        a = np.remainder(a, len(self._colorTable))
        #apply colortable
        img = self._colorTable[a]
        img = array2qimage(img)

        return img 
def test_scalar2qimage():
    a = numpy.zeros((240, 320), dtype = float)
    a[12,10] = 42.42
    a[13,10] = -10
    qImg = qimage2ndarray.array2qimage(a)
    assert not qImg.isNull()
    assert_equal(qImg.width(), 320)
    assert_equal(qImg.height(), 240)
    assert_equal(qImg.format(), QtGui.QImage.Format_RGB32)
    assert_equal(hex(qImg.pixel(10,12)), hex(QtGui.qRgb(42,42,42))) # max pixel
    assert_equal(hex(qImg.pixel(10,14)), hex(QtGui.qRgb(0,0,0)))    # zero pixel
    assert_equal(hex(qImg.pixel(10,13)), hex(QtGui.qRgb(0,0,0)))    # min pixel
def test_rgb2qimage():
    a = numpy.zeros((240, 320, 3), dtype = float)
    a[12,10] = (42.42, 20, 14)
    a[13,10] = (-10, 0, -14)
    qImg = qimage2ndarray.array2qimage(a)
    assert not qImg.isNull()
    assert_equal(qImg.width(), 320)
    assert_equal(qImg.height(), 240)
    assert_equal(qImg.format(), QtGui.QImage.Format_RGB32)
    assert_equal(hex(qImg.pixel(10,12)), hex(QtGui.qRgb(42,20,14)))
    assert_equal(hex(qImg.pixel(10,13)), hex(QtGui.qRgb(0,0,0)))
    assert_equal(hex(qImg.pixel(10,14)), hex(QtGui.qRgb(0,0,0)))
Example #57
    def process_press(self, event):
        center = event.posF()
        cx = center.x()
        cy = center.y()
        x = cx - self.cursor_size / 2
        y = cy - self.cursor_size / 2
        self.wmr.set_roi(x, y, self.cursor_size, self.cursor_size)
        self.wmr.process()

        img = array2qimage(self.wmr.image[:, :, ::-1])
        self.win.pixmap.convertFromImage(img)
        self.win.label.repaint()
Example #58
 def toImage( self ):
     for i, req in enumerate(self._requests):
         a = self._requests[i].getResult()
         if self._normalize[i] is not None:
             a = a.astype(np.float32)
             a = (a - self._normalize[i][0])*255.0 / (self._normalize[i][1]-self._normalize[i][0])
             a[a > 255] = 255
             a[a < 0]   = 0
             a = a.astype(np.uint8)
         self._data[:,:,i] = a
     img = array2qimage(self._data)
     return img.convertToFormat(QImage.Format_ARGB32_Premultiplied)        
 def MakeDiffImage(self, input, last): #input is the video stream, last is the last grabbed frame
     #make a QImage with the diff from these two QImages
     import numpy as np
     import qimage2ndarray
     
     npinput = qimage2ndarray.rgb_view(input)
     nplast = qimage2ndarray.rgb_view(last)
     
     #nplast = nplast/2 + npinput/2
     #print type(npinput)
     
     qImage = qimage2ndarray.array2qimage(npinput, normalize = False) # create QImage from ndarray
     return qImage
def test_scalar2qimage_with_alpha():
    a = numpy.zeros((240, 320, 2), dtype = float)
    a[...,1] = 255
    a[12,10] = (42.42, 128)
    a[13,10] = (-10, 0)
    qImg = qimage2ndarray.array2qimage(a)
    assert not qImg.isNull()
    assert_equal(qImg.width(), 320)
    assert_equal(qImg.height(), 240)
    assert_equal(qImg.format(), QtGui.QImage.Format_ARGB32)
    assert_equal(hex(qImg.pixel(10,12)), hex(QtGui.qRgba(42,42,42,128))) # max pixel
    assert_equal(hex(qImg.pixel(10,14)), hex(QtGui.qRgba(0,0,0,255)))    # zero pixel
    assert_equal(hex(qImg.pixel(10,13)), hex(QtGui.qRgba(0,0,0,0)))      # min pixel