Example no. 1
 def get_final_image(self):
     """
     Simple merge operation: paste the annotation layer on top of the
     original image. This matches what is displayed on screen, so it is sufficient.
     :return: pillow Image
     """
     img = ImageQt.fromqimage(self.bg_img)
     annotations = ImageQt.fromqimage(self.canvas_img)
     img.paste(annotations, mask=annotations)
     # OR: convert to RGBA and .alpha_composite
     # but this is good enough!
     return img
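The RGBA route mentioned in the comment above would look roughly like this; a minimal sketch, assuming both layers share the same size and can be converted to RGBA:

from PIL import Image

def merge_rgba(background, annotations):
    # Convert both layers to RGBA and let Pillow do the alpha blending.
    base = background.convert("RGBA")
    overlay = annotations.convert("RGBA")
    return Image.alpha_composite(base, overlay)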
 def magma(self):
     image = ImageQt.fromqimage(self.will_change_img)
     img = self.img_to_cv(image)
     kernel = np.array([[1, 1, 0], [1, 0, -1], [0, -1, -1]])
     new_image = cv2.filter2D(img, -1, kernel)
     new_image = cv2.applyColorMap(new_image, cv2.COLORMAP_HOT)
     self.update_img(new_image)
Example no. 3
    def predict(self):
        image = self.__paintBoard.getImage()
        pil_img = ImageQt.fromqimage(image)
        pil_img = pil_img.resize((28, 28), Image.ANTIALIAS)
        # pil_img.save('./images/test66.png')
        # pil_img.show()

        img_array = np.array(pil_img.convert('L')).reshape(784)
        # print(img_array.shape)      # (784,)

        # display image
        # print(img_array)
        plt.imshow(img_array.reshape(28, 28), cmap="binary")
        plt.show()
        # fig = plt.figure(figsize=(6, 6))
        # fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
        # # Draw the digits: each image is 8*8 pixels
        # for i in range(64):
        #     ax = fig.add_subplot(8, 8, i + 1, xticks=[], yticks=[])
        #     ax.imshow(self.xtest[i].reshape(28, 28), cmap=plt.cm.binary, interpolation='nearest')
        #     # Label each image with its target value
        #     ax.text(0, 7, str(self.ytest[i]))
        # plt.show()

        self.__result = self.model.predict(img_array)
        print("result: %d" % self.__result)
        self.__lb_Result.setText("%d" % self.__result)
Example no. 4
    def contrast(self):
        image = ImageQt.fromqimage(self.will_change_img)
        if image.mode != "RGB":
            image.convert("RGB")
        width, height = image.size
        pixels = image.load()
        avg = 0
        for x in range(width):
            for y in range(height):
                r, g, b = pixels[x, y]
                avg += r * 0.299 + g * 0.587 + b * 0.114
        avg /= image.size[0] * image.size[1]

        palette = []
        for i in range(256):
            temp = int(avg + 2 * (i - avg))
            if temp < 0:
                temp = 0
            elif temp > 255:
                temp = 255
            palette.append(temp)

        for x in range(width):
            for y in range(height):
                r, g, b = pixels[x, y]
                image.putpixel((x, y), (palette[r], palette[g], palette[b]))

        image = self.img_to_cv(image)
        self.update_img(image)
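The same contrast curve can be applied without the Python pixel loops by handing the lookup table to Pillow's point(); a minimal sketch, assuming avg has already been computed as above:

from PIL import Image

def stretch_contrast(image, avg, factor=2.0):
    # Same LUT as above, applied to all three bands in C via Image.point().
    lut = [min(255, max(0, int(avg + factor * (i - avg)))) for i in range(256)]
    return image.convert("RGB").point(lut * 3)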
Example no. 5
def MatchFirst(ImgList):
    ImgCutpictures = []
    #ImgGetPictures = []
    ImgCutpictures = walkFile("..\\cutPictures")

    #ImgGetPictures = walkFile(".\getPictures")
    #print(ImgCutpictures)
    #print(ImgGetPictures)
    print("ImgList=" + str(ImgList))
    MatchFiles = {}
    for i in ImgList:
        #img1 = Image.open(i)
        image1 = pm.make_regalur_image(ImageQt.fromqimage(i.toImage()))
        for j in ImgCutpictures:
            img2 = Image.open(j)
            #print(img2)
            image2 = pm.make_regalur_image(img2)
            if pm.calc_similar(image1, image2) == 1.0:
                print(str(i) + "找到匹配了" + j)
                #找到图片最大匹配的文件夹
                j = j.replace("..\cutPictures\\", "")
                print(j)
                if (not MatchFiles.get(j[0:j.index("\\")])):
                    MatchFiles[j[0:j.index("\\")]] = 1
                else:
                    MatchFiles[j[0:j.index("\\")]] += 1

                print(pm.calc_similar(image1, image2))
                break
    print(MatchFiles)
    #print(maxMatch(MatchFiles))
    return maxMatch(MatchFiles)
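maxMatch itself is not part of the snippet; a plausible one-liner, assuming it is meant to return the folder name with the highest match count (only the name comes from the example, the body below is an assumption):

def maxMatch(match_files):
    # Hypothetical helper: return the key with the largest count, or None if empty.
    return max(match_files, key=match_files.get) if match_files else None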
Example no. 6
    def dither(self):
        image = ImageQt.fromqimage(self.will_change_img)
        width, height = image.size
        img = self.create_image(width, height)
        pixels = img.load()
        for i in range(0, width, 2):
            for j in range(0, height, 2):
                # Get Pixels
                p1 = self.get_pixel(image, i, j)
                p2 = self.get_pixel(image, i, j + 1)
                p3 = self.get_pixel(image, i + 1, j)
                p4 = self.get_pixel(image, i + 1, j + 1)

                red = (p1[0] + p2[0] + p3[0] + p4[0]) / 4
                green = (p1[1] + p2[1] + p3[1] + p4[1]) / 4
                blue = (p1[2] + p2[2] + p3[2] + p4[2]) / 4

                r = [0, 0, 0, 0]
                g = [0, 0, 0, 0]
                b = [0, 0, 0, 0]

                for x in range(0, 4):
                    r[x] = self.get_saturation(red, x)
                    g[x] = self.get_saturation(green, x)
                    b[x] = self.get_saturation(blue, x)

                pixels[i, j] = (r[0], g[0], b[0])
                pixels[i, j + 1] = (r[1], g[1], b[1])
                pixels[i + 1, j] = (r[2], g[2], b[2])
                pixels[i + 1, j + 1] = (r[3], g[3], b[3])

        image = self.img_to_cv(img)
        self.update_img(image)
        return img
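get_pixel and get_saturation are helpers defined elsewhere in the class; a plausible sketch of get_saturation, assuming it performs a 2x2 ordered-dither threshold where the position index selects one of four evenly spaced levels (hypothetical, not taken from the source):

def get_saturation(value, position):
    # Hypothetical helper: quantise one averaged channel value to 0 or 255,
    # using a different threshold for each cell of the 2x2 block.
    thresholds = (32, 96, 160, 224)
    return 255 if value > thresholds[position] else 0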
Example no. 7
    def burgundy(self):
        image = ImageQt.fromqimage(self.will_change_img)
        img_rgb = self.img_to_cv(image)
        aspect_ratio = img_rgb.shape[1] / img_rgb.shape[0]
        window_height = 500 / aspect_ratio
        image = cv2.resize(img_rgb, (500, int(window_height)))
        img_color = image
        newImage = img_color.copy()
        i, j, k = img_color.shape
        for x in range(i):
            for y in range(j):
                R = img_color[x, y, 2] * 0.125 + img_color[x, y, 1] * 0.102 + img_color[x, y, 0] * 0.135
                G = img_color[x, y, 2] * 0.256 + img_color[x, y, 1] * 0.106 + img_color[x, y, 0] * 0.96
                B = img_color[x, y, 2] * 0.565 + img_color[x, y, 1] * 0.300 + img_color[x, y, 0] * 0.206
                if R > 255:
                    newImage[x, y, 2] = 102
                else:
                    newImage[x, y, 2] = B
                if G > 255:
                    newImage[x, y, 1] = 102
                else:
                    newImage[x, y, 1] = R
                if B > 255:
                    newImage[x, y, 0] = 102
                else:
                    newImage[x, y, 0] = R

        self.update_img(newImage)
Example no. 8
    def test_sanity(self):
        for mode in ('RGB', 'RGBA', 'L', 'P', '1'):
            src = hopper(mode)
            data = ImageQt.toqimage(src)

            self.assertIsInstance(data, QImage)
            self.assertFalse(data.isNull())

            # reload directly from the qimage
            rt = ImageQt.fromqimage(data)
            if mode in ('L', 'P', '1'):
                self.assert_image_equal(rt, src.convert('RGB'))
            else:
                self.assert_image_equal(rt, src)

            if mode == '1':
                # BW appears to not save correctly on QT4 and QT5
                # kicks out errors on console:
                #     libpng warning: Invalid color type/bit depth combination
                #                     in IHDR
                #     libpng error: Invalid IHDR data
                continue

            # Test saving the file
            tempfile = self.tempfile('temp_{}.png'.format(mode))
            data.save(tempfile)

            # Check that it actually worked.
            reloaded = Image.open(tempfile)
            # Gray images appear to come back in palette mode.
            # They're roughly equivalent
            if QT_VERSION == 4 and mode == 'L':
                src = src.convert('P')
            self.assert_image_equal(reloaded, src)
Example no. 9
    def cartoon(self):
        samp = 2
        filternum = 50

        image = ImageQt.fromqimage(self.will_change_img)
        img = self.img_to_cv(image)

        for _ in range(samp):
            img = cv2.pyrDown(img)

        for _ in range(filternum):
            img = cv2.bilateralFilter(img, 9, 9, 7)

        for _ in range(samp):
            img = cv2.pyrUp(img)
        img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        img_blur = cv2.medianBlur(img_gray, 3)

        img_edge = cv2.adaptiveThreshold(img_blur, 255,
                                         cv2.ADAPTIVE_THRESH_MEAN_C,
                                         cv2.THRESH_BINARY, 9, 2)
        (x, y, z) = img.shape
        img_edge = cv2.resize(img_edge, (y, x))
        img_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB)
        self.temp = cv2.bitwise_and(img, img_edge)
        self.update_img(self.temp)
Example no. 11
 def run(self):
     hwnd = 0
     while True:
         try:
             if hwnd == 0:
                 hwnd = get_hwnd()
             if hwnd == -1:
                 self.update_rec.emit('句柄无效,请打开AirPlayer并连接')  # "Invalid window handle; open AirPlayer and connect"
                 time.sleep(1)
                 hwnd = get_hwnd()
                 continue
             screen = QApplication.primaryScreen()
             try:
                 raw_image = screen.grabWindow(hwnd).toImage()
                 PIL_image = ImageQt.fromqimage(raw_image)
                 CV_image = cv2.cvtColor(np.asarray(PIL_image),
                                         cv2.COLOR_RGB2BGR)
             except Exception as e:
                 hwnd = 0
                 continue
             labeled_image = self.image_process(CV_image)
             labeled_image = Image.fromarray(
                 cv2.cvtColor(labeled_image, cv2.COLOR_BGR2RGB))
             labeled_image = ImageQt.ImageQt(labeled_image)
             self.update_img.emit(labeled_image.copy())
             time.sleep(1 / 15)
         except Exception as e:
             print(e)
Example no. 12
    def on_btn_outport_clicked(self):
        if len(self.filelist) == 0:
            self.fWindow.statusBar().showMessage('请先导入图片!')  # "Please import images first!"
            return

        self.fWindow.statusBar().showMessage('开始保存!')  # "Saving started!"

        config = configparser.ConfigParser()
        config.read("init.ini", encoding="utf8")
        outportPath = config.get("IMG", 'imgsavepath')

        if not os.path.exists(outportPath):
            os.makedirs(outportPath)

        for i in self.filelist:
            # img = QImage(i)
            cvimg = self.cv_imread(i.filepath, 1)
            img = self.CVMat2QImage(cvimg)
            img = self.drawImage(img, i.text, i.textsize, i.textoffset,
                                 i.colorr, i.colorg, i.colorb)
            # img.save(outportPath + '/' + i.filename.split('.')[0] + '.png')
            pimg = ImageQt.fromqimage(img)
            pimg.save(outportPath + '/' + i.filename)

        self.fWindow.statusBar().showMessage('保存结束!')  # "Saving finished!"
Example no. 13
    def pbtPredict_Callback(self):
        __img, img_array = [], []      # convert the image QImage -> PIL Image -> np.array [1, 1, 28, 28]

        # get the image as a QImage
        if self.mode == MODE_MNIST:
            __img = self.lbDataArea.pixmap()  # returns None if the label holds no image
            if __img is None:   # no image: substitute an all-black one
                # __img = QImage(224, 224, QImage.Format_Grayscale8)
                __img = ImageQt.ImageQt(Image.fromarray(np.uint8(np.zeros([224, 224]))))
            else:
                __img = __img.toImage()
        elif self.mode == MODE_WRITE:
            __img = self.paintBoard.getContentAsQImage()

        # convert to a PIL Image for processing
        pil_img = ImageQt.fromqimage(__img)
        pil_img = pil_img.resize((28, 28), Image.ANTIALIAS)
        
        # pil_img.save('test.png')

        img_array = np.array(pil_img.convert('L')).reshape(1,1,28, 28) / 255.0
        # img_array = np.where(img_array>0.5, 1, 0)
    
        # reshape to the network's input shape
        __result = network.predict(img_array)      # shape:[1, 10]

        # print (__result)

        # run the raw prediction through softmax
        __result = softmax(__result)
       
        self.result[0] = np.argmax(__result)          # predicted digit
        self.result[1] = __result[0, self.result[0]]     # confidence

        self.lbResult.setText("%d" % (self.result[0]))
        self.lbCofidence.setText("%.8f" % (self.result[1]))
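The softmax helper used above is not shown in the example; a standard, numerically stable version it presumably resembles (a sketch, not the project's exact code):

import numpy as np

def softmax(x):
    # Stable softmax over the last axis: subtract the max before exponentiating.
    z = x - np.max(x, axis=-1, keepdims=True)
    e = np.exp(z)
    return e / np.sum(e, axis=-1, keepdims=True)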
Example no. 14
    def test_sanity(self):
        for mode in ("RGB", "RGBA", "L", "P", "1"):
            src = hopper(mode)
            data = ImageQt.toqimage(src)

            assert isinstance(data, QImage)
            assert not data.isNull()

            # reload directly from the qimage
            rt = ImageQt.fromqimage(data)
            if mode in ("L", "P", "1"):
                assert_image_equal(rt, src.convert("RGB"))
            else:
                assert_image_equal(rt, src)

            if mode == "1":
                # BW appears to not save correctly on QT4 and QT5
                # kicks out errors on console:
                #     libpng warning: Invalid color type/bit depth combination
                #                     in IHDR
                #     libpng error: Invalid IHDR data
                continue

            # Test saving the file
            tempfile = self.tempfile("temp_{}.png".format(mode))
            data.save(tempfile)

            # Check that it actually worked.
            with Image.open(tempfile) as reloaded:
                assert_image_equal(reloaded, src)
Example no. 15
    def pbtPredict_Callback(self):
        __img, img_array = [], []  # convert the image QImage -> PIL Image -> np.array [1, 1, 28, 28]

        # get the image as a QImage
        if self.mode == MODE_MNIST:
            __img = self.lbDataArea.pixmap()  # returns None if the label holds no image
            if __img is None:  # no image: substitute an all-black one
                # __img = QImage(224, 224, QImage.Format_Grayscale8)
                __img = ImageQt.ImageQt(
                    Image.fromarray(np.uint8(np.zeros([224, 224]))))
            else:
                __img = __img.toImage()
        elif self.mode == MODE_WRITE:
            __img = self.paintBoard.getContentAsQImage()

        # convert to a PIL Image for processing
        pil_img = ImageQt.fromqimage(__img)
        pil_img = pil_img.resize((28, 28), Image.ANTIALIAS)

        img_array = np.array(pil_img.convert('L')).reshape(1, 1, 28, 28)
        img = normalize(img_array)
        img = paddle.Tensor(img)
        __result = network(img)
        argmax__result = paddle.argmax(__result).numpy()

        self.result[0] = argmax__result[0]  # predicted digit

        m = F.sigmoid(__result).numpy()

        self.result[1] = m[0][self.result[0]]

        self.lbResult.setText("%d" % (self.result[0]))
        self.lbCofidence.setText("%.8f" % (self.result[1]))
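F.sigmoid squashes each logit independently, so the displayed value is not a probability over the ten classes. If a normalized confidence is preferred, paddle's softmax would be the drop-in change (a hedged variant, not part of the original example):

        m = F.softmax(__result).numpy()  # rows now sum to 1 across the 10 classes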
Example no. 16
    def daVinci(self):
        img = ImageQt.fromqimage(self.will_change_img)
        pixels = list(img.getdata())
        width, height = img.size
        image_array = []
        for j in range(height):
            row = []
            for i in range(width):
                # mean absolute difference to the right-hand neighbour
                if i < width - 1 and (abs(pixels[j * width + i][0] - pixels[j * width + i + 1][0])
                                      + abs(pixels[j * width + i][1] - pixels[j * width + i + 1][1])
                                      + abs(pixels[j * width + i][2] - pixels[j * width + i + 1][2])) / 3 > 35:
                    row.append((255, 140, i // ((width + 255) // 255)))
                # mean absolute difference to the neighbour below
                elif j < height - 1 and (abs(pixels[j * width + i][0] - pixels[(j + 1) * width + i][0])
                                         + abs(pixels[j * width + i][1] - pixels[(j + 1) * width + i][1])
                                         + abs(pixels[j * width + i][2] - pixels[(j + 1) * width + i][2])) / 3 > 35:
                    row.append((255, 140, i // ((width + 255) // 255)))
                else:
                    row.append((102, 102, 255))

            image_array.append(row)
        array = np.array(image_array, dtype=np.uint8)
        new_image = Image.fromarray(array)
        image = self.img_to_cv(new_image)
        self.update_img(image)
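For reference, the per-pixel edge test above (mean absolute difference to the right or lower neighbour greater than 35) can be written as a vectorized mask; a sketch, assuming the image is available as an HxWx3 uint8 NumPy array:

import numpy as np

def edge_mask(arr, threshold=35):
    # arr: HxWx3 uint8 image. True where the mean absolute difference to the
    # right-hand or lower neighbour exceeds the threshold.
    a = arr.astype(np.int16)
    right = np.abs(a[:, :-1] - a[:, 1:]).mean(axis=2) > threshold
    down = np.abs(a[:-1, :] - a[1:, :]).mean(axis=2) > threshold
    mask = np.zeros(arr.shape[:2], dtype=bool)
    mask[:, :-1] |= right
    mask[:-1, :] |= down
    return mask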
Example no. 17
    def loadImage(self):
        if self.area_state:  # drawing board
            image = self.paintboard.getImage()
        else:  # picture
            image = self.image_area.pixmap().toImage()

        image = ImageQt.fromqimage(image)
        return image
    def pbtPredict_Callback(self):
        __img, img_array = [], []      # convert the image QImage -> PIL Image -> np.array [1, 1, 28, 28]
        
        # get the image as a QImage
        if self.mode == MODE_MNIST:  # randomly sampled MNIST digit
            __img = self.lbDataArea.pixmap()  # returns None if the label holds no image
            if __img is None:   # no image: substitute an all-black one
                # __img = QImage(224, 224, QImage.Format_Grayscale8)
                __img = ImageQt.ImageQt(Image.fromarray(np.uint8(np.zeros([224, 224]))))
            else:
                __img = __img.toImage()
            pil_img = ImageQt.fromqimage(__img)
            pil_img = pil_img.resize((28, 28), Image.ANTIALIAS)
            img_array = np.array(pil_img.convert('L')).reshape(1,1,28, 28) / 255.0
            
        elif self.mode == MODE_WRITE:
            __img = self.paintBoard.getContentAsQImage()
            pil_img = ImageQt.fromqimage(__img)
            pil_img = pil_img.resize((28, 28), Image.ANTIALIAS)
            pil_img.save('test.png')
            img_array = np.array(pil_img.convert('L')).reshape(1,1,28, 28) / 255.0
            
        elif self.mode == MODE_PICTURE:
           
            pic = image_prepare(self.imgName)   # get a 28*28 array
            img_array = pic.reshape(1, 1, 28, 28) / 255.0  # convert to the input format the network expects
            '''
                A note on this part:
                    After getting the preprocessed picture from image_prepare, it first came back as a
                    one-dimensional array, and I kept trying to turn it into a two-dimensional array and
                    invert the grayscale. I thought that way because the reshape(1, 1, 28, 28) approach I
                    had first copied from the web left the format essentially unchanged. In fact, converting
                    the one-dimensional array directly as above works just as well; this mistake cost a lot
                    of time, so pay more attention when debugging.
            '''
        # reshape to the network's input shape
        __result = network.predict(img_array)      # shape:[1, 10]

        # print (__result)

        # run the raw prediction through softmax to get the output
        __result = softmax(__result)
       
        self.result[0] = np.argmax(__result)          # predicted digit
        self.result[1] = __result[0, self.result[0]]     # confidence

        # show the result in the UI
        self.lbResult.setText("%d" % (self.result[0]))
        self.lbCofidence.setText("%.8f" % (self.result[1]))
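image_prepare is imported from elsewhere in the project; a plausible sketch of what it does, assuming it loads the picture, converts it to grayscale and resizes it to 28x28 (only the name and the returned shape come from the snippet, the body below is an assumption):

import numpy as np
from PIL import Image

def image_prepare(path):
    # Hypothetical helper: load, grayscale, shrink to MNIST's 28x28.
    img = Image.open(path).convert('L').resize((28, 28))
    return np.array(img)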
Example no. 19
 def crop(self):
     try:
         self.preview_img_data = image_processing.Crop(QtGui.QPixmap.fromImage(self.image))
         self.image = self.preview_img_data.toImage()
         self.imgData = ImageQt.fromqimage(self.image)
         self.Set_Filter_Label()
         self.Show()
     except Exception:
         pass
    def save_clicked_callback(self):
        image = self.writeBoard.getContentAsQImage()
        # convert to a PIL Image for processing
        pil_img = ImageQt.fromqimage(image)
        #pil_img = pil_img.resize((28, 28), Image.ANTIALIAS)
        print(pil_img.size)
        #pil_img.save('test.png')
        #pil_img = pil_img.convert('1')

        GetFeature(pil_img)
Example no. 21
    def _writeFrame(self, surface: QImage):
        w = self.stream.width
        h = self.stream.height
        surface = surface.scaled(w, h) if self.scale else surface

        # Draw the mouse pointer. Render mouse clicks?
        p = QPainter(surface)
        p.setBrush(QColor.fromRgb(255, 255, 0, 180))
        (x, y) = self.mouse
        p.drawEllipse(x, y, 5, 5)
        p.end()

        # Output frame.
        frame = av.VideoFrame.from_image(ImageQt.fromqimage(surface))
        for packet in self.stream.encode(frame):
            if self.progress:
                self.progress()
            self.mp4.mux(packet)
Example no. 22
 def dream(self):
     image = ImageQt.fromqimage(self.will_change_img)
     img = self.img_to_cv(image)
     element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (6, 6))
     b, g, r = cv2.split(img)
     rbr_img = cv2.merge((r, b, g))
     morphology = cv2.morphologyEx(rbr_img, cv2.MORPH_OPEN, element)
     canvas = cv2.normalize(morphology, None, 20, 255, cv2.NORM_MINMAX)
     new_image = cv2.stylization(canvas, sigma_s=60, sigma_r=0.6)
     self.update_img(new_image)
Example no. 23
    def roundtrip(self, expected):
        # PIL -> Qt
        intermediate = expected.toqimage()
        # Qt -> PIL
        result = ImageQt.fromqimage(intermediate)

        if intermediate.hasAlphaChannel():
            self.assert_image_equal(result, expected.convert('RGBA'))
        else:
            self.assert_image_equal(result, expected.convert('RGB'))
Example no. 24
    def roundtrip(self, expected):
        # PIL -> Qt
        intermediate = expected.toqimage()
        # Qt -> PIL
        result = ImageQt.fromqimage(intermediate)

        if intermediate.hasAlphaChannel():
            self.assert_image_equal(result, expected.convert("RGBA"))
        else:
            self.assert_image_equal(result, expected.convert("RGB"))
Example no. 25
 def cotton_candy(self):
     image = ImageQt.fromqimage(self.will_change_img)
     img = self.img_to_cv(image)
     b, g, r = cv2.split(img)
     b = b * 1.35
     g = g * 0.5
     r = r * 2.25
     rbr_img = cv2.merge((r, g, b))
     image = cv2.convertScaleAbs(rbr_img, alpha=1.2, beta=-20)
     image = cv2.bilateralFilter(image, 9, 75, 75)
     self.update_img(image)
    def brightness_img(self):
        bright_val = self.brightness_slider.value()
        self.brightness_text.setText(str(bright_val))
        image = ImageQt.fromqimage(self.will_change_img)
        if image.mode != 'RGB':
            image = image.convert('RGB')
        enhancer = ImageEnhance.Brightness(image)
        image = enhancer.enhance(bright_val / 10)

        image = self.img_to_cv(image)
        self.update_img(image)
Example no. 27
    def handDraw(self):
        image = ImageQt.fromqimage(self.will_change_img)
        imga = self.img_to_cv(image)
        gray = cv2.cvtColor(imga, cv2.COLOR_BGR2GRAY)
        invert = cv2.bitwise_not(gray)
        smooth = cv2.GaussianBlur(invert, (21, 21), sigmaX=0, sigmaY=0)

        def dodge(x, y):
            return cv2.divide(x, 255 - y, scale=256)
        img = dodge(gray, smooth)
        self.update_img(img)
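The gray/blur/divide sequence above is the classic colour-dodge pencil effect; OpenCV also exposes it directly through cv2.pencilSketch, which could replace the hand-rolled version (a short alternative, the parameter values are illustrative):

        # Built-in OpenCV alternative to the manual dodge: returns a grayscale
        # sketch and a colour sketch of the same image.
        gray_sketch, color_sketch = cv2.pencilSketch(imga, sigma_s=60, sigma_r=0.07, shade_factor=0.05)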
    def change_contrast(self):
        contrast_value = self.contrast_slider.value()
        self.contrast_text.setText(str(contrast_value))
        image = ImageQt.fromqimage(self.will_change_img)

        if image.mode != 'RGB':
            image = image.convert('RGB')
        enhancer = ImageEnhance.Contrast(image)
        image = enhancer.enhance(contrast_value / 10)
        image = self.img_to_cv(image)
        self.update_img(image)
Example no. 29
    def predict(self):
        image = self.__paintBoard.getImage()
        pil_img = ImageQt.fromqimage(image)
        pil_img = pil_img.resize((28, 28), Image.ANTIALIAS)
        img_array = np.array(pil_img.convert('L')).reshape(784)

        plt.imshow(img_array.reshape(28, 28), cmap="binary")

        self.__result = self.model.predict(img_array)
        print("result: %d" % self.__result)
        self.__lb_Result.setText("%d" % self.__result)
        plt.show()
Example no. 30
def test_image():
    modes = ["1", "RGB", "RGBA", "L", "P"]
    qt_format = ImageQt.QImage.Format if ImageQt.qt_version == "6" else ImageQt.QImage
    if hasattr(qt_format, "Format_Grayscale16"):  # Qt 5.13+
        modes.append("I;16")

    for mode in modes:
        im = hopper(mode)
        roundtripped_im = ImageQt.fromqimage(ImageQt.ImageQt(im))
        if mode not in ("RGB", "RGBA"):
            im = im.convert("RGB")
        assert_image_similar(roundtripped_im, im, 1)
Example no. 31
 def focus(self):
     image = ImageQt.fromqimage(self.will_change_img)
     image = self.img_to_cv(image)
     rows, cols = image.shape[:2]
     X_kernel = cv2.getGaussianKernel(cols, 200)
     Y_kernel = cv2.getGaussianKernel(rows, 200)
     kernel = Y_kernel * X_kernel.T
     mask = 255 * kernel / np.linalg.norm(kernel)
     output = np.copy(image)
     for i in range(3):
         output[:, :, i] = output[:, :, i] * mask
     self.update_img(output)
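The loop above multiplies float mask values straight into a uint8 array, so results are truncated on assignment. Doing the arithmetic in float and clipping before casting back is a safer pattern (a small sketch, assuming image is the uint8 array and mask the vignette mask built above):

import numpy as np

# Defensive variant of the per-channel multiply above: compute in float,
# clip to [0, 255], then cast back to uint8 in one step.
output = np.clip(image.astype(np.float32) * mask[:, :, np.newaxis], 0, 255).astype(np.uint8)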
Example no. 32
    def beedril_filter(self):
        image = ImageQt.fromqimage(self.will_change_img)
        if image.mode != "RGBA":
            image.convert("RGBA")

        width, height = image.size
        pixels = image.load()

        for i in range(0, width, 2):
            for j in range(0, height, 2):
                p1 = self.get_pixel(image, i, j)
                p2 = self.get_pixel(image, i, j + 1)
                p3 = self.get_pixel(image, i + 1, j)
                p4 = self.get_pixel(image, i + 1, j + 1)

                # to grayscale
                gray1 = (p1[0] * 0.299) + (p1[1] * 0.587) + (p1[2] * 0.114)
                gray2 = (p2[0] * 0.299) + (p2[1] * 0.587) + (p2[2] * 0.114)
                gray3 = (p3[0] * 0.299) + (p3[1] * 0.587) + (p3[2] * 0.114)
                gray4 = (p4[0] * 0.299) + (p4[1] * 0.587) + (p4[2] * 0.114)

                # Saturation
                sat = (gray1 + gray2 + gray3 + gray4) / 4

                if sat > 223:
                    pixels[i, j] = (255, 208, 2)
                    pixels[i, j + 1] = (255, 208, 2)
                    pixels[i + 1, j] = (255, 208, 2)
                    pixels[i + 1, j + 1] = (255, 208, 2)
                elif sat > 159:
                    pixels[i, j] = (255, 255, 255)
                    pixels[i, j + 1] = (0, 0, 0)
                    pixels[i + 1, j] = (255, 255, 255)
                    pixels[i + 1, j + 1] = (255, 255, 255)
                elif sat > 95:
                    pixels[i, j] = (182, 66, 30)  # (248, 142, 33) orange
                    pixels[i, j + 1] = (182, 66, 30)
                    pixels[i + 1, j] = (182, 66, 30)
                    pixels[i + 1, j + 1] = (182, 66, 30)
                elif sat > 32:
                    pixels[i, j] = (0, 0, 0)
                    pixels[i, j + 1] = (255, 255, 255)
                    pixels[i + 1, j] = (0, 0, 0)
                    pixels[i + 1, j + 1] = (0, 0, 0)
                else:
                    pixels[i, j] = (0, 0, 0)
                    pixels[i, j + 1] = (0, 0, 0)
                    pixels[i + 1, j] = (0, 0, 0)
                    pixels[i + 1, j + 1] = (0, 0, 0)

        image = self.img_to_cv(image)
        self.update_img(image)
Example no. 33
 def roundtrip(self, expected):
     result = ImageQt.fromqimage(expected.toqimage())
     # Qt saves all images as rgb
     self.assert_image_equal(result, expected.convert('RGB'))