Example #1
    def __init__(self, opt):

        super().__init__()
        self.init_deep_model(opt)

        self.setupUi(self)
        self.show()

        self.modes = 0
        self.alpha = 1

        self.mouse_clicked = False
        self.scene = GraphicsScene(self.modes, self)
        self.scene.setSceneRect(0, 0, 512, 512)
        self.graphicsView.setScene(self.scene)
        self.graphicsView.setAlignment(Qt.AlignCenter)
        self.graphicsView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.result_scene = QGraphicsScene()
        self.graphicsView_2.setScene(self.result_scene)
        self.graphicsView_2.setAlignment(Qt.AlignCenter)
        self.graphicsView_2.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_2.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.GT_scene = QGraphicsScene()
        self.graphicsView_GT.setScene(self.GT_scene)
        self.graphicsView_GT.setAlignment(Qt.AlignCenter)
        self.graphicsView_GT.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_GT.setHorizontalScrollBarPolicy(
            Qt.ScrollBarAlwaysOff)

        self.dlg = QColorDialog(self.graphicsView)

        self.init_screen()
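
All of the examples on this page assume an already-running Qt application and a Designer-generated `Ui_Form` mixin that supplies `setupUi` and the named views (`graphicsView`, `graphicsView_2`, ...). A minimal, hypothetical launcher for a widget like the one above might look as follows; `Ex` stands for the class these `__init__` methods belong to, and `parse_demo_options()` is a placeholder rather than anything defined in the snippets.

import sys

from PyQt5.QtWidgets import QApplication

if __name__ == '__main__':
    # Hypothetical entry point: build the option object the widget expects,
    # then construct it; the widget calls self.show() inside __init__.
    app = QApplication(sys.argv)
    opt = parse_demo_options()  # placeholder, not part of the snippets above
    ex = Ex(opt)
    sys.exit(app.exec_())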
Example #2
    def __init__(self, model, config):
        super().__init__()
        self.setupUi(self)
        self.show()
        self.model = model
        self.config = config
        self.model.load_demo_graph(config)

        self.output_img = None

        self.mat_img = None

        self.ld_mask = None
        self.ld_sk = None

        self.modes = [0,0,0]
        self.mouse_clicked = False
        self.scene = GraphicsScene(self.modes)
        self.graphicsView.setScene(self.scene)
        self.graphicsView.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.result_scene = QGraphicsScene()
        self.graphicsView_2.setScene(self.result_scene)
        self.graphicsView_2.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView_2.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_2.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.dlg = QColorDialog(self.graphicsView)
        self.color = None
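
The `GraphicsScene` constructed in these snippets is project-specific and not shown here. A rough, hypothetical stub of the interface the constructors above appear to rely on (the real classes also record strokes in their mouse event handlers):

from PyQt5.QtWidgets import QGraphicsScene

class GraphicsScene(QGraphicsScene):
    # Hypothetical stub; the real implementations belong to each project.
    def __init__(self, modes, parent=None):
        super().__init__(parent)
        self.modes = modes            # active drawing mode(s)
        self.mouse_clicked = False
        self.mask_points = []         # strokes drawn in mask mode
        self.sketch_points = []       # strokes drawn in sketch mode
        self.stroke_points = []       # colored strokes
        self.history = []

    def reset(self):
        self.mask_points, self.sketch_points, self.stroke_points = [], [], []
        self.history = []

    def reset_items(self):
        for item in list(self.items()):
            self.removeItem(item)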
Example #3
    def __init__(self, model, config):
        super().__init__()

        #self.get_head_outline()
        self._step_counter = 1

        # start camera
        self.record_video = RecordVideo()
        # connect the frame data signal and slot together
        self.record_video.frame_data.connect(self.camera_data_slot)

        # start face detector
        self.detector = MTCNNDetector(device='cpu')

        self.setupUi(self)
        self.show()
        self.model = model
        self.config = config
        self.model.load_demo_graph(config)

        self.output_img = None

        self.mat_img = None

        self.ld_mask = None
        self.ld_sk = None

        self._frame_data = None

        self.modes = [0, 0, 0]
        self.mouse_clicked = False
        self.scene = GraphicsScene(self.modes)
        self.graphicsView.setScene(self.scene)
        self.graphicsView.setAlignment(QtCore.Qt.AlignTop
                                       | QtCore.Qt.AlignLeft)
        self.graphicsView.setVerticalScrollBarPolicy(
            QtCore.Qt.ScrollBarAlwaysOff)
        self.graphicsView.setHorizontalScrollBarPolicy(
            QtCore.Qt.ScrollBarAlwaysOff)

        self.result_scene = QtWidgets.QGraphicsScene()
        self.graphicsView_2.setScene(self.result_scene)
        self.graphicsView_2.setAlignment(QtCore.Qt.AlignTop
                                         | QtCore.Qt.AlignLeft)
        self.graphicsView_2.setVerticalScrollBarPolicy(
            QtCore.Qt.ScrollBarAlwaysOff)
        self.graphicsView_2.setHorizontalScrollBarPolicy(
            QtCore.Qt.ScrollBarAlwaysOff)

        self.dlg = QtWidgets.QColorDialog(self.graphicsView)
        self.color = None
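
Example #3 additionally assumes a `RecordVideo` camera source whose `frame_data` signal is wired to `camera_data_slot`, plus an `MTCNNDetector` face detector. A minimal sketch of what such a camera source could look like (the class body, port, and timing are assumptions, not taken from the original project):

import cv2
import numpy as np
from PyQt5 import QtCore

class RecordVideo(QtCore.QObject):
    # Sketch only: grab webcam frames with OpenCV and emit them as numpy arrays.
    frame_data = QtCore.pyqtSignal(np.ndarray)

    def __init__(self, camera_port=0, parent=None):
        super().__init__(parent)
        self.camera = cv2.VideoCapture(camera_port)
        self.timer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.read_frame)

    def start_recording(self):
        self.timer.start(30)  # poll roughly every 30 ms

    def read_frame(self):
        ok, frame = self.camera.read()
        if ok:
            self.frame_data.emit(frame)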
Example #4
    def __init__(self, args):

        super().__init__()

        self.args = args
        self.current_style = 0
        if self.args.load_network:
            import torch
            from sofgan import init_deep_model
            self.styles, self.generator = init_deep_model(
                '../modules/sofgan.pt')
            self.noise = [
                getattr(self.generator.noises, f'noise_{i}')
                for i in range(self.generator.num_layers)
            ]

        self.setupUi(self)
        self.show()

        self.modes = 0
        self.alpha = 0.5

        self.mouse_clicked = False
        self.scene = GraphicsScene(self.modes, self)
        self.scene.setSceneRect(0, 0, 512, 512)
        self.graphicsView.setScene(self.scene)
        self.graphicsView.setAlignment(Qt.AlignCenter)
        self.graphicsView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.GT_scene = QGraphicsScene()
        self.graphicsView_GT.setScene(self.GT_scene)
        self.graphicsView_GT.setAlignment(Qt.AlignCenter)
        self.graphicsView_GT.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_GT.setHorizontalScrollBarPolicy(
            Qt.ScrollBarAlwaysOff)

        self.dlg = QColorDialog(self.graphicsView)

        self.init_screen()
Example #5
    def __init__(self, model, opt):
        super(Ex, self).__init__()
        self.setupUi(self)
        self.show()
        self.model = model
        self.opt = opt

        self.output_img = None

        self.mat_img = None

        self.mode = 0
        self.size = 6
        self.mask = None
        self.mask_m = None
        self.img = None

        self.mouse_clicked = False
        self.scene = GraphicsScene(self.mode, self.size)
        self.graphicsView.setScene(self.scene)
        self.graphicsView.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.ref_scene = QGraphicsScene()
        self.graphicsView_2.setScene(self.ref_scene)
        self.graphicsView_2.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView_2.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_2.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.result_scene = QGraphicsScene()
        self.graphicsView_3.setScene(self.result_scene)
        self.graphicsView_3.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView_3.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_3.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.dlg = QColorDialog(self.graphicsView)
        self.color = None
Example #6
class Ex(QWidget, Ui_Form):
    def __init__(self, model, opt):
        super(Ex, self).__init__()
        self.setupUi(self)
        self.show()
        self.model = model
        self.opt = opt

        self.output_img = None

        self.mat_img = None

        self.mode = 0
        self.size = 6
        self.mask = None
        self.mask_m = None
        self.img = None

        self.mouse_clicked = False
        self.scene = GraphicsScene(self.mode, self.size)
        self.graphicsView.setScene(self.scene)
        self.graphicsView.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.ref_scene = QGraphicsScene()
        self.graphicsView_2.setScene(self.ref_scene)
        self.graphicsView_2.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView_2.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_2.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.result_scene = QGraphicsScene()
        self.graphicsView_3.setScene(self.result_scene)
        self.graphicsView_3.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView_3.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_3.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.dlg = QColorDialog(self.graphicsView)
        self.color = None

    def open(self):
        fileName, _ = QFileDialog.getOpenFileName(self, "Open File",
                                                  QDir.currentPath())
        if fileName:
            image = QPixmap(fileName)
            mat_img = Image.open(fileName)
            self.img = mat_img.copy()
            if image.isNull():
                QMessageBox.information(self, "Image Viewer",
                                        "Cannot load %s." % fileName)
                return
            image = image.scaled(self.graphicsView.size(),
                                 Qt.IgnoreAspectRatio)

            if len(self.ref_scene.items()) > 0:
                self.ref_scene.removeItem(self.ref_scene.items()[-1])
            self.ref_scene.addPixmap(image)
            if len(self.result_scene.items()) > 0:
                self.result_scene.removeItem(self.result_scene.items()[-1])
            self.result_scene.addPixmap(image)

    def open_mask(self):
        fileName, _ = QFileDialog.getOpenFileName(self, "Open File",
                                                  QDir.currentPath())
        if fileName:
            mat_img = cv2.imread(fileName)
            self.mask = mat_img.copy()
            self.mask_m = mat_img
            mat_img = mat_img.copy()
            image = QImage(mat_img, 512, 512, QImage.Format_RGB888)

            if image.isNull():
                QMessageBox.information(self, "Image Viewer",
                                        "Cannot load %s." % fileName)
                return

            for i in range(512):
                for j in range(512):
                    r, g, b, a = image.pixelColor(i, j).getRgb()
                    image.setPixel(i, j, color_list[r].rgb())

            pixmap = QPixmap()
            pixmap.convertFromImage(image)
            self.image = pixmap.scaled(self.graphicsView.size(),
                                       Qt.IgnoreAspectRatio)
            self.scene.reset()
            if len(self.scene.items()) > 0:
                self.scene.reset_items()
            self.scene.addPixmap(self.image)

    def bg_mode(self):
        self.scene.mode = 0

    def skin_mode(self):
        self.scene.mode = 1

    def nose_mode(self):
        self.scene.mode = 2

    def eye_g_mode(self):
        self.scene.mode = 3

    def l_eye_mode(self):
        self.scene.mode = 4

    def r_eye_mode(self):
        self.scene.mode = 5

    def l_brow_mode(self):
        self.scene.mode = 6

    def r_brow_mode(self):
        self.scene.mode = 7

    def l_ear_mode(self):
        self.scene.mode = 8

    def r_ear_mode(self):
        self.scene.mode = 9

    def mouth_mode(self):
        self.scene.mode = 10

    def u_lip_mode(self):
        self.scene.mode = 11

    def l_lip_mode(self):
        self.scene.mode = 12

    def hair_mode(self):
        self.scene.mode = 13

    def hat_mode(self):
        self.scene.mode = 14

    def ear_r_mode(self):
        self.scene.mode = 15

    def neck_l_mode(self):
        self.scene.mode = 16

    def neck_mode(self):
        self.scene.mode = 17

    def cloth_mode(self):
        self.scene.mode = 18

    def increase(self):
        if self.scene.size < 15:
            self.scene.size += 1

    def decrease(self):
        if self.scene.size > 1:
            self.scene.size -= 1

    def edit(self):
        for i in range(19):
            self.mask_m = self.make_mask(self.mask_m,
                                         self.scene.mask_points[i],
                                         self.scene.size_points[i], i)

        params = get_params(self.opt, (512, 512))
        transform_mask = get_transform(self.opt,
                                       params,
                                       method=Image.NEAREST,
                                       normalize=False,
                                       normalize_mask=True)
        transform_image = get_transform(self.opt, params)

        mask = self.mask.copy()
        mask_m = self.mask_m.copy()

        mask = transform_mask(Image.fromarray(np.uint8(mask)))
        mask_m = transform_mask(Image.fromarray(np.uint8(mask_m)))
        img = transform_image(self.img)

        start_t = time.time()
        generated = self.model.inference(
            torch.FloatTensor([mask_m.numpy()]),
            torch.FloatTensor([mask.numpy()]),
            torch.FloatTensor([img.numpy()]))
        end_t = time.time()
        print('inference time : {}'.format(end_t - start_t))
        #save_image((generated.data[0] + 1) / 2,'./results/1.jpg')
        result = generated.permute(0, 2, 3, 1)
        result = result.cpu().numpy()
        result = (result + 1) * 127.5
        result = np.asarray(result[0, :, :, :], dtype=np.uint8)
        qim = QImage(result.data, result.shape[1], result.shape[0],
                     result.strides[0], QImage.Format_RGB888)

        #for i in range(512):
        #    for j in range(512):
        #       r, g, b, a = image.pixelColor(i, j).getRgb()
        #       image.setPixel(i, j, color_list[r].rgb())
        if len(self.result_scene.items()) > 0:
            self.result_scene.removeItem(self.result_scene.items()[-1])
            self.result_scene.addPixmap(QPixmap.fromImage(qim))

    def make_mask(self, mask, pts, sizes, color):
        if len(pts) > 0:
            for idx, pt in enumerate(pts):
                cv2.line(mask, pt['prev'], pt['curr'], (color, color, color),
                         sizes[idx])
        return mask

    def save_img(self):
        if self.output_img is not None:
            fileName, _ = QFileDialog.getSaveFileName(self, "Save File",
                                                      QDir.currentPath())
            cv2.imwrite(fileName + '.jpg', self.output_img)

    def undo(self):
        self.scene.undo()

    def clear(self):
        self.mask_m = self.mask.copy()

        self.scene.reset_items()
        self.scene.reset()
        if self.image is not None:
            self.scene.addPixmap(self.image)
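
In `make_mask` above, each recorded stroke is burned into the label map with `cv2.line`, using the semantic class index itself as the "color" (e.g. 13 for hair in the mode table above). A tiny self-contained sketch of that idea with synthetic points:

import cv2
import numpy as np

# Rasterize two strokes into a 512x512 label map, writing class index 13
# (hair in the listing above) along each segment with a brush size of 6.
mask = np.zeros((512, 512, 3), dtype=np.uint8)
strokes = [{'prev': (100, 100), 'curr': (200, 150)},
           {'prev': (200, 150), 'curr': (300, 140)}]
for pt in strokes:
    cv2.line(mask, pt['prev'], pt['curr'], (13, 13, 13), 6)
print(np.unique(mask))  # -> [ 0 13]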
Example #7
    def __init__(self, model, opt):
        super(Ex, self).__init__()
        self.setupUi(self)
        self.show()
        self.model = model
        self.opt = opt
        self.img_size = 512
        self.root_dir = opt.demo_data_dir
        self.save_dir = opt.results_dir

        self.output_img = None

        self.mat_img = None

        self.mode = 0
        self.size = 6
        self.mask = None
        self.mask_m = None
        self.tag_img = None
        self.recon_tag_img = None
        self.ref_img = None
        self.ref_mask_path = None
        self.orient = None
        self.orient_m = None
        self.orient_mask = None
        self.orient_image = None
        self.mask_hole = None
        self.mask_stroke = None
        self.orient_stroke = None
        self.save_datas = {}

        self.mouse_clicked = False
        self.scene = GraphicsScene(self.mode, self.size)
        self.graphicsView.setScene(self.scene)
        self.graphicsView.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.orient_scene = GraphicsScene(self.mode, self.size)
        self.graphicsView_2.setScene(self.orient_scene)
        self.graphicsView_2.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView_2.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_2.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.ref_scene = QGraphicsScene()
        self.graphicsView_5.setScene(self.ref_scene)
        self.graphicsView_5.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView_5.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_5.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.result_scene = QGraphicsScene()
        self.graphicsView_3.setScene(self.result_scene)
        self.graphicsView_3.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView_3.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_3.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.tag_scene = QGraphicsScene()
        self.graphicsView_4.setScene(self.tag_scene)
        self.graphicsView_4.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView_4.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_4.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.dlg = QColorDialog(self.graphicsView)
        self.color = None
Example #8
class Ex(QWidget, Ui_Form):
    def __init__(self, model, opt):
        super(Ex, self).__init__()
        self.setupUi(self)
        self.show()
        self.model = model
        self.opt = opt
        self.img_size = 512
        self.root_dir = opt.demo_data_dir
        self.save_dir = opt.results_dir

        self.output_img = None

        self.mat_img = None

        self.mode = 0
        self.size = 6
        self.mask = None
        self.mask_m = None
        self.tag_img = None
        self.recon_tag_img = None
        self.ref_img = None
        self.ref_mask_path = None
        self.orient = None
        self.orient_m = None
        self.orient_mask = None
        self.orient_image = None
        self.mask_hole = None
        self.mask_stroke = None
        self.orient_stroke = None
        self.save_datas = {}

        self.mouse_clicked = False
        self.scene = GraphicsScene(self.mode, self.size)
        self.graphicsView.setScene(self.scene)
        self.graphicsView.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.orient_scene = GraphicsScene(self.mode, self.size)
        self.graphicsView_2.setScene(self.orient_scene)
        self.graphicsView_2.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView_2.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_2.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.ref_scene = QGraphicsScene()
        self.graphicsView_5.setScene(self.ref_scene)
        self.graphicsView_5.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView_5.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_5.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.result_scene = QGraphicsScene()
        self.graphicsView_3.setScene(self.result_scene)
        self.graphicsView_3.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView_3.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_3.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.tag_scene = QGraphicsScene()
        self.graphicsView_4.setScene(self.tag_scene)
        self.graphicsView_4.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView_4.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_4.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.dlg = QColorDialog(self.graphicsView)
        self.color = None

    def open_ref(self):
        fileName, _ = QFileDialog.getOpenFileName(self, "Open File",
                                                  self.opt.demo_data_dir)
        if fileName:
            image_name = fileName.split("/")[-1]
            image = QPixmap(fileName)
            mat_img = Image.open(fileName)
            self.ref_img = mat_img.copy()
            self.ref_mask_path = os.path.join(self.root_dir, "labels",
                                              image_name[:-4] + ".png")

            if image.isNull():
                QMessageBox.information(self, "Image Viewer",
                                        "Cannot load %s." % fileName)
                return
            image = image.scaled(self.graphicsView_5.size(),
                                 Qt.IgnoreAspectRatio)

            if len(self.ref_scene.items()) > 0:
                self.ref_scene.removeItem(self.ref_scene.items()[-1])
            self.ref_scene.addPixmap(image)
            # if len(self.result_scene.items()) > 0:
            #     self.result_scene.removeItem(self.result_scene.items()[-1])
            # self.result_scene.addPixmap(image)

    def open_tag(self):
        fileName, _ = QFileDialog.getOpenFileName(self, "Open File",
                                                  self.opt.demo_data_dir)
        if fileName:
            image_name = fileName.split("/")[-1]
            image = QPixmap(fileName)
            mat_img = Image.open(fileName)

            recon_dir = os.path.join(self.opt.demo_data_dir, "images_recon",
                                     image_name)
            if os.path.exists(recon_dir):
                recon_img = Image.open(recon_dir)
                self.recon_tag_img = recon_img.copy()
            else:
                self.recon_tag_img = None

            self.tag_img = mat_img.copy()
            if image.isNull():
                QMessageBox.information(self, "Image Viewer",
                                        "Cannot load %s." % fileName)
                return
            image1 = image.scaled(self.graphicsView_4.size(),
                                  Qt.IgnoreAspectRatio)

            if len(self.tag_scene.items()) > 0:
                self.tag_scene.removeItem(self.tag_scene.items()[-1])
            self.tag_scene.addPixmap(image1)
            if len(self.result_scene.items()) > 0:
                self.result_scene.removeItem(self.result_scene.items()[-1])
            image2 = image.scaled(self.graphicsView_3.size(),
                                  Qt.IgnoreAspectRatio)
            self.result_scene.addPixmap(image2)

            # process mask and orient by default
            mat_mask = cv2.imread(
                os.path.join(self.root_dir, "labels",
                             image_name[:-4] + ".png"))

            self.mask = mat_mask.copy()  # original mask
            self.mask_m = mat_mask  # edited mask
            mat_mask = mat_mask.copy()
            mask = QImage(mat_mask, self.img_size, self.img_size,
                          QImage.Format_RGB888)

            if mask.isNull():
                QMessageBox.information(self, "Image Viewer",
                                        "Cannot load %s." % fileName)
                return

            for i in range(self.img_size):
                for j in range(self.img_size):
                    r, g, b, a = mask.pixelColor(i, j).getRgb()
                    mask.setPixel(i, j, color_list[r].rgb())

            pixmap = QPixmap()
            pixmap.convertFromImage(mask)
            self.mask_show = pixmap.scaled(self.graphicsView.size(),
                                           Qt.IgnoreAspectRatio)
            self.scene.reset()
            if len(self.scene.items()) > 0:
                self.scene.reset_items()
            self.scene.addPixmap(self.mask_show)

            # for orient
            mat_img = cv2.imread(
                os.path.join(self.root_dir, "orients",
                             image_name[:-4] + "_orient_dense.png"),
                cv2.IMREAD_GRAYSCALE,
            )
            orient_mask = cv2.imread(
                os.path.join(self.root_dir, "labels",
                             image_name[:-4] + ".png"),
                cv2.IMREAD_GRAYSCALE,
            )
            self.orient_image = Image.open(
                os.path.join(self.root_dir, "images",
                             image_name[:-4] + ".jpg"))

            self.orient = mat_img.copy()
            self.orient_m = mat_img
            mat_img = mat_img.copy()
            self.orient_mask = orient_mask.copy()
            orient = mat_img / 255.0 * math.pi
            H, W = orient.shape
            orient_rgb = np.zeros((H, W, 3))
            orient_rgb[..., 1] = (np.sin(2 * orient) + 1) / 2
            orient_rgb[..., 0] = (np.cos(2 * orient) + 1) / 2
            orient_rgb[..., 2] = 0.5
            orient_rgb *= orient_mask[..., np.newaxis]
            orient_rgb = np.uint8(orient_rgb * 255.0)
            image = QImage(
                orient_rgb,
                self.img_size,
                self.img_size,
                self.img_size * 3,
                QImage.Format_RGB888,
            )

            if image.isNull():
                QMessageBox.information(self, "Image Viewer",
                                        "Cannot load %s." % fileName)
                return

            pixmap = QPixmap()
            pixmap.convertFromImage(image)
            self.orient_show = pixmap.scaled(self.graphicsView_2.size(),
                                             Qt.IgnoreAspectRatio)
            self.orient_scene.reset()
            if len(self.orient_scene.items()) > 0:
                self.orient_scene.reset_items()
            self.orient_scene.addPixmap(self.orient_show)

    def open_orient(self):
        fileName, _ = QFileDialog.getOpenFileName(self, "Open File",
                                                  self.opt.demo_data_dir)
        if fileName:
            image_name = fileName.split("/")[-1]
            mat_img = cv2.imread(fileName, cv2.IMREAD_GRAYSCALE)
            orient_mask = cv2.imread(
                os.path.join(self.root_dir, "labels",
                             image_name[:-17] + ".png"),
                cv2.IMREAD_GRAYSCALE,
            )
            self.orient_image = Image.open(
                os.path.join(self.root_dir, "images",
                             image_name[:-17] + ".jpg"))
            # mat_img = imresize(mat_img, (self.img_size, self.img_size), interp='nearest')

            # mat_img = Image.open(fileName)
            # mat_img = np.array(mat_img.resize((self.size,self.size)))

            self.orient = mat_img.copy()
            self.orient_m = mat_img
            mat_img = mat_img.copy()
            # transform to RGB
            # orient_mask = mat_img.copy()
            # orient_mask[orient_mask > 0] = 1
            # orient_mask = dliate_erode(orient_mask, 10)
            self.orient_mask = orient_mask.copy()
            orient = mat_img / 255.0 * math.pi
            H, W = orient.shape
            orient_rgb = np.zeros((H, W, 3))
            orient_rgb[..., 1] = (np.sin(2 * orient) + 1) / 2
            orient_rgb[..., 0] = (np.cos(2 * orient) + 1) / 2
            orient_rgb[..., 2] = 0.5
            orient_rgb *= orient_mask[..., np.newaxis]
            orient_rgb = np.uint8(orient_rgb * 255.0)
            # orient_save = Image.fromarray(np.uint8(orient_rgb)).convert('RGB')
            # orient_save.save('./inference_samples/original_orient.png')
            image = QImage(
                orient_rgb,
                self.img_size,
                self.img_size,
                self.img_size * 3,
                QImage.Format_RGB888,
            )

            if image.isNull():
                QMessageBox.information(self, "Image Viewer",
                                        "Cannot load %s." % fileName)
                return

            pixmap = QPixmap()
            pixmap.convertFromImage(image)
            self.orient_show = pixmap.scaled(self.graphicsView_2.size(),
                                             Qt.IgnoreAspectRatio)
            self.orient_scene.reset()
            if len(self.orient_scene.items()) > 0:
                self.orient_scene.reset_items()
            self.orient_scene.addPixmap(self.orient_show)

    def open_mask(self):
        fileName, _ = QFileDialog.getOpenFileName(self, "Open File",
                                                  self.opt.demo_data_dir)
        if fileName:
            mat_img = cv2.imread(fileName)
            # mat_img = imresize(mat_img, (self.img_size, self.img_size), interp='nearest')

            # mat_img = Image.open(fileName)
            # mat_img = np.array(mat_img.resize((self.size,self.size)))

            self.mask = mat_img.copy()  # original mask
            self.mask_m = mat_img  # edited mask
            mat_img = mat_img.copy()
            image = QImage(mat_img, self.img_size, self.img_size,
                           QImage.Format_RGB888)

            if image.isNull():
                QMessageBox.information(self, "Image Viewer",
                                        "Cannot load %s." % fileName)
                return

            for i in range(self.img_size):
                for j in range(self.img_size):
                    r, g, b, a = image.pixelColor(i, j).getRgb()
                    image.setPixel(i, j, color_list[r].rgb())

            pixmap = QPixmap()
            pixmap.convertFromImage(image)
            self.mask_show = pixmap.scaled(self.graphicsView.size(),
                                           Qt.IgnoreAspectRatio)
            self.scene.reset()
            if len(self.scene.items()) > 0:
                self.scene.reset_items()
            self.scene.addPixmap(self.mask_show)

    def bg_mode(self):
        self.scene.mode = 0

    def hair_mode(self):
        self.scene.mode = 1

    def increase(self):
        if self.scene.size < 15:
            self.scene.size += 1

    def decrease(self):
        if self.scene.size > 1:
            self.scene.size -= 1

    def edit(self):
        # get the edited mask
        self.mask_m = self.mask.copy()
        for i in range(2):
            self.mask_m = self.make_mask(self.mask_m,
                                         self.scene.mask_points[i],
                                         self.scene.size_points[i], i)

        # get the edited orient
        orient_new = self.mask_m.copy()
        orient_new = self.make_mask(orient_new, self.scene.mask_points[2],
                                    self.scene.size_points[2], 2)
        vis_stroke = orient_new.copy()
        orient_new[orient_new == 1] = 0
        orient_new[orient_new == 2] = 1
        mask_stroke = orient_new.copy()[:, :, 0]
        dilate_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (50, 50))
        mask_hole = cv2.dilate(np.uint8(orient_new), dilate_kernel)[:, :, 0]
        cal_stroke_orient = cal_orient_stroke.orient()
        orient_stroke = cal_stroke_orient.stroke_to_orient(mask_stroke)

        # process the tag image
        ranges = np.unique(self.mask - self.mask_m)
        if (not self.clickButtion1.isChecked()
                and self.recon_tag_img is not None and 1 in ranges):
            tag_image = self.recon_tag_img.copy()
        else:
            tag_image = self.tag_img.copy()

        if self.clickButtion1.isChecked():
            # reference mask
            print("select Reference Mask")
            if self.clickButtion3.isChecked():
                # reference orient
                print("select Reference Orientation")
                self.model.opt.inpaint_mode = "ref"
                data = demo_inference_dataLoad(
                    self.opt,
                    self.ref_mask_path,
                    self.mask[:, :, 0],
                    self.orient_mask.copy(),
                    self.orient,
                    self.ref_img,
                    tag_image,
                )
            else:
                print("select Edited Orientation")
                self.model.opt.inpaint_mode = "stroke"
                data = demo_inference_dataLoad(
                    self.opt,
                    self.ref_mask_path,
                    self.mask[:, :, 0],
                    self.orient_mask.copy(),
                    self.orient,
                    self.ref_img,
                    tag_image,
                    orient_stroke,
                    mask_stroke,
                    mask_hole,
                )
        else:
            # Edited mask
            print("select Edited Mask")
            if self.clickButtion3.isChecked():
                # reference orient
                print("select Reference Orientation")
                self.model.opt.inpaint_mode = "ref"
                data = demo_inference_dataLoad(
                    self.opt,
                    self.ref_mask_path,
                    self.mask_m[:, :, 0],
                    self.orient_mask.copy(),
                    self.orient,
                    self.ref_img,
                    tag_image,
                )
            else:
                print("select Edited Orientation")
                self.model.opt.inpaint_mode = "stroke"
                data = demo_inference_dataLoad(
                    self.opt,
                    self.ref_mask_path,
                    self.mask_m[:, :, 0],
                    self.orient_mask.copy(),
                    self.orient,
                    self.ref_img,
                    tag_image,
                    orient_stroke,
                    mask_stroke,
                    mask_hole,
                )

        start_t = time.time()
        generated, new_orient_rgb = self.model(data, mode="demo_inference")
        end_t = time.time()
        print("inference time : {}".format(end_t - start_t))

        # # save_image((generated.data[0] + 1) / 2,'./results/1.jpg')
        # result = tensor2im(generated[0])
        # fake_image = Image.fromarray(result)
        # fake_image.save('./inference_samples/inpaint_fake_image.jpg')
        if self.opt.add_feat_zeros:
            th = self.opt.add_th
            tmp = generated[:, :,
                            int(th / 2):int(th / 2) + self.opt.crop_size,
                            int(th / 2):int(th / 2) + self.opt.crop_size, ]
            generated = tmp
        result = generated.permute(0, 2, 3, 1)
        result = result.cpu().numpy()
        result = (result + 1) * 127.5
        result = np.asarray(result[0, :, :, :], dtype=np.uint8)
        # update the self.save_datas
        self.save_datas["result"] = Image.fromarray(result.copy())
        self.save_datas["ref_img"] = self.ref_img.copy()
        self.save_datas["tag_img"] = self.tag_img.copy()
        self.save_datas["ori_img"] = self.orient_image.copy()
        vis_stroke[vis_stroke == 1] = 255
        vis_stroke[vis_stroke == 2] = 127
        # save
        # stroke_save = Image.fromarray(np.uint8(vis_stroke))
        # stroke_save.save('inference_samples/stroke_mask.png')

        self.save_datas["stroke"] = Image.fromarray(np.uint8(vis_stroke))
        self.save_datas["mask"] = Image.fromarray(
            np.uint8(self.mask_m[:, :, 0].copy() * 255))

        qim = QImage(
            result.data,
            result.shape[1],
            result.shape[0],
            result.shape[0] * 3,
            QImage.Format_RGB888,
        )
        pixmap = QPixmap()
        pixmap.convertFromImage(qim)
        image = pixmap.scaled(self.graphicsView_3.size(), Qt.IgnoreAspectRatio)

        if len(self.result_scene.items()) > 0:
            self.result_scene.removeItem(self.result_scene.items()[-1])
            self.result_scene.addPixmap(image)

        # for orient
        H, W, C = new_orient_rgb.shape
        image = QImage(new_orient_rgb, H, W, W * 3, QImage.Format_RGB888)

        pixmap = QPixmap()
        pixmap.convertFromImage(image)
        image = pixmap.scaled(self.graphicsView_2.size(), Qt.IgnoreAspectRatio)

        if len(self.orient_scene.items()) > 0:
            self.orient_scene.removeItem(self.orient_scene.items()[-1])
        self.orient_scene.addPixmap(image)

    def save(self):
        # for save all results
        print("save..")

        fileName, _ = QFileDialog.getSaveFileName(self, "Save File",
                                                  self.save_dir)
        sum = Image.new(self.save_datas["result"].mode,
                        (5 * self.opt.crop_size, self.opt.crop_size))
        sum.paste(self.save_datas["stroke"], box=(3 * self.opt.crop_size, 0))
        # sum.paste(self.save_datas['mask'], box=(self.opt.crop_size, 0))
        sum.paste(self.save_datas["tag_img"], box=(0, 0))
        sum.paste(self.save_datas["ref_img"], box=(self.opt.crop_size, 0))
        sum.paste(self.save_datas["ori_img"], box=(2 * self.opt.crop_size, 0))
        sum.paste(self.save_datas["result"], box=(4 * self.opt.crop_size, 0))
        sum.save(fileName + ".jpg")

    def make_mask(self, mask, pts, sizes, color):
        if len(pts) > 0:
            for idx, pt in enumerate(pts):
                cv2.line(mask, pt["prev"], pt["curr"], (color, color, color),
                         sizes[idx])
        return mask

    def save_img(self):
        if self.output_img is not None:
            fileName, _ = QFileDialog.getSaveFileName(self, "Save File",
                                                      QDir.currentPath())
            cv2.imwrite(fileName + ".jpg", self.output_img)

    def undo(self):
        self.scene.undo()

    def clear(self):
        self.mask_m = self.mask.copy()

        self.scene.reset_items()
        self.scene.reset()
        if self.mask_show is not None:
            self.scene.addPixmap(self.mask_show)

    def save_orient_edit(self, input, name):
        if np.max(input) > 1:
            img = Image.fromarray(np.uint8(input))
        else:
            img = Image.fromarray(np.uint8(input * 255))
        img.save("./inference_samples/" + name)

    def orient_edit(self):
        # get the new mask
        for i in range(2):
            self.mask_m = self.make_mask(self.mask_m,
                                         self.scene.mask_points[i],
                                         self.scene.size_points[i], i)
        # get the edited orient
        orient_new = self.mask_m
        orient_new = self.make_mask(orient_new, self.scene.mask_points[2],
                                    self.scene.size_points[2], 2)
        orient_new[orient_new == 1] = 0
        orient_new[orient_new == 2] = 1
        # self.save_orient_edit(orient_new[...,0],'edited_orient.jpg')
        dilate_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
        orient_new = cv2.dilate(np.uint8(orient_new), dilate_kernel)
        print(np.unique(orient_new))
        # self.save_orient_edit(orient_new[...,0], 'Gauss_edited_orient.jpg')

        # image = QImage(orient_new, self.img_size, self.img_size, QImage.Format_RGB888)
        #
        # for i in range(self.img_size):
        #     for j in range(self.img_size):
        #         r, g, b, a = image.pixelColor(i, j).getRgb()
        #         image.setPixel(i, j, color_list[r].rgb())
        #
        # pixmap = QPixmap()
        # pixmap.convertFromImage(image)
        # mask_show = pixmap.scaled(self.graphicsView.size(), Qt.IgnoreAspectRatio)
        # self.orient_scene.reset_items()
        # self.orient_scene.reset()
        # if type(mask_show):
        #     self.orient_scene.addPixmap(mask_show)

    def orient_mode(self):
        # self.orient_scene.mode = 1
        self.scene.mode = 2
        # self.orient_scene.size = 5

    def erase_mode(self):
        self.orient_scene.mode = 0
        self.orient_scene.size = 14

    def orient_increase(self):
        if self.scene.size < 15:
            self.scene.size += 1

    def orient_decrease(self):
        if self.scene.size > 1:
            self.scene.size -= 1

    def selectM(self):
        if self.clickButtion1.isChecked():
            print("select Reference Mask")
        elif self.clickButtion2.isChecked():
            print("select Edited Mask")

    def selectO(self):
        if self.clickButtion3.isChecked():
            print("select Reference Orient")
        elif self.clickButtion4.isChecked():
            print("select Edited Orient")
Example #9
class Ex(QWidget, Ui_Form):
    def __init__(self, model, config):
        super().__init__()
        self.setupUi(self)
        self.show()
        self.model = model
        self.config = config
        self.model.load_demo_graph(config)

        self.output_img = None

        self.mat_img = None

        self.ld_mask = None
        self.ld_sk = None

        self.modes = [0,0,0]
        self.mouse_clicked = False
        self.scene = GraphicsScene(self.modes)
        self.graphicsView.setScene(self.scene)
        self.graphicsView.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.result_scene = QGraphicsScene()
        self.graphicsView_2.setScene(self.result_scene)
        self.graphicsView_2.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView_2.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_2.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.dlg = QColorDialog(self.graphicsView)
        self.color = None

    def mode_select(self, mode):
        for i in range(len(self.modes)):
            self.modes[i] = 0
        self.modes[mode] = 1

    def open(self):
        fileName, _ = QFileDialog.getOpenFileName(self, "Open File",
                QDir.currentPath())
        if fileName:
            image = QPixmap(fileName)
            mat_img = cv2.imread(fileName)
            if image.isNull():
                QMessageBox.information(self, "Image Viewer",
                        "Cannot load %s." % fileName)
                return

            # redbrush = QBrush(Qt.red)
            # blackpen = QPen(Qt.black)
            # blackpen.setWidth(5)
            self.image = image.scaled(self.graphicsView.size(), Qt.IgnoreAspectRatio)
            mat_img = cv2.resize(mat_img, (512, 512), interpolation=cv2.INTER_CUBIC)
            mat_img = mat_img/127.5 - 1
            self.mat_img = np.expand_dims(mat_img,axis=0)
            self.scene.reset()
            if len(self.scene.items())>0:
                self.scene.reset_items()
            self.scene.addPixmap(self.image)
            if len(self.result_scene.items())>0:
                self.result_scene.removeItem(self.result_scene.items()[-1])
            self.result_scene.addPixmap(self.image)

    def mask_mode(self):
        self.mode_select(0)

    def sketch_mode(self):
        self.mode_select(1)

    def stroke_mode(self):
        if not self.color:
            self.color_change_mode()
        self.scene.get_stk_color(self.color)
        self.mode_select(2)

    def color_change_mode(self):
        self.dlg.exec_()
        self.color = self.dlg.currentColor().name()
        self.pushButton_4.setStyleSheet("background-color: %s;" % self.color)
        self.scene.get_stk_color(self.color)

    def complete(self):
        sketch = self.make_sketch(self.scene.sketch_points)
        stroke = self.make_stroke(self.scene.stroke_points)
        mask = self.make_mask(self.scene.mask_points)
        if self.ld_mask is not None:
            ld_mask = np.expand_dims(self.ld_mask[:,:,0:1],axis=0)
            ld_mask[ld_mask>0] = 1
            ld_mask[ld_mask<1] = 0
            mask = mask+ld_mask
            mask[mask>0] = 1
            mask[mask<1] = 0
            mask = np.asarray(mask,dtype=np.uint8)
            print(mask.shape)

        if self.ld_sk is not None:
            sketch = sketch+self.ld_sk
            sketch[sketch>0]=1 

        noise = self.make_noise()

        sketch = sketch*mask
        stroke = stroke*mask
        noise = noise*mask

        batch = np.concatenate(
                    [self.mat_img,
                     sketch,
                     stroke,
                     mask,
                     noise],axis=3)
        start_t = time.time()
        result = self.model.demo(self.config, batch)
        end_t = time.time()
        print('inference time : {}'.format(end_t-start_t))
        result = (result+1)*127.5
        result = np.asarray(result[0,:,:,:],dtype=np.uint8)
        self.output_img = result
        result = np.concatenate([result[:,:,2:3],result[:,:,1:2],result[:,:,:1]],axis=2)
        qim = QImage(result.data, result.shape[1], result.shape[0], result.strides[0], QImage.Format_RGB888)
        self.result_scene.removeItem(self.result_scene.items()[-1])
        self.result_scene.addPixmap(QPixmap.fromImage(qim))

    def make_noise(self):
        noise = np.zeros([512, 512, 1],dtype=np.uint8)
        noise = cv2.randn(noise, 0, 255)
        noise = np.asarray(noise/255,dtype=np.uint8)
        noise = np.expand_dims(noise,axis=0)
        return noise

    def make_mask(self, pts):
        if len(pts)>0:
            mask = np.zeros((512,512,3))
            for pt in pts:
                cv2.line(mask,pt['prev'],pt['curr'],(255,255,255),12)
            mask = np.asarray(mask[:,:,0]/255,dtype=np.uint8)
            mask = np.expand_dims(mask,axis=2)
            mask = np.expand_dims(mask,axis=0)
        else:
            mask = np.zeros((512,512,3))
            mask = np.asarray(mask[:,:,0]/255,dtype=np.uint8)
            mask = np.expand_dims(mask,axis=2)
            mask = np.expand_dims(mask,axis=0)
        return mask

    def make_sketch(self, pts):
        if len(pts)>0:
            sketch = np.zeros((512,512,3))
            # sketch = 255*sketch
            for pt in pts:
                cv2.line(sketch,pt['prev'],pt['curr'],(255,255,255),1)
            sketch = np.asarray(sketch[:,:,0]/255,dtype=np.uint8)
            sketch = np.expand_dims(sketch,axis=2)
            sketch = np.expand_dims(sketch,axis=0)
        else:
            sketch = np.zeros((512,512,3))
            # sketch = 255*sketch
            sketch = np.asarray(sketch[:,:,0]/255,dtype=np.uint8)
            sketch = np.expand_dims(sketch,axis=2)
            sketch = np.expand_dims(sketch,axis=0)
        return sketch

    def make_stroke(self, pts):
        if len(pts)>0:
            stroke = np.zeros((512,512,3))
            for pt in pts:
                c = pt['color'].lstrip('#')
                color = tuple(int(c[i:i+2], 16) for i in (0, 2 ,4))
                color = (color[2],color[1],color[0])
                cv2.line(stroke,pt['prev'],pt['curr'],color,4)
            stroke = stroke/127.5 - 1
            stroke = np.expand_dims(stroke,axis=0)
        else:
            stroke = np.zeros((512,512,3))
            stroke = stroke/127.5 - 1
            stroke = np.expand_dims(stroke,axis=0)
        return stroke

    def arrange(self):
        image = np.asarray((self.mat_img[0]+1)*127.5,dtype=np.uint8)
        if len(self.scene.mask_points)>0:
            for pt in self.scene.mask_points:
                cv2.line(image,pt['prev'],pt['curr'],(255,255,255),12)
        if len(self.scene.stroke_points)>0:
            for pt in self.scene.stroke_points:
                c = pt['color'].lstrip('#')
                color = tuple(int(c[i:i+2], 16) for i in (0, 2 ,4))
                color = (color[2],color[1],color[0])
                cv2.line(image,pt['prev'],pt['curr'],color,4)
        if len(self.scene.sketch_points)>0:
            for pt in self.scene.sketch_points:
                cv2.line(image,pt['prev'],pt['curr'],(0,0,0),1)        
        cv2.imwrite('tmp.jpg',image)
        image = QPixmap('tmp.jpg')
        self.scene.history.append(3)
        self.scene.addPixmap(image)

    def save_img(self):
        if self.output_img is not None:
            fileName, _ = QFileDialog.getSaveFileName(self, "Save File",
                    QDir.currentPath())
            cv2.imwrite(fileName+'.jpg',self.output_img)

    def undo(self):
        self.scene.undo()

    def clear(self):
        self.scene.reset_items()
        self.scene.reset()
        if self.image is not None:
            self.scene.addPixmap(self.image)
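
In `complete` above, the network's output is mapped from [-1, 1] back to [0, 255], the channel order is flipped from BGR to RGB, and the buffer is wrapped in a `QImage` for display. The same conversion as a small helper (a sketch based on that code path):

import numpy as np
from PyQt5.QtGui import QImage, QPixmap

def result_to_pixmap(result):
    # result: (H, W, 3) float array in [-1, 1], BGR channel order (as above).
    img = np.asarray((result + 1) * 127.5, dtype=np.uint8)
    img = img[:, :, ::-1].copy()  # BGR -> RGB; copy() keeps the buffer contiguous
    qim = QImage(img.data, img.shape[1], img.shape[0],
                 img.strides[0], QImage.Format_RGB888)
    return QPixmap.fromImage(qim)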
Example #10
class Ex(QWidget, Ui_Form):
    def __init__(self, args):

        super().__init__()

        self.args = args
        self.current_style = 0
        if self.args.load_network:
            import torch
            from sofgan import init_deep_model
            self.styles, self.generator = init_deep_model(
                '../modules/sofgan.pt')
            self.noise = [
                getattr(self.generator.noises, f'noise_{i}')
                for i in range(self.generator.num_layers)
            ]

        self.setupUi(self)
        self.show()

        self.modes = 0
        self.alpha = 0.5

        self.mouse_clicked = False
        self.scene = GraphicsScene(self.modes, self)
        self.scene.setSceneRect(0, 0, 512, 512)
        self.graphicsView.setScene(self.scene)
        self.graphicsView.setAlignment(Qt.AlignCenter)
        self.graphicsView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.GT_scene = QGraphicsScene()
        self.graphicsView_GT.setScene(self.GT_scene)
        self.graphicsView_GT.setAlignment(Qt.AlignCenter)
        self.graphicsView_GT.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_GT.setHorizontalScrollBarPolicy(
            Qt.ScrollBarAlwaysOff)

        self.dlg = QColorDialog(self.graphicsView)

        self.init_screen()

    def init_screen(self):
        self.image = QPixmap(QSize(512, 512))
        self.image.fill(QColor('#000000'))
        self.mat_img = np.zeros([512, 512], np.uint8)

        self.mat_img_org = self.mat_img.copy()

        self.GT_img_path = None
        GT_img = self.mat_img.copy()
        self.GT_img = Image.fromarray(GT_img)
        self.GT_img = self.GT_img.convert('RGB')

        self.last = time.time()

        self.scene.reset()
        if len(self.scene.items()) > 0:
            self.scene.reset_items()

        self.scene_image_pts = self.scene.addPixmap(self.image)
        self.GT_scene_image_pts = self.GT_scene.addPixmap(self.image)

        self.image = np.zeros([512, 512, 3], np.uint8)
        self.image_raw = self.image.copy()
        self.update_segmap_vis(self.mat_img)

        ###############
        self.recorded_img_names = []

        self.frameLog = {}
        self.starTime = datetime.datetime.now().strftime('%H_%M_%S_%f')

    def run_deep_model(self):
        ""
        if self.args.load_network:
            with torch.no_grad():
                seg_label = torch.from_numpy(self.id_remap(self.mat_img)).view(
                    1, 1, 512, 512).float().cuda()
                fake_img, _, _, _ = self.generator(
                    self.styles[self.current_style % len(self.styles)],
                    return_latents=False,
                    condition_img=seg_label,
                    input_is_latent=True,
                    noise=self.noise)
                fake_img = ((fake_img[0].permute(1, 2, 0).cpu() + 1) / 2 *
                            255).clamp_(0, 255).numpy().astype('uint8')
                fake_img = cv2.resize(fake_img, (512, 512))
            self.GT_scene_image_pts.setPixmap(
                QPixmap.fromImage(
                    QImage(fake_img.data.tobytes(), fake_img.shape[1],
                           fake_img.shape[0], QImage.Format_RGB888)))
        else:
            print(
                'Did not load the deep model, you need to specify --load_network if you want to render rgb images'
            )

    def change_style(self):
        self.current_style += 1
        self.run_deep_model()

    @pyqtSlot()
    def open(self):

        fileName, _ = QFileDialog.getOpenFileName(self, "Open File",
                                                  'F:/Lab/samples')
        if fileName:

            self.mat_img_path = os.path.join(fileName)
            self.fileName = fileName

            # Use cv2 to read the image; label maps are grayscale, so the RGB
            # channel order does not matter here
            mat_img = cv2.imread(self.mat_img_path, -1)
            if mat_img is None:
                QMessageBox.information(self, "Image Viewer",
                                        "Cannot load %s." % fileName)
                return

            if mat_img.ndim == 2:
                self.mat_img = cv2.resize(mat_img, (512, 512),
                                          interpolation=cv2.INTER_NEAREST)
                self.image = self.segmap2rgb(self.id_remap(self.mat_img))
                self.mat_img_org = self.mat_img.copy()
            else:
                self.image = cv2.resize(mat_img[..., ::-1], (512, 512))

            self.image_raw = self.image.copy()
            self.image = np.round(self.alpha * self.image).astype('uint8')
            image = self.image + (
                self.segmap2rgb(self.id_remap(self.mat_img)) *
                int(1000 * (1.0 - self.alpha)) // 1000).astype('uint8')
            image = QPixmap.fromImage(
                QImage(image.data.tobytes(), self.image.shape[1],
                       self.image.shape[0], QImage.Format_RGB888))

            self.scene.reset()
            if len(self.scene.items()) > 0:
                self.scene.reset_items()

            self.scene_image_pts = self.scene.addPixmap(image)

            if mat_img.ndim == 2:  # template
                self.update_segmap_vis(self.mat_img)

    @pyqtSlot()
    def open_reference(self):
        fileName, _ = QFileDialog.getOpenFileName(
            self, "Open File",
            QDir.currentPath() + '/samples')
        if fileName:

            self.mat_img_path = os.path.join(fileName)
            self.fileName = fileName

            mat_img = cv2.imread(self.mat_img_path, 1)

            self.image_raw = cv2.resize(mat_img[..., ::-1], (512, 512))
            self.change_alpha_value()

    def update_segmap_vis(self, segmap):
        ""

        if not self.args.load_network:
            self.GT_scene_image_pts.setPixmap(
                QPixmap.fromImage(
                    QImage((10 * segmap).data.tobytes(), segmap.shape[1],
                           segmap.shape[0], QImage.Format_Grayscale8)))

        out = self.image + (self.segmap2rgb(self.id_remap(self.mat_img)) *
                            int(1000 *
                                (1.0 - self.alpha)) // 1000).astype('uint8')
        self.scene_image_pts.setPixmap(
            QPixmap.fromImage(
                QImage(out.data.tobytes(), out.shape[1], out.shape[0],
                       QImage.Format_RGB888)))

        print('FPS: %s' % (1.0 / (time.time() - self.last)))
        self.last = time.time()

    @pyqtSlot()
    def change_brush_size(self):
        self.scene.brush_size = self.brushSlider.value()
        self.brushsizeLabel.setText('Brush size: %d' % self.scene.brush_size)

    @pyqtSlot()
    def change_alpha_value(self):
        self.alpha = self.alphaSlider.value() / 20
        self.alphaLabel.setText('Alpha: %.2f' % self.alpha)

        self.image = np.round(self.image_raw * self.alpha).astype('uint8')
        out = self.image + (self.segmap2rgb(self.id_remap(self.mat_img)) *
                            int(1000 *
                                (1.0 - self.alpha)) // 1000).astype('uint8')

        self.scene_image_pts.setPixmap(
            QPixmap.fromImage(
                QImage(out.data.tobytes(), out.shape[1], out.shape[0],
                       QImage.Format_RGB888)))

    @pyqtSlot()
    def mode_select(self, mode):
        self.modes = mode
        self.scene.modes = mode

        if mode == 0:
            self.brushButton.setStyleSheet("background-color: #85adad")
            self.recButton.setStyleSheet("background-color:")
            self.fillButton.setStyleSheet("background-color:")
            QApplication.setOverrideCursor(Qt.ArrowCursor)
        elif mode == 1:
            self.recButton.setStyleSheet("background-color: #85adad")
            self.brushButton.setStyleSheet("background-color:")
            self.fillButton.setStyleSheet("background-color:")
            QApplication.setOverrideCursor(Qt.ArrowCursor)
        elif mode == 2:
            self.fillButton.setStyleSheet("background-color: #85adad")
            self.brushButton.setStyleSheet("background-color:")
            self.recButton.setStyleSheet("background-color:")
            QApplication.setOverrideCursor(Qt.PointingHandCursor)

    def segmap2rgb(self, img):
        part_colors = np.array([
            [0, 0, 0],
            [127, 212, 255],
            [255, 255, 127],
            [255, 255, 170],  # 'skin',1 'eye_brow'2,  'eye'3
            [240, 157, 240],
            [255, 212, 255],  # 'r_nose'4, 'l_nose'5
            [89, 64, 92],
            [237, 102, 99],
            [181, 43, 101],  # 'mouth'6, 'u_lip'7,'l_lip'8
            [0, 255, 85],
            [0, 255, 170],  # 'ear'9 'ear_r'10
            [255, 255, 170],
            [127, 170, 255],
            [85, 0, 255],
            [255, 170, 127],  # 'neck'11, 'neck_l'12, 'cloth'13
            [212, 127, 255],
            [0, 170, 255],  # , 'hair'14, 'hat'15
            [255, 255, 0],
            [255, 255, 85],
            [255, 255, 170],
            [255, 0, 255],
            [255, 85, 255],
            [255, 170, 255],
            [0, 255, 255],
            [85, 255, 255],
            [170, 255, 255],
            [100, 150, 200]
        ]).astype('int')

        condition_img_color = part_colors[img]
        return condition_img_color

    def id_remap(self, seg):
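        """Remap raw label ids to the reduced id set via a fixed lookup table."""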
        remap_list = np.array([
            0, 1, 2, 2, 3, 3, 4, 5, 6, 7, 8, 9, 9, 10, 11, 12, 13, 14, 15, 16
        ]).astype('uint8')
        return remap_list[seg.astype('int')]

    @pyqtSlot()
    def save_img(self):

        ui_result_folder = './ui_results/' + os.path.basename(
            self.fileName)[:-4]

        os.makedirs(ui_result_folder, exist_ok=True)

        outName = os.path.join(
            ui_result_folder,
            datetime.datetime.now().strftime('%m%d%H%M%S') + '_segmap.png')
        cv2.imwrite(outName, self.mat_img)
        print('===> save segmap to %s' % outName)

    @pyqtSlot()
    def switch_labels(self, label):
        self.scene.label = label
        self.scene.color = number_color[label]
        _translate = QCoreApplication.translate
        self.color_Button.setText(
            _translate("Form", "%s" % number_object[label]))
        self.color_Button.setStyleSheet("background-color: %s;" %
                                        self.scene.color + " color: black")

    @pyqtSlot()
    def undo(self):
        self.scene.undo()

    def startScreening(self):
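        """Start logging edits for this session under a timestamped key."""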
        self.isScreening, self.frameLog = True, {}
        self.starTime = datetime.datetime.now().strftime('%H_%M_%S_%f')

    def saveScreening(self):
        os.makedirs('./frameLog', exist_ok=True)
        name = './frameLog/%s.pkl' % self.starTime
        with open(name, 'wb') as f:
            pickle.dump(self.frameLog, f)
        print('====> saved frame log to %s' % name)

    def cleanForground(self):
        self.mat_img[:] = 0
        self.update_segmap_vis(self.mat_img)
        self.frameLog[datetime.datetime.now().strftime('%H:%M:%S:%f')] = {
            'undo': len(self.frameLog.keys())
        }
Example #11
class Ex(QtWidgets.QWidget, Ui_Form):
    def __init__(self, model, config):
        super().__init__()

        #self.get_head_outline()
        self._step_counter = 1

        # start camera
        self.record_video = RecordVideo()
        # connect the frame data signal and slot together
        self.record_video.frame_data.connect(self.camera_data_slot)

        # start face detector
        self.detector = MTCNNDetector(device='cpu')

        self.setupUi(self)
        self.show()
        self.model = model
        self.config = config
        self.model.load_demo_graph(config)

        self.output_img = None

        self.mat_img = None

        self.ld_mask = None
        self.ld_sk = None

        self._frame_data = None

        self.modes = [0, 0, 0]
        self.mouse_clicked = False
        self.scene = GraphicsScene(self.modes)
        self.graphicsView.setScene(self.scene)
        self.graphicsView.setAlignment(QtCore.Qt.AlignTop
                                       | QtCore.Qt.AlignLeft)
        self.graphicsView.setVerticalScrollBarPolicy(
            QtCore.Qt.ScrollBarAlwaysOff)
        self.graphicsView.setHorizontalScrollBarPolicy(
            QtCore.Qt.ScrollBarAlwaysOff)

        self.result_scene = QtWidgets.QGraphicsScene()
        self.graphicsView_2.setScene(self.result_scene)
        self.graphicsView_2.setAlignment(QtCore.Qt.AlignTop
                                         | QtCore.Qt.AlignLeft)
        self.graphicsView_2.setVerticalScrollBarPolicy(
            QtCore.Qt.ScrollBarAlwaysOff)
        self.graphicsView_2.setHorizontalScrollBarPolicy(
            QtCore.Qt.ScrollBarAlwaysOff)

        self.dlg = QtWidgets.QColorDialog(self.graphicsView)
        self.color = None

    def get_head_outline(self):
        """load head outline."""
        head_outline = cv2.imread('./ui/head_outline.jpg')
        head_outline = head_outline[25:-125, 75:-75, :]
        head_outline = cv2.resize(head_outline, (512, 512),
                                  interpolation=cv2.INTER_CUBIC)
        head_outline[head_outline < 75] = 0.5
        head_outline[head_outline > 1] = 1
        self._head_outline = head_outline

    def mode_select(self, mode):
        for i in range(len(self.modes)):
            self.modes[i] = 0
        self.modes[mode] = 1

    def camera_data_slot(self, frame_data):
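        """Receive a camera frame, overlay face-alignment guides when a face is found, and draw it on the scene."""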
        self._step_counter += 1

        self._frame_data = frame_data

        # resize to 512x512
        frame_data = cv2.resize(frame_data, (512, 512),
                                interpolation=cv2.INTER_CUBIC)

        # face detection
        # BGR to RGB first
        im = Image.fromarray(frame_data[:, :, ::-1])
        bboxes, landmarks = self.detector.infer(im, min_face_size=200)
        if len(bboxes):
            _img = show_grids(im, [], landmarks, step=self._step_counter % 3)
            frame_data = np.array(_img)[:, :, ::-1]
            frame_data = face_highlight(frame_data, bboxes, 1.2)
        else:
            frame_data = frame_data.astype('int32')
            frame_data = np.clip(frame_data - 50, 0, 255).astype('uint8')

        # convert data frame into QImage
        h, w, c = frame_data.shape
        bytes_per_line = 3 * w
        _frame_image = QtGui.QImage(frame_data.data, w, h, bytes_per_line,
                                    QtGui.QImage.Format_RGB888)
        _frame_image = _frame_image.rgbSwapped()

        # draw frame
        self.scene.reset()
        if len(self.scene.items()) > 0:
            self.scene.reset_items()
        self.scene.addPixmap(QtGui.QPixmap.fromImage(_frame_image))

    def capture(self):
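        """Stop the camera feed, crop the detected face from the last frame, and load it into the edit and result scenes."""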
        self.record_video.timer.stop()
        if self._frame_data is None:
            return

        # face detection
        # BGR to RGB first
        im = Image.fromarray(self._frame_data[:, :, ::-1])
        bboxes, _ = self.detector.infer(im, min_face_size=200)
        if len(bboxes):
            faces = crop_face(self._frame_data, bboxes, 1.4)
            #print(lmarks)
        else:
            return

        if len(faces) == 0:
            return

        # draw landmarks on display image
        #_face_img = Image.fromarray(faces[0][:, :, ::-1])
        #_, landmarks = self.detector.infer(_face_img, min_face_size=100)
        ##_face_img = show_bboxes(_face_img, [], landmarks)
        #_face_img = show_grids(_face_img, [], landmarks)
        #_face_img = np.array(_face_img)[:, :, ::-1]

        # convert data frame into QImage
        self._frame_data = faces[0]
        h, w, c = self._frame_data.shape
        bytes_per_line = 3 * w
        _frame_image = QtGui.QImage(self._frame_data.data, w, h,
                                    bytes_per_line, QtGui.QImage.Format_RGB888)
        #_frame_image = QtGui.QImage(_face_img.copy(), w, h,
        #                            bytes_per_line,
        #                            QtGui.QImage.Format_RGB888)
        _frame_image = _frame_image.rgbSwapped()
        image = QtGui.QPixmap.fromImage(_frame_image)
        mat_img = self._frame_data

        self.image = image.scaled(self.graphicsView.size(),
                                  QtCore.Qt.IgnoreAspectRatio)
        mat_img = mat_img / 127.5 - 1
        self.mat_img = np.expand_dims(mat_img, axis=0)
        self.scene.reset()
        if len(self.scene.items()) > 0:
            self.scene.reset_items()
        self.scene.addPixmap(self.image)
        if len(self.result_scene.items()) > 0:
            self.result_scene.removeItem(self.result_scene.items()[-1])
        self.result_scene.addPixmap(self.image)

    def open(self):
        fileName, _ = QtWidgets.QFileDialog.getOpenFileName(
            self, "Open File", QtCore.QDir.currentPath())
        if fileName:
            image = QtGui.QPixmap(fileName)
            mat_img = cv2.imread(fileName)
            if image.isNull():
                QtWidgets.QMessageBox.information(self, "Image Viewer",
                                                  "Cannot load %s." % fileName)
                return

            # redbrush = QBrush(Qt.red)
            # blackpen = QPen(Qt.black)
            # blackpen.setWidth(5)
            self.image = image.scaled(self.graphicsView.size(),
                                      QtCore.Qt.IgnoreAspectRatio)
            mat_img = cv2.resize(mat_img, (512, 512),
                                 interpolation=cv2.INTER_CUBIC)
            mat_img = mat_img / 127.5 - 1
            self.mat_img = np.expand_dims(mat_img, axis=0)
            self.scene.reset()
            if len(self.scene.items()) > 0:
                self.scene.reset_items()
            self.scene.addPixmap(self.image)
            if len(self.result_scene.items()) > 0:
                self.result_scene.removeItem(self.result_scene.items()[-1])
            self.result_scene.addPixmap(self.image)

    def mask_mode(self):
        self.mode_select(0)

    def sketch_mode(self):
        self.mode_select(1)

    def stroke_mode(self):
        if not self.color:
            self.color_change_mode()
        self.scene.get_stk_color(self.color)
        self.mode_select(2)

    def color_change_mode(self):
        self.dlg.exec_()
        self.color = self.dlg.currentColor().name()
        self.pushButton_4.setStyleSheet("background-color: %s;" % self.color)
        self.scene.get_stk_color(self.color)

    def complete(self):
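        """Assemble the sketch, stroke, mask, and noise channels, run the model, and display the generated result."""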
        sketch = self.make_sketch(self.scene.sketch_points)
        stroke = self.make_stroke(self.scene.stroke_points)
        mask = self.make_mask(self.scene.mask_points)
        if self.ld_mask is not None:
            ld_mask = np.expand_dims(self.ld_mask[:, :, 0:1], axis=0)
            ld_mask[ld_mask > 0] = 1
            ld_mask[ld_mask < 1] = 0
            mask = mask + ld_mask
            mask[mask > 0] = 1
            mask[mask < 1] = 0
            mask = np.asarray(mask, dtype=np.uint8)
            print(mask.shape)

        if self.ld_sk is not None:
            sketch = sketch + self.ld_sk
            sketch[sketch > 0] = 1

        noise = self.make_noise()

        sketch = sketch * mask
        stroke = stroke * mask
        noise = noise * mask

        batch = np.concatenate([self.mat_img, sketch, stroke, mask, noise],
                               axis=3)
        start_t = time.time()
        result = self.model.demo(self.config, batch)
        end_t = time.time()
        print('inference time : {}'.format(end_t - start_t))
        result = (result + 1) * 127.5
        result = np.asarray(result[0, :, :, :], dtype=np.uint8)
        self.output_img = result
        result = np.concatenate(
            [result[:, :, 2:3], result[:, :, 1:2], result[:, :, :1]], axis=2)
        qim = QtGui.QImage(result.data, result.shape[1], result.shape[0],
                           result.strides[0], QtGui.QImage.Format_RGB888)
        self.result_scene.removeItem(self.result_scene.items()[-1])
        self.result_scene.addPixmap(QtGui.QPixmap.fromImage(qim))

    def make_noise(self):
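        """Return a random noise channel of shape (1, 512, 512, 1)."""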
        noise = np.zeros([512, 512, 1], dtype=np.uint8)
        noise = cv2.randn(noise, 0, 255)
        noise = np.asarray(noise / 255, dtype=np.uint8)
        noise = np.expand_dims(noise, axis=0)
        return noise

    def make_mask(self, pts):
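        """Rasterize the recorded mask strokes into a binary (1, 512, 512, 1) map."""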
        if len(pts) > 0:
            mask = np.zeros((512, 512, 3))
            for pt in pts:
                cv2.line(mask, pt['prev'], pt['curr'], (255, 255, 255), 12)
            mask = np.asarray(mask[:, :, 0] / 255, dtype=np.uint8)
            mask = np.expand_dims(mask, axis=2)
            mask = np.expand_dims(mask, axis=0)
        else:
            mask = np.zeros((512, 512, 3))
            mask = np.asarray(mask[:, :, 0] / 255, dtype=np.uint8)
            mask = np.expand_dims(mask, axis=2)
            mask = np.expand_dims(mask, axis=0)
        return mask

    def make_sketch(self, pts):
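        """Rasterize the sketch lines into a binary (1, 512, 512, 1) map."""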
        if len(pts) > 0:
            sketch = np.zeros((512, 512, 3))
            # sketch = 255*sketch
            for pt in pts:
                cv2.line(sketch, pt['prev'], pt['curr'], (255, 255, 255), 1)
            sketch = np.asarray(sketch[:, :, 0] / 255, dtype=np.uint8)
            sketch = np.expand_dims(sketch, axis=2)
            sketch = np.expand_dims(sketch, axis=0)
        else:
            sketch = np.zeros((512, 512, 3))
            # sketch = 255*sketch
            sketch = np.asarray(sketch[:, :, 0] / 255, dtype=np.uint8)
            sketch = np.expand_dims(sketch, axis=2)
            sketch = np.expand_dims(sketch, axis=0)
        return sketch

    def make_stroke(self, pts):
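        """Rasterize the colored strokes into a (1, 512, 512, 3) map scaled to [-1, 1]."""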
        if len(pts) > 0:
            stroke = np.zeros((512, 512, 3))
            for pt in pts:
                c = pt['color'].lstrip('#')
                color = tuple(int(c[i:i + 2], 16) for i in (0, 2, 4))
                color = (color[2], color[1], color[0])
                cv2.line(stroke, pt['prev'], pt['curr'], color, 4)
            stroke = stroke / 127.5 - 1
            stroke = np.expand_dims(stroke, axis=0)
        else:
            stroke = np.zeros((512, 512, 3))
            stroke = stroke / 127.5 - 1
            stroke = np.expand_dims(stroke, axis=0)
        return stroke

    def arrange(self):
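        """Composite the current mask, strokes, and sketch onto the input image and push it to the scene as a preview."""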
        image = np.asarray((self.mat_img[0] + 1) * 127.5, dtype=np.uint8)
        if len(self.scene.mask_points) > 0:
            for pt in self.scene.mask_points:
                cv2.line(image, pt['prev'], pt['curr'], (255, 255, 255), 12)
        if len(self.scene.stroke_points) > 0:
            for pt in self.scene.stroke_points:
                c = pt['color'].lstrip('#')
                color = tuple(int(c[i:i + 2], 16) for i in (0, 2, 4))
                color = (color[2], color[1], color[0])
                cv2.line(image, pt['prev'], pt['curr'], color, 4)
        if len(self.scene.sketch_points) > 0:
            for pt in self.scene.sketch_points:
                cv2.line(image, pt['prev'], pt['curr'], (0, 0, 0), 1)
        cv2.imwrite('tmp.jpg', image)
        image = QtGui.QPixmap('tmp.jpg')
        self.scene.history.append(3)
        self.scene.addPixmap(image)

    def save_img(self):
        if self.output_img is not None:
            fileName, _ = QtWidgets.QFileDialog.getSaveFileName(
                self, "Save File", QtCore.QDir.currentPath())
            cv2.imwrite(fileName + '.jpg', self.output_img)

    def undo(self):
        self.scene.undo()

    def clear(self):
        self.scene.reset_items()
        self.scene.reset()
        if getattr(self, 'image', None) is not None:
            self.scene.addPixmap(self.image)
Example #12
class Ex(QWidget, Ui_Form):
    def __init__(self, model):
        super().__init__()

        self.setupUi(self)
        self.show()
        self.model = model

        self.output_img = None

        self.mat_img = None

        self.ld_mask = None
        self.ld_sk = None

        self.modes = [0, 0, 0]
        self.mouse_clicked = False
        self.scene = GraphicsScene(self.modes)
        self.graphicsView.setScene(self.scene)
        self.graphicsView.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.result_scene = QGraphicsScene()
        self.graphicsView_2.setScene(self.result_scene)
        self.graphicsView_2.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView_2.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_2.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.dlg = QColorDialog(self.graphicsView)
        self.color = None

    def mode_select(self, mode):
        for i in range(len(self.modes)):
            self.modes[i] = 0
        self.modes[mode] = 1

    def open(self):
        fileName, _ = QFileDialog.getOpenFileName(self, "Open File",
                                                  QDir.currentPath())
        if fileName:
            image = QPixmap(fileName)
            # print(fileName)
            file_label = 'test_label/' + fileName.split('/')[-1]
            # print(file_label)
            file_label = file_label.split('.')[0] + '.png'
            self.mat_label = cv2.imread(file_label)
            # print(self.mat_label)

            self.mat_label = cv2.cvtColor(self.mat_label, cv2.COLOR_BGR2GRAY)
            mat_img = cv2.imread(fileName)

            mat_img = cv2.cvtColor(mat_img, cv2.COLOR_BGR2RGB)
            if image.isNull():
                QMessageBox.information(self, "Image Viewer",
                                        "Cannot load %s." % fileName)
                return

            # redbrush = QBrush(Qt.red)
            # blackpen = QPen(Qt.black)
            # blackpen.setWidth(5)
            self.image = image.scaled(self.graphicsView.size(),
                                      Qt.IgnoreAspectRatio)
            mat_img = mat_img / 127.5 - 1
            self.mat_img = np.expand_dims(mat_img, axis=0)
            # cv2.imshow('a',self.mat_img.squeeze())
            # cv2.waitKey(0)
            self.scene.reset()
            if len(self.scene.items()) > 0:
                self.scene.reset_items()
            self.scene.addPixmap(self.image)
            if len(self.result_scene.items()) > 0:
                self.result_scene.removeItem(self.result_scene.items()[-1])
            self.result_scene.addPixmap(self.image)

    def mask_mode(self):
        self.mode_select(0)

    def sketch_mode(self):
        self.mode_select(1)

    def stroke_mode(self):
        if not self.color:
            self.color_change_mode()
        self.scene.get_stk_color(self.color)
        self.mode_select(2)

    def color_change_mode(self):
        self.dlg.exec_()
        self.color = self.dlg.currentColor().name()
        self.pushButton_4.setStyleSheet("background-color: %s;" % self.color)
        self.scene.get_stk_color(self.color)

    def complete(self):
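        """Build the label, image, sketch, stroke, mask, and noise tensors, run inference on the GPU, and display the mask-blended result."""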
        global COUNT
        sketch = self.make_sketch(self.scene.sketch_points)
        sketch = torch.FloatTensor(sketch)
        stroke = self.make_stroke(self.scene.stroke_points)
        stroke = torch.FloatTensor(stroke)
        mask = self.make_mask(self.scene.mask_points)
        mask = torch.FloatTensor(mask)
        self.mat_img = torch.FloatTensor(self.mat_img)
        if self.ld_mask is not None:
            ld_mask = np.expand_dims(self.ld_mask[:, :, 0:1], axis=0)
            ld_mask[ld_mask > 0] = 1
            ld_mask[ld_mask < 1] = 0
            mask = mask + ld_mask
            mask[mask > 0] = 1
            mask[mask < 1] = 0
            mask = np.asarray(mask, dtype=np.uint8)

        if self.ld_sk is not None:
            sketch = sketch + self.ld_sk
            sketch[sketch > 0] = 1

        noise = self.make_noise()
        noise = torch.FloatTensor(noise)
        self.mat_label = torch.FloatTensor(self.mat_label)
        self.mat_label = self.mat_label.reshape(1, 512, 320, 1)

        start_t = time.time()
        result = self.model.inference(
            self.mat_label.permute(0, 3, 1, 2).cuda(),
            self.mat_img.permute(0, 3, 1, 2).cuda(),
            sketch.permute(0, 3, 1, 2).cuda(),
            stroke.permute(0, 3, 1, 2).cuda(),
            mask.permute(0, 3, 1, 2).cuda(),
            noise.permute(0, 3, 1, 2).cuda())
        result = result * mask.permute(
            0, 3, 1, 2).cuda() + self.mat_img.permute(
                0, 3, 1, 2).cuda() * (1 - mask.permute(0, 3, 1, 2).cuda())
        end_t = time.time()
        print('inference time : {}'.format(end_t - start_t))
        result = (result + 1) * 127.5
        result = result.permute(0, 2, 3, 1)
        result = result.cpu().numpy()
        result = np.asarray(result[0, :, :, :], dtype=np.uint8)

        self.output_img = result
        qim = QImage(result.data, result.shape[1], result.shape[0],
                     result.strides[0], QImage.Format_RGB888)
        self.result_scene.removeItem(self.result_scene.items()[-1])
        self.result_scene.addPixmap(QPixmap.fromImage(qim))
        COUNT += 1

    def make_noise(self):
        noise = np.zeros([512, 320, 1], dtype=np.uint8)
        noise = cv2.randn(noise, 0, 255)
        noise = np.asarray(noise / 255, dtype=np.uint8)
        noise = np.expand_dims(noise, axis=0)
        return noise

    def make_mask(self, pts):
        if len(pts) > 0:
            mask = np.zeros((512, 320, 3))
            for pt in pts:
                cv2.line(mask, pt['prev'], pt['curr'], (255, 255, 255), 12)
            mask = np.asarray(mask[:, :, 0] / 255, dtype=np.uint8)
            mask = np.expand_dims(mask, axis=2)
            mask = np.expand_dims(mask, axis=0)
        else:
            mask = np.zeros((512, 320, 3))
            mask = np.asarray(mask[:, :, 0] / 255, dtype=np.uint8)
            mask = np.expand_dims(mask, axis=2)
            mask = np.expand_dims(mask, axis=0)
        return mask

    def make_sketch(self, pts):
        if len(pts) > 0:
            sketch = np.zeros((512, 320, 3))
            # sketch = 255*sketch
            for pt in pts:
                cv2.line(sketch, pt['prev'], pt['curr'], (255, 255, 255), 1)
            sketch = np.asarray(sketch[:, :, 0] / 255, dtype=np.uint8)
            sketch = np.expand_dims(sketch, axis=2)
            sketch = np.expand_dims(sketch, axis=0)
        else:
            sketch = np.zeros((512, 320, 3))
            # sketch = 255*sketch
            sketch = np.asarray(sketch[:, :, 0] / 255, dtype=np.uint8)
            sketch = np.expand_dims(sketch, axis=2)
            sketch = np.expand_dims(sketch, axis=0)
        return sketch

    def make_stroke(self, pts):
        if len(pts) > 0:
            stroke = np.zeros((512, 320, 3))
            for pt in pts:
                c = pt['color'].lstrip('#')
                color = tuple(int(c[i:i + 2], 16) for i in (0, 2, 4))
                cv2.line(stroke, pt['prev'], pt['curr'], color, 4)
            stroke = stroke / 127.5 - 1
            stroke = np.expand_dims(stroke, axis=0)
        else:
            stroke = np.zeros((512, 320, 3))
            stroke = stroke / 127.5 - 1
            stroke = np.expand_dims(stroke, axis=0)
        return stroke

    def arrange(self):
        image = np.asarray((self.mat_img[0] + 1) * 127.5, dtype=np.uint8)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        if len(self.scene.mask_points) > 0:
            for pt in self.scene.mask_points:
                cv2.line(image, pt['prev'], pt['curr'], (255, 255, 255), 12)
        if len(self.scene.stroke_points) > 0:
            for pt in self.scene.stroke_points:
                c = pt['color'].lstrip('#')
                color = tuple(int(c[i:i + 2], 16) for i in (0, 2, 4))
                color = (color[2], color[1], color[0])
                cv2.line(image, pt['prev'], pt['curr'], color, 4)
        if len(self.scene.sketch_points) > 0:
            for pt in self.scene.sketch_points:
                cv2.line(image, pt['prev'], pt['curr'], (0, 0, 0), 1)
        cv2.imwrite('tmp.jpg', image)
        image = QPixmap('tmp.jpg')
        self.scene.history.append(3)
        self.scene.addPixmap(image)

    def save_img(self):
        if self.output_img is not None:
            fileName, _ = QFileDialog.getSaveFileName(self, "Save File",
                                                      QDir.currentPath())
            cv2.imwrite(fileName + '.jpg', self.output_img)

    def undo(self):
        self.scene.undo()

    def clear(self):
        self.scene.reset_items()
        self.scene.reset()
        if getattr(self, 'image', None) is not None:
            self.scene.addPixmap(self.image)
Example #13
class Ex(QWidget, Ui_Form):
    def __init__(self, opt):

        super().__init__()
        self.init_deep_model(opt)

        self.setupUi(self)
        self.show()

        self.modes = 0
        self.alpha = 1

        self.mouse_clicked = False
        self.scene = GraphicsScene(self.modes, self)
        self.scene.setSceneRect(0, 0, 512, 512)
        self.graphicsView.setScene(self.scene)
        self.graphicsView.setAlignment(Qt.AlignCenter)
        self.graphicsView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.result_scene = QGraphicsScene()
        self.graphicsView_2.setScene(self.result_scene)
        self.graphicsView_2.setAlignment(Qt.AlignCenter)
        self.graphicsView_2.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_2.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.GT_scene = QGraphicsScene()
        self.graphicsView_GT.setScene(self.GT_scene)
        self.graphicsView_GT.setAlignment(Qt.AlignCenter)
        self.graphicsView_GT.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_GT.setHorizontalScrollBarPolicy(
            Qt.ScrollBarAlwaysOff)

        self.dlg = QColorDialog(self.graphicsView)

        self.init_screen()

    def init_screen(self):
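        """Start from a blank 512x512 canvas, load the average style codes, and run the model once."""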
        #self.image = QPixmap(self.graphicsView.size())
        self.image = QPixmap(QSize(512, 512))
        self.image.fill(QColor('#000000'))
        self.mat_img = np.zeros([512, 512, 3], np.uint8)

        self.mat_img_org = self.mat_img.copy()

        self.GT_img_path = None
        GT_img = self.mat_img.copy()
        self.GT_img = Image.fromarray(GT_img)
        self.GT_img = self.GT_img.convert('RGB')

        #################### add GT image
        self.update_GT_image(GT_img)

        #####################

        self.scene.reset()
        if len(self.scene.items()) > 0:
            self.scene.reset_items()
        self.scene.addPixmap(self.image)

        ###############

        ############### load average features

        self.load_average_feature()
        self.run_deep_model()
        self.recorded_img_names = []

    def init_deep_model(self, opt):
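        """Build the Pix2Pix model from the given options and switch it to eval mode."""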
        self.opt = opt
        self.model = Pix2PixModel(self.opt)
        self.model.eval()

    def run_deep_model(self):
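        """Generate an image from the current label map and style codes and show it in the result view."""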
        torch.manual_seed(0)

        data_i = self.get_single_input()

        if self.obj_dic is not None:
            data_i['obj_dic'] = self.obj_dic

        generated = self.model(data_i, mode='UI_mode')
        generated_img = self.convert_output_image(generated)
        qim = QImage(generated_img.data, generated_img.shape[1],
                     generated_img.shape[0], QImage.Format_RGB888)

        if len(self.result_scene.items()) > 0:
            self.result_scene.removeItem(self.result_scene.items()[-1])
        self.result_scene.addPixmap(
            QPixmap.fromImage(qim).scaled(
                QSize(512, 512), transformMode=Qt.SmoothTransformation))
        self.generated_img = generated_img

    @pyqtSlot()
    def open(self):

        fileName, _ = QFileDialog.getOpenFileName(
            self, "Open File",
            QDir.currentPath() + '/imgs/colormaps')
        if fileName:
            image = QPixmap(fileName)
            self.mat_img_path = os.path.join(self.opt.label_dir,
                                             os.path.basename(fileName))

            # use cv2 to read the label map; it is grayscale, so channel order does not matter

            mat_img = cv2.imread(self.mat_img_path)
            if image.isNull():
                QMessageBox.information(self, "Image Viewer",
                                        "Cannot load %s." % fileName)
                return
            # self.image = image.scaled(self.graphicsView.size(), Qt.IgnoreAspectRatio)
            self.image = image.scaled(QSize(512, 512), Qt.IgnoreAspectRatio)

            self.mat_img = cv2.resize(mat_img, (512, 512),
                                      interpolation=cv2.INTER_NEAREST)
            self.mat_img_org = self.mat_img.copy()

            self.GT_img_path = os.path.join(
                self.opt.image_dir,
                os.path.basename(fileName)[:-4] + '.jpg')
            GT_img = skimage.io.imread(self.GT_img_path)
            self.GT_img = Image.fromarray(GT_img)
            self.GT_img = self.GT_img.convert('RGB')

            self.input_img_button.setIcon(QIcon(self.GT_img_path))

            #################### add GT image
            self.update_GT_image(GT_img)

            #####################

            self.scene.reset()
            if len(self.scene.items()) > 0:
                self.scene.reset_items()
            self.scene.addPixmap(self.image)

            self.load_input_feature()
            self.run_deep_model()

    @pyqtSlot()
    def change_brush_size(self):
        self.scene.brush_size = self.brushSlider.value()
        self.brushsizeLabel.setText('Brush size: %d' % self.scene.brush_size)

    @pyqtSlot()
    def change_alpha_value(self):
        self.alpha = self.alphaSlider.value() / 20
        self.alphaLabel.setText('Alpha: %.2f' % self.alpha)

    @pyqtSlot()
    def mode_select(self, mode):
        self.modes = mode
        self.scene.modes = mode

        if mode == 0:
            self.brushButton.setStyleSheet("background-color: #85adad")
            self.recButton.setStyleSheet("background-color:")
            self.fillButton.setStyleSheet("background-color:")
            QApplication.setOverrideCursor(Qt.ArrowCursor)
        elif mode == 1:
            self.recButton.setStyleSheet("background-color: #85adad")
            self.brushButton.setStyleSheet("background-color:")
            self.fillButton.setStyleSheet("background-color:")
            QApplication.setOverrideCursor(Qt.ArrowCursor)
        elif mode == 2:
            self.fillButton.setStyleSheet("background-color: #85adad")
            self.brushButton.setStyleSheet("background-color:")
            self.recButton.setStyleSheet("background-color:")
            QApplication.setOverrideCursor(Qt.PointingHandCursor)

    @pyqtSlot()
    def save_img(self):

        current_time = datetime.datetime.now()
        ui_result_folder = 'ui_results'

        if not os.path.exists(ui_result_folder):
            os.mkdir(ui_result_folder)

        skimage.io.imsave(
            os.path.join(ui_result_folder,
                         str(current_time) + '_G_img.png'), self.generated_img)
        skimage.io.imsave(
            os.path.join(ui_result_folder,
                         str(current_time) + '_I.png'), self.mat_img[:, :, 0])
        skimage.io.imsave(
            os.path.join(ui_result_folder,
                         str(current_time) + '_ColorI.png'),
            color_pred(self.mat_img[:, :, 0]))

    @pyqtSlot()
    def switch_labels(self, label):
        self.scene.label = label
        self.scene.color = number_color[label]
        self.color_Button.setStyleSheet("background-color: %s;" %
                                        self.scene.color)

    @pyqtSlot()
    def undo(self):
        self.scene.undo()

    # get input images and labels
    def get_single_input(self):

        image_path = self.GT_img_path
        image = self.GT_img
        label_img = self.mat_img[:, :, 0]

        label = Image.fromarray(label_img)
        params = get_params(self.opt, label.size)
        transform_label = get_transform(self.opt,
                                        params,
                                        method=Image.NEAREST,
                                        normalize=False)
        label_tensor = transform_label(label) * 255.0
        label_tensor[label_tensor ==
                     255] = self.opt.label_nc  # 'unknown' is opt.label_nc
        label_tensor.unsqueeze_(0)

        image_tensor = torch.zeros([1, 3, 256, 256])

        # when instance maps are disabled (opt.no_instance), pass a dummy tensor
        if self.opt.no_instance:
            instance_tensor = torch.Tensor([0])

        input_dict = {
            'label': label_tensor,
            'instance': instance_tensor,
            'image': image_tensor,
            'path': image_path,
        }

        return input_dict

    def convert_output_image(self, generated):
        tile = self.opt.batchSize > 8
        t = tensor2im(generated, tile=tile)[0]
        return t

    def update_GT_image(self, GT_img):
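        """Show the ground-truth photo in the GT view, scaled to the display size."""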
        qim = QImage(GT_img.data, GT_img.shape[1], GT_img.shape[0],
                     GT_img.strides[0], QImage.Format_RGB888)
        qim = qim.scaled(QSize(256, 256),
                         Qt.IgnoreAspectRatio,
                         transformMode=Qt.SmoothTransformation)
        if len(self.GT_scene.items()) > 0:
            self.GT_scene.removeItem(self.GT_scene.items()[-1])
        self.GT_scene.addPixmap(
            QPixmap.fromImage(qim).scaled(
                QSize(512, 512), transformMode=Qt.SmoothTransformation))

    def load_average_feature(self):
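        """Load the mean style code of every category into self.obj_dic."""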

        ############### load average features

        average_style_code_folder = 'styles_test/mean_style_code/mean/'
        input_style_dic = {}

        ############### hard coding for categories

        for i in range(19):
            input_style_dic[str(i)] = {}

            average_category_folder_list = glob(
                os.path.join(average_style_code_folder, str(i), '*.npy'))
            average_category_list = [
                os.path.splitext(os.path.basename(name))[0]
                for name in average_category_folder_list
            ]

            for style_code_path in average_category_list:
                input_style_dic[str(i)][style_code_path] = torch.from_numpy(
                    np.load(
                        os.path.join(average_style_code_folder, str(i),
                                     style_code_path + '.npy'))).cuda()

        self.obj_dic = input_style_dic
        # self.obj_dic_back = copy.deepcopy(self.obj_dic)

    def load_partial_average_feature(self):

        average_style_code_folder = 'styles_test/mean_style_code/mean/'

        for i, cb_status in enumerate(self.checkbox_status):
            if cb_status:

                average_category_folder_list = glob(
                    os.path.join(average_style_code_folder, str(i), '*.npy'))
                average_category_list = [
                    os.path.splitext(os.path.basename(name))[0]
                    for name in average_category_folder_list
                ]

                for style_code_path in average_category_list:
                    self.obj_dic[str(i)][style_code_path] = torch.from_numpy(
                        np.load(
                            os.path.join(average_style_code_folder, str(i),
                                         style_code_path + '.npy'))).cuda()
                if str(i) in self.style_img_mask_dic:
                    del self.style_img_mask_dic[str(i)]

        self.run_deep_model()
        self.update_snapshots()

    def load_input_feature(self):
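        """Load the opened image's per-category style codes, falling back to the mean codes when a category is missing."""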

        ############### load average features

        average_style_code_folder = 'styles_test/mean_style_code/mean/'
        input_style_code_folder = 'styles_test/style_codes/' + os.path.basename(
            self.GT_img_path)
        input_style_dic = {}
        self.label_count = []

        self.style_img_mask_dic = {}

        for i in range(19):
            input_style_dic[str(i)] = {}

            input_category_folder_list = glob(
                os.path.join(input_style_code_folder, str(i), '*.npy'))
            input_category_list = [
                os.path.splitext(os.path.basename(name))[0]
                for name in input_category_folder_list
            ]

            average_category_folder_list = glob(
                os.path.join(average_style_code_folder, str(i), '*.npy'))
            average_category_list = [
                os.path.splitext(os.path.basename(name))[0]
                for name in average_category_folder_list
            ]

            for style_code_path in average_category_list:
                if style_code_path in input_category_list:
                    input_style_dic[str(
                        i)][style_code_path] = torch.from_numpy(
                            np.load(
                                os.path.join(input_style_code_folder, str(i),
                                             style_code_path +
                                             '.npy'))).cuda()

                    if style_code_path == 'ACE':
                        self.style_img_mask_dic[str(i)] = self.GT_img_path
                        self.label_count.append(i)

                else:
                    input_style_dic[str(
                        i)][style_code_path] = torch.from_numpy(
                            np.load(
                                os.path.join(average_style_code_folder, str(i),
                                             style_code_path +
                                             '.npy'))).cuda()

        self.obj_dic = input_style_dic
        #self.obj_dic_back = copy.deepcopy(self.obj_dic)
        self.obj_dic_GT = copy.deepcopy(self.obj_dic)

        self.update_snapshots()

    def style_linear_interpolation(self):
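        """Interpolate ACE style codes between consecutive test images and save each generated frame."""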

        ui_result_folder = 'style_interpolation'

        img_list = glob('imgs/style_imgs_test/*.jpg')
        img_list.sort()

        for style_count, _ in enumerate(img_list):
            if style_count == len(img_list) - 1:
                break
            style_path_1 = img_list[style_count]
            style_path_2 = img_list[style_count + 1]

            style_path_1_folder = 'styles_test/style_codes/' + os.path.basename(
                style_path_1)
            style_path_2_folder = 'styles_test/style_codes/' + os.path.basename(
                style_path_2)

            for count_num in range(1, 21):
                alpha = count_num * 0.05

                for i, cb_status in enumerate(self.checkbox_status):

                    if cb_status and i in self.label_count:
                        input_category_folder_list_1 = glob(
                            os.path.join(style_path_1_folder, str(i), '*.npy'))
                        input_category_list_1 = [
                            os.path.splitext(os.path.basename(name))[0]
                            for name in input_category_folder_list_1
                        ]

                        input_category_folder_list_2 = glob(
                            os.path.join(style_path_2_folder, str(i), '*.npy'))
                        input_category_list_2 = [
                            os.path.splitext(os.path.basename(name))[0]
                            for name in input_category_folder_list_2
                        ]

                        if 'ACE' in input_category_list_1:
                            style_code1 = torch.from_numpy(
                                np.load(
                                    os.path.join(style_path_1_folder, str(i),
                                                 'ACE.npy'))).cuda()
                        else:
                            style_code1 = self.obj_dic_GT[str(i)]['ACE']

                        if 'ACE' in input_category_list_2:
                            style_code2 = torch.from_numpy(
                                np.load(
                                    os.path.join(style_path_2_folder, str(i),
                                                 'ACE.npy'))).cuda()
                        else:
                            style_code2 = self.obj_dic_GT[str(i)]['ACE']

                        self.obj_dic[str(i)]['ACE'] = (
                            1 - alpha) * style_code1 + alpha * style_code2

                self.run_deep_model()

                if count_num < 20:
                    skimage.io.imsave(
                        os.path.join(
                            ui_result_folder,
                            os.path.basename(style_path_1)[:-4] + '_' +
                            os.path.basename(style_path_2)[:-4] + '_' +
                            str(count_num) + '.png'), self.generated_img)
                else:
                    skimage.io.imsave(
                        os.path.join(
                            ui_result_folder,
                            os.path.basename(style_path_2)[:-4] + '.png'),
                        self.generated_img)

    def update_entire_feature(self, style_img_path):
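        """Blend the checked categories' ACE style codes from the reference image into the current style (weighted by alpha) and regenerate."""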

        if style_img_path == 0:
            style_img_path = self.GT_img_path
            if style_img_path is None:
                return
            input_style_code_folder = 'styles_test/style_codes/' + os.path.basename(
                style_img_path)
        else:
            input_style_code_folder = 'styles_test/style_codes/' + os.path.basename(
                style_img_path)

        for i, cb_status in enumerate(self.checkbox_status):

            if cb_status and i in self.label_count:
                input_category_folder_list = glob(
                    os.path.join(input_style_code_folder, str(i), '*.npy'))
                input_category_list = [
                    os.path.splitext(os.path.basename(name))[0]
                    for name in input_category_folder_list
                ]

                style_code_path = 'ACE'
                if style_code_path in input_category_list:

                    if self.alpha == 1:
                        self.obj_dic[str(
                            i)][style_code_path] = torch.from_numpy(
                                np.load(
                                    os.path.join(input_style_code_folder,
                                                 str(i), style_code_path +
                                                 '.npy'))).cuda()
                    else:
                        # note: potential issue here -- the same style-code dict is reused
                        self.obj_dic[str(
                            i
                        )][style_code_path] = self.alpha * torch.from_numpy(
                            np.load(
                                os.path.join(input_style_code_folder, str(i),
                                             style_code_path + '.npy'))
                        ).cuda() + (1 - self.alpha) * self.obj_dic_GT[str(
                            i)][style_code_path]

                    if style_code_path == 'ACE':
                        self.style_img_mask_dic[str(i)] = style_img_path

                elif os.path.exists(
                        os.path.join('styles_test/style_codes',
                                     os.path.basename(self.GT_img_path),
                                     str(i), style_code_path + '.npy')):
                    if self.alpha == 1:
                        self.obj_dic[str(
                            i)][style_code_path] = torch.from_numpy(
                                np.load(
                                    os.path.join(
                                        'styles_test/style_codes',
                                        os.path.basename(self.GT_img_path),
                                        str(i),
                                        style_code_path + '.npy'))).cuda()
                    else:
                        self.obj_dic[str(
                            i
                        )][style_code_path] = self.alpha * torch.from_numpy(
                            np.load(
                                os.path.join(
                                    'styles_test/style_codes',
                                    os.path.basename(self.GT_img_path), str(i),
                                    style_code_path + '.npy'))).cuda() + (
                                        1 - self.alpha) * self.obj_dic_GT[str(
                                            i)][style_code_path]

                    if style_code_path == 'ACE':
                        self.style_img_mask_dic[str(i)] = self.GT_img_path

        self.run_deep_model()
        self.update_snapshots()
        self.show_reference_image(style_img_path)

    def show_reference_image(self, im_name):

        qim = QImage(im_name).scaled(QSize(256, 256),
                                     transformMode=Qt.SmoothTransformation)
        # self.referDialogImage.setPixmap(QPixmap.fromImage(qim).scaled(QSize(512, 512), transformMode=Qt.SmoothTransformation))
        # # self.referDialog.setWindowTitle('Input:' + os.path.basename(self.GT_img_path) + '\t \t Reference:' + os.path.basename(im_name))
        # self.referDialog.show()

        self.GT_scene.addPixmap(
            QPixmap.fromImage(qim).scaled(
                QSize(512, 512), transformMode=Qt.SmoothTransformation))

    def update_snapshots(self):
        self.clean_snapshots()
        self.recorded_img_names = np.unique(
            list(self.style_img_mask_dic.values()))
        self.recorded_mask_dic = {}

        tmp_count = 0

        for i, name in enumerate(self.recorded_img_names):
            self.recorded_mask_dic[name] = [
                int(num) for num in self.style_img_mask_dic
                if self.style_img_mask_dic[num] == name
            ]

            ########## show mask option 1: masks of the style image
            rgb_mask = skimage.io.imread(
                os.path.join(os.path.dirname(self.opt.label_dir), 'vis',
                             os.path.basename(name)[:-4] + '.png'))
            gray_mask = skimage.io.imread(
                os.path.join(self.opt.label_dir,
                             os.path.basename(name)[:-4] + '.png'))

            mask_snap = np.where(
                np.isin(np.repeat(np.expand_dims(gray_mask, 2), 3, axis=2),
                        self.recorded_mask_dic[name]), rgb_mask, 255)

            if not (mask_snap == 255).all():
                self.mask_snap_style_button_list[tmp_count].setIcon(
                    QIcon(
                        QPixmap.fromImage(
                            QImage(mask_snap.data, mask_snap.shape[1],
                                   mask_snap.shape[0], mask_snap.strides[0],
                                   QImage.Format_RGB888))))

                self.snap_style_button_list[tmp_count].setIcon(QIcon(name))
                tmp_count += 1

    def clean_snapshots(self):
        for snap_style_button in self.snap_style_button_list:
            snap_style_button.setIcon(QIcon())
        for mask_snap_style_button in self.mask_snap_style_button_list:
            mask_snap_style_button.setIcon(QIcon())

    def open_snapshot_dialog(self, i):
        if i < len(self.recorded_img_names):
            im_name = self.recorded_img_names[i]
            qim = QImage(im_name).scaled(QSize(256, 256),
                                         transformMode=Qt.SmoothTransformation)
            self.snapshotDialogImage.setPixmap(
                QPixmap.fromImage(qim).scaled(
                    QSize(512, 512), transformMode=Qt.SmoothTransformation))
            self.snapshotDialog.setWindowTitle('Reference:' +
                                               os.path.basename(im_name))
            self.snapshotDialog.show()
            self.snapshotDialog.count = i
        else:
            self.snapshotDialog.setWindowTitle('Reference:')
            self.snapshotDialogImage.setPixmap(QPixmap())
            self.snapshotDialog.show()
            self.snapshotDialog.count = i
Example #14
    def __init__(self, opt):

        super().__init__()

        self.lock_mode = False
        self.sample_num = 10
        self.truncation_psi = 0.5
        self.snapshot = 0
        self.his_image = []
        self.at_intial_point = False

        self.keep_indexes = [
            2, 5, 25, 28, 16, 32, 33, 34, 55, 75, 79, 162, 177, 196, 160, 212,
            246, 285, 300, 329, 362, 369, 462, 460, 478, 551, 583, 643, 879,
            852, 914, 999, 976, 627, 844, 237, 52, 301, 599
        ]
        # self.keep_indexes = [i for i in range(0,100)]
        # self.keep_indexes = [0]
        self.keep_indexes = np.array(self.keep_indexes).astype(int)

        self.zero_padding = torch.zeros(1, 18, 1).cuda()
        self.real_scene_update.connect(self.update_real_scene)

        self.attr_order = [
            'Gender', 'Glasses', 'Yaw', 'Pitch', 'Baldness', 'Beard', 'Age',
            'Expression'
        ]
        self.lighting_order = [
            'Left->Right', 'Right->Left', 'Down->Up', 'Up->Down', 'No light',
            'Front light'
        ]

        self.init_deep_model(opt)
        self.init_data_points()

        self.setupUi(self)
        self.show()

        self.scene = GraphicsScene(self)
        # self.scene.setSceneRect(0, 0, 1024, 1024)
        self.graphicsView.setScene(self.scene)
        self.graphicsView.setAlignment(Qt.AlignCenter)
        self.graphicsView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView.show()

        self.lock_scene = GTScene(self)
        self.lockView.setScene(self.lock_scene)
        self.lockView.setAlignment(Qt.AlignCenter)
        self.lockView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.lockView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.lockView.hide()

        self.GT_scene = GTScene(self)
        self.resultView.setScene(self.GT_scene)
        self.resultView.setAlignment(Qt.AlignCenter)
        self.resultView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.resultView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.realtime_attr_thread = RealTimeAttrThread(self)

        self.realtime_light_thread = RealTimeLightThread(self)

        self.init_screen()
Example #15
class Ex(Ui_Form):
    real_scene_update = pyqtSignal(bool, name='update_real_scene')

    def __init__(self, opt):

        super().__init__()

        self.lock_mode = False
        self.sample_num = 10
        self.truncation_psi = 0.5
        self.snapshot = 0
        self.his_image = []
        self.at_intial_point = False

        self.keep_indexes = [
            2, 5, 25, 28, 16, 32, 33, 34, 55, 75, 79, 162, 177, 196, 160, 212,
            246, 285, 300, 329, 362, 369, 462, 460, 478, 551, 583, 643, 879,
            852, 914, 999, 976, 627, 844, 237, 52, 301, 599
        ]
        # self.keep_indexes = [i for i in range(0,100)]
        # self.keep_indexes = [0]
        self.keep_indexes = np.array(self.keep_indexes).astype(int)

        self.zero_padding = torch.zeros(1, 18, 1).cuda()
        self.real_scene_update.connect(self.update_real_scene)

        self.attr_order = [
            'Gender', 'Glasses', 'Yaw', 'Pitch', 'Baldness', 'Beard', 'Age',
            'Expression'
        ]
        self.lighting_order = [
            'Left->Right', 'Right->Left', 'Down->Up', 'Up->Down', 'No light',
            'Front light'
        ]

        self.init_deep_model(opt)
        self.init_data_points()

        self.setupUi(self)
        self.show()

        self.scene = GraphicsScene(self)
        # self.scene.setSceneRect(0, 0, 1024, 1024)
        self.graphicsView.setScene(self.scene)
        self.graphicsView.setAlignment(Qt.AlignCenter)
        self.graphicsView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView.show()

        self.lock_scene = GTScene(self)
        self.lockView.setScene(self.lock_scene)
        self.lockView.setAlignment(Qt.AlignCenter)
        self.lockView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.lockView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.lockView.hide()

        self.GT_scene = GTScene(self)
        self.resultView.setScene(self.GT_scene)
        self.resultView.setAlignment(Qt.AlignCenter)
        self.resultView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.resultView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.realtime_attr_thread = RealTimeAttrThread(self)

        self.realtime_light_thread = RealTimeLightThread(self)

        self.init_screen()

    def init_deep_model(self, opt):
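        """Build the generator, cache its average latent, and load the pretrained flow prior in eval mode."""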
        self.opt = opt
        self.model = Build_model(self.opt)
        self.w_avg = self.model.Gs.get_var('dlatent_avg')

        self.prior = cnf(512, '512-512-512-512-512', 17, 1)

        self.prior.load_state_dict(torch.load('flow_weight/modellarge10k.pt'))

        self.prior.eval()

    def init_screen(self):
        self.update_scene_image()

    def update_scene_image(self):
        qim = QImage(self.map.data, self.map.shape[1], self.map.shape[0],
                     self.map.strides[0], QImage.Format_RGB888)

        pixmap = QPixmap.fromImage(qim)
        self.scene.reset()
        if len(self.scene.items()) > 0:
            self.scene.reset_items()
        self.scene.addPixmap(pixmap)

    def update_GT_scene_image(self):
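        """Load the picked sample's latent, attributes, and lighting, reset the sliders, and regenerate the displayed image."""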

        self.at_intial_point = True

        # self.scene.pickedImageIndex = 28

        self.w_current = self.all_w[self.scene.pickedImageIndex].copy()

        self.attr_current = self.all_attr[self.scene.pickedImageIndex].copy()
        self.light_current = self.all_lights[
            self.scene.pickedImageIndex].copy()

        self.attr_current_list = [
            self.attr_current[i][0] for i in range(len(self.attr_order))
        ]

        self.light_current_list = [0 for i in range(len(self.lighting_order))]

        for i, j in enumerate(self.attr_order):
            self.slider_list[i].setValue(
                transfer_real_to_slide(j, self.attr_current_list[i]))

        for i, j in enumerate(self.lighting_order):
            self.lighting_slider_list[i].setValue(0)

        ################################  calculate attributes array first, then change the values of attributes

        self.q_array = torch.from_numpy(self.w_current).cuda().clone().detach()
        self.array_source = torch.from_numpy(self.attr_current).type(
            torch.FloatTensor).cuda()
        self.array_light = torch.from_numpy(self.light_current).type(
            torch.FloatTensor).cuda()
        self.pre_lighting_distance = [
            self.pre_lighting[i] - self.array_light
            for i in range(len(self.lighting_order))
        ]

        self.final_array_source = torch.cat(
            [self.array_light,
             self.array_source.unsqueeze(0).unsqueeze(-1)],
            dim=1)
        self.final_array_target = torch.cat(
            [self.array_light,
             self.array_source.unsqueeze(0).unsqueeze(-1)],
            dim=1)
        # print(self.q_array.shape, self.final_array_source.shape, self.zero_padding.shape)
        self.fws = self.prior(self.q_array, self.final_array_source,
                              self.zero_padding)

        self.GAN_image = self.model.generate_im_from_w_space(self.w_current)[0]

        qim = QImage(self.GAN_image.data, self.GAN_image.shape[1],
                     self.GAN_image.shape[0], self.GAN_image.strides[0],
                     QImage.Format_RGB888)

        showedImagePixmap = QPixmap.fromImage(qim)
        # showedImagePixmap = showedImagePixmap.scaled(QSize(256, 256), Qt.IgnoreAspectRatio)
        self.GT_scene.reset()
        if len(self.GT_scene.items()) > 0:
            self.GT_scene.reset_items()
        self.lock_scene.reset()
        if len(self.lock_scene.items()) > 0:
            self.lock_scene.reset_items()

        self.GT_scene.addPixmap(showedImagePixmap)
        self.lock_scene.addPixmap(showedImagePixmap)

        for i in range(15):
            self.style_button_list[i].setIcon(QIcon())

        self.style_button_list[0].setIcon(
            QIcon(showedImagePixmap.scaled(128, 128)))
        self.his_image = []
        self.his_image.append(qim.copy())

        self.at_intial_point = False

    def update_lock_scene(self):
        qim = QImage(self.GAN_image.data, self.GAN_image.shape[1],
                     self.GAN_image.shape[0], self.GAN_image.strides[0],
                     QImage.Format_RGB888)

        showedImagePixmap = QPixmap.fromImage(qim)
        if len(self.lock_scene.items()) > 0:
            self.lock_scene.reset_items()
        self.lock_scene.addPixmap(showedImagePixmap)
        self.snapshot += 1
        self.style_button_list[self.snapshot].setIcon(
            QIcon(showedImagePixmap.scaled(128, 128)))
        self.his_image.append(qim.copy())

    def update_real_scene(self):
        qim = QImage(self.GAN_image.data, self.GAN_image.shape[1],
                     self.GAN_image.shape[0], self.GAN_image.strides[0],
                     QImage.Format_RGB888)

        showedImagePixmap = QPixmap.fromImage(qim)

        self.GT_scene.addPixmap(showedImagePixmap)

    def show_his_image(self, i):
        # Display the i-th history snapshot in the locked view.
        qim = self.his_image[i]
        showedImagePixmap = QPixmap.fromImage(qim)
        if len(self.lock_scene.items()) > 0:
            self.lock_scene.reset_items()
        self.lock_scene.addPixmap(showedImagePixmap)

    def real_time_editing_thread(self, attr_index, raw_slide_value):
        # Hand the attribute edit to the background render thread; the TF1 default
        # session is passed explicitly because default sessions are thread-local.
        self.realtime_attr_thread.render(attr_index, raw_slide_value,
                                         tf.get_default_session())

    def real_time_light_thread(self, light_index, raw_slide_value):
        # Hand the lighting edit to the background render thread.
        self.realtime_light_thread.render(light_index, raw_slide_value,
                                          tf.get_default_session())

    def real_time_lighting(self, light_index, raw_slide_value):

        if not self.at_intial_point:

            real_value = light_invert_slide_to_real(
                self.lighting_order[light_index], raw_slide_value)

            self.light_current_list[light_index] = real_value

            # Rebuild the conditioning array, then update its lighting entries.

            # Lighting edits are relative: start from the source lighting and move
            # along each preset direction by the amount set on its slider.
            lighting_final = self.array_light.clone().detach()
            for i in range(len(self.lighting_order)):
                lighting_final += self.light_current_list[
                    i] * self.pre_lighting_distance[i]

            # The first 9 entries of the conditioning array hold the lighting.
            self.final_array_target[:, :9] = lighting_final

            # Run the prior in the reverse direction with the edited conditions to
            # obtain a new latent, keeping the W+ layers that lighting should not
            # affect pinned to their original values.
            self.rev = self.prior(self.fws[0], self.final_array_target,
                                  self.zero_padding, True)
            self.rev[0][0][0:7] = self.q_array[0][0:7]
            self.rev[0][0][12:18] = self.q_array[0][12:18]

            self.w_current = self.rev[0].detach().cpu().numpy()
            self.q_array = torch.from_numpy(
                self.w_current).cuda().clone().detach()

            self.fws = self.prior(self.q_array, self.final_array_target,
                                  self.zero_padding)

            self.GAN_image = self.model.generate_im_from_w_space(
                self.w_current)[0]


    def real_time_editing(self, attr_index, raw_slide_value):

        if not self.at_intial_point:

            real_value = invert_slide_to_real(self.attr_order[attr_index],
                                              raw_slide_value)

            attr_change = real_value - self.attr_current_list[attr_index]
            # Scale the requested change by the per-attribute editing strength.
            attr_final = attr_degree_list[
                attr_index] * attr_change + self.attr_current_list[attr_index]

            # Attribute channels start right after the 9 lighting channels.
            self.final_array_target[0, attr_index + 9, 0, 0] = attr_final

            # Run the prior in the reverse direction with the edited conditions;
            # the per-attribute cases below pin the W+ layers unrelated to this
            # attribute back to their original values.
            self.rev = self.prior(self.fws[0], self.final_array_target,
                                  self.zero_padding, True)

            if attr_index == 0:
                self.rev[0][0][8:] = self.q_array[0][8:]

            elif attr_index == 1:
                self.rev[0][0][:2] = self.q_array[0][:2]
                self.rev[0][0][4:] = self.q_array[0][4:]

            elif attr_index == 2:

                self.rev[0][0][4:] = self.q_array[0][4:]

            elif attr_index == 3:
                self.rev[0][0][4:] = self.q_array[0][4:]

            elif attr_index == 4:
                self.rev[0][0][6:] = self.q_array[0][6:]

            elif attr_index == 5:
                self.rev[0][0][:5] = self.q_array[0][:5]
                self.rev[0][0][10:] = self.q_array[0][10:]

            elif attr_index == 6:
                self.rev[0][0][0:4] = self.q_array[0][0:4]
                self.rev[0][0][8:] = self.q_array[0][8:]

            elif attr_index == 7:
                self.rev[0][0][:4] = self.q_array[0][:4]
                self.rev[0][0][6:] = self.q_array[0][6:]

            self.w_current = self.rev[0].detach().cpu().numpy()
            self.q_array = torch.from_numpy(
                self.w_current).cuda().clone().detach()

            self.fws = self.prior(self.q_array, self.final_array_target,
                                  self.zero_padding)

            self.GAN_image = self.model.generate_im_from_w_space(
                self.w_current)[0]

    def reset_Wspace(self):
        # Discard all edits by re-initialising from the currently picked image.
        self.update_GT_scene_image()

    def init_data_points(self):

        # Pre-computed demo data: StyleGAN2 W latents, a 2-D t-SNE embedding used
        # for the picker map, per-image attribute scores and lighting vectors.
        self.raw_w = pickle.load(open("data/sg2latents.pickle", "rb"))

        self.raw_TSNE = np.load('data/TSNE.npy')

        self.raw_attr = np.load('data/attributes.npy')

        self.raw_lights2 = np.load('data/light.npy')
        # raw_lights is an alias of raw_lights2; only all_lights below is
        # restricted to the curated subset of samples.
        self.raw_lights = self.raw_lights2

        self.all_w = np.array(self.raw_w['Latent'])[self.keep_indexes]
        self.all_attr = self.raw_attr[self.keep_indexes]
        self.all_lights = self.raw_lights[self.keep_indexes]

        # Six reference lighting vectors used as preset lighting directions.
        preset_light_indexes = [8, 33, 641, 547, 28, 34]
        self.pre_lighting = [
            torch.from_numpy(self.raw_lights2[i]).type(torch.FloatTensor).cuda()
            for i in preset_light_indexes
        ]

        self.X_samples = self.raw_TSNE[self.keep_indexes]

        self.map = np.ones([1024, 1024, 3], np.uint8) * 255

        # Draw each embedded sample on the picker map. Cast coordinates to plain
        # int (not np.uint8): values up to 1024 would overflow an 8-bit type.
        for point in self.X_samples:
            cv2.circle(self.map, tuple((point * 1024).astype(int)), 6,
                       (0, 0, 255), -1)

        self.nbrs = NearestNeighbors(n_neighbors=1,
                                     algorithm='ball_tree').fit(self.X_samples)
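
    # A minimal usage sketch (not part of the original example) showing how the
    # fitted NearestNeighbors index above can map a click on the 1024x1024 picker
    # map back to the closest sample, assuming the stored t-SNE coordinates lie in
    # [0, 1] as the drawing code suggests. The method name and (x, y) arguments
    # are hypothetical.
    def nearest_sample_index(self, x, y):
        query = np.array([[x / 1024.0, y / 1024.0]])
        _, neighbor_indexes = self.nbrs.kneighbors(query)
        return int(neighbor_indexes[0][0])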

    @pyqtSlot()
    def lock_switch(self):
        self.lock_mode = not self.lock_mode

        if self.lock_mode:
            # Clear the button highlight and show the locked snapshot view.
            self.brushButton.setStyleSheet("background-color:")

            self.lockView.show()
            self.graphicsView.hide()
        else:
            # Highlight the button and return to the interactive editing view.
            self.brushButton.setStyleSheet("background-color: #85adad")
            self.graphicsView.show()
            self.lockView.hide()