예제 #1
0
 def __init__(self, imageFileName, ax):
     """Initialize the image holder for the given matplotlib axes.

     imageFileName: path of the image file to load.
     ax: the axes the image will be drawn onto.
     """
     self.ax = ax
     self.figure = self.ax.get_figure()
     # NOTE(review): if mpimg is matplotlib.image, the loader is
     # mpimg.imread, not mpimg.open — confirm against the import.
     self.image = mpimg.open(imageFileName)
     # No image axes created yet; drag offsets start at the origin.
     self.imAxes = None
     self.dx = 0
     self.dy = 0
예제 #2
0
 def __init__(self, imageFileName, ax):
     """Initialize the image holder for the given matplotlib axes.

     imageFileName: path of the image file to load.
     ax: the axes the image will be drawn onto.
     """
     self.ax = ax
     self.figure = self.ax.get_figure()
     # NOTE(review): if mpimg is matplotlib.image, the loader is
     # mpimg.imread, not mpimg.open — confirm against the import.
     self.image = mpimg.open(imageFileName)
     # No image axes created yet; drag offsets start at the origin.
     self.imAxes = None
     self.dx = 0
     self.dy = 0
예제 #3
0
    def __init__(self, input, type):
        """Build the cutter from an encoded image.

        input: for type 'base64', a data-URI string
               ("data:image/...;base64,<payload>").
        type: source encoding; only 'base64' is supported.
        Raises ValueError for any unsupported type.
        """
        if type == 'base64':
            # Strip the data-URI header, then decode the base64 payload
            # into raw image bytes (Python 2 str.decode('base64')).
            # Bug fix: the original stored the decoded bytes in
            # `base64String` but then read the undefined name
            # `image_data`, which raised NameError on every call.
            image_data = re.sub('^data:image/.+;base64,', '',
                                input).decode('base64')

            image = Image.open(cStringIO.StringIO(image_data))
            self.__image = image
            # Keep an untouched copy so edits can be reverted.
            self.__backupImage = deepcopy(self.__image)
            return
        else:
            raise ValueError('Cutter: This type is not supported')
예제 #4
0
    def __init__(self, input, type):
        """Build the cutter from an encoded image.

        input: for type 'base64', a data-URI string
               ("data:image/...;base64,<payload>").
        type: source encoding; only 'base64' is supported.
        Raises ValueError for any unsupported type.
        """
        if type == 'base64':
            # Strip the data-URI header, then decode the base64 payload
            # into raw image bytes (Python 2 str.decode('base64')).
            # Bug fix: the original stored the decoded bytes in
            # `base64String` but then read the undefined name
            # `image_data`, which raised NameError on every call.
            image_data = re.sub('^data:image/.+;base64,',
                                '', input).decode('base64')

            image = Image.open(cStringIO.StringIO(image_data))
            self.__image = image
            # Keep an untouched copy so edits can be reverted.
            self.__backupImage = deepcopy(self.__image)
            return
        else:
            raise ValueError('Cutter: This type is not supported')
예제 #5
0
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# Transform that only converts a PIL image to a CHW float tensor.
objectDetectorTrans = transforms.Compose([transforms.ToTensor()])

t0 = time.time()
with torch.no_grad():
    for i in range(len(filelist)):
        print("current file: ", filelist[i])
        # All files follow the pattern resolution_className_index;
        # split on "_" to recover the parts.
        splits = filelist[i].split("_")

        # The object detector needs the whole low-res picture, loaded as a
        # tensor of shape (1, 3, H, W).  NOTE(review): ToTensor yields
        # (C, H, W), so image.shape[1]/[2] below are height and width.
        imagePIL = Image.open(os.path.join(valPath, filelist[i]))
        image = objectDetectorTrans(imagePIL)
        image_z = torch.zeros(1, 3, image.shape[1], image.shape[2])
        image_z[0] = image
        image_z = image_z.to(device)

        # Run inference; keep only the "boxes" field of the first result
        # and report the per-file wall-clock time.
        detections = objectDetector(image_z)[0]["boxes"]
        print("Box points: ", detections)
        t1 = time.time()
        print(t1 - t0, " sec")
        t0 = t1

        # Loop over every suggested box.
        for detection in detections:
            #we need the box points in the highRes picture. For that we need
예제 #6
0
        matching.append(image_similarity(src))
    for i in range(batch_size):
        # Denormalize each tensor back to 0-255 and move to NumPy NHWC.
        # NOTE(review): src1/src2 are computed but never used below, and
        # "ori_imgs[i] = ori_imgs[i]" is a no-op — these look like typos
        # for "ori_imgs[i] = src1" / "new_imgs[i] = src2"; confirm intent.
        src1 = ori_imgs[i].mul_(255).add_(0.5).clamp_(
            0, 255).cpu().numpy().transpose((0, 2, 3, 1))
        src2 = new_imgs[i].mul_(255).add_(0.5).clamp_(
            0, 255).cpu().numpy().transpose((0, 2, 3, 1))

        ori_imgs[i] = ori_imgs[i]
        # NOTE(review): new_imgsp is not defined in this scope — probable
        # typo for new_imgs; as written this raises NameError.
        new_imgsp[i] = ori_imgs[i]

    # Best-matching index and score for the original and generated batches.
    ori_index, ori_max = img_match(ori_imgs, matching)
    new_index, new_max = img_match(new_imgs, matching)

    for i in range(batch_size):
        # Join each image with its best match loaded back from disk.
        ori_result = img_joint(ori_imgs[i],
                               mtimage.open(imgs_list[ori_index[i]]))
        new_result = img_joint(new_imgs[i],
                               mtimage.open(imgs_list[new_index[i]]))

        # Scale the [0, 1] float arrays up to 8-bit PIL images.
        ori_result = Image.fromarray((ori_result * 255).astype(np.uint8))
        new_result = Image.fromarray((new_result * 255).astype(np.uint8))

        # Save side-by-side results named by their match score.
        ori_result.save(
            os.path.join(test_path, 'ori_img' + str(ori_max) + '.jpg'))
        new_result.save(
            os.path.join(test_path, 'new_img' + str(new_max) + '.jpg'))

    # # dataroot = '/home/admin11/Data_test/celeba'
    # dataroot = '/home/admin11/Data_test/test_1000'
    # # test_path = '/home/admin11/1.my_zone/test_imgs'
    # test_path = '/home/admin11/1.my_zone/test_imgs'