Example #1
    def __init__(self, stage, configs, tta=False, tta_size=48):
        self._stage = stage
        self._configs = configs
        self._tta = tta
        self._tta_size = tta_size

        self._image_size = (configs['image_size'], configs['image_size'])

        self._data = list(paths.list_images(
                configs['data_path']))
        random.shuffle(self._data)

        self._transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.ToTensor(),
        ])
Example #2
    def __init__(self, configs: Configs):

        self.device = configs.device
        self.path = configs.datasetPath
        self.size = (configs.image_size, configs.image_size)

        self.data = torch.load(self.path)
        self.length = len(self.data)

        print(self.length, 'images in', self.path)

        self.transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize(self.size),
            transforms.ToTensor(),
        ])
Example #3
def get_transform(args):
    if args.dataset == 'celeba':
        crop_size = 108
        re_size = 64
        offset_height = (218 - crop_size) // 2
        offset_width = (178 - crop_size) // 2
        crop = lambda x: x[:, offset_height:offset_height + crop_size,
                offset_width:offset_width + crop_size]
        preprocess = transforms.Compose(
                [transforms.ToTensor(),
                    transforms.Lambda(crop),
                    transforms.ToPILImage(),
                    # transforms.Scale was removed from torchvision; Resize is its replacement
                    transforms.Resize(size=(re_size, re_size), interpolation=Image.BICUBIC),
                    transforms.ToTensor(),
                    transforms.Normalize(mean=[0.5] * 3, std=[0.5] * 3)])
    else:
        # avoid returning an undefined `preprocess` for unsupported datasets
        raise NotImplementedError(args.dataset)
    return preprocess
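For the 218x178 aligned CelebA images the center crop works out to offset_height = (218 - 108) // 2 = 55 and offset_width = (178 - 108) // 2 = 35. A minimal usage sketch (the image path is hypothetical):

from argparse import Namespace
from PIL import Image

preprocess = get_transform(Namespace(dataset='celeba'))
img = Image.open('celeba/000001.jpg')  # hypothetical path to a 178x218 aligned face
x = preprocess(img)                    # float tensor, shape (3, 64, 64), values in [-1, 1]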
Example #4
def eval():
    model = Network(True).cuda()
    model.load_state_dict(load_checkpoint('./noise_models', best_or_latest='best'))
    model.eval()
    from torchvision import transforms
    from PIL import Image
    img = Image.open('./003/1.jpg')
    trans_tensor = transforms.ToTensor()
    trans_srgb = transforms.ToPILImage()
    img = trans_tensor(img).unsqueeze(0).cuda()
    with torch.no_grad():
        pred = model(img).squeeze()
    print('min:', torch.min(pred), 'max:', torch.max(pred))
    pred = pred / torch.max(pred)
    pred = pred.cpu()
    trans_srgb(pred).save('./003/1_pred.png')  # PNG ignores JPEG's quality option
    print('OK!')
Example #5
def predict():
    image_size = 64
    channels_img = 3
    channels_noise = 128
    features_g = 64
    # note: image_size doubles as the batch size here; only fake[0] is served below
    fixed_noise = torch.randn(image_size, channels_noise, 1, 1)
    netG = Generator(channels_noise, channels_img, features_g)
    netG.load_state_dict(torch.load('g.pth', map_location=device))
    netG.eval()
    with torch.no_grad():
        fake = netG(fixed_noise)
    newFake = transforms.Compose([
        transforms.Normalize((-1, -1, -1), (2, 2, 2)),  # undo Normalize(0.5, 0.5): [-1, 1] -> [0, 1]
        transforms.ToPILImage(),
        transforms.Resize(512)
    ])(fake[0])
    print(newFake)
    return serve_pil_image(newFake)
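The Normalize((-1, -1, -1), (2, 2, 2)) step computes (x - (-1)) / 2, which exactly inverts the Normalize((0.5,)*3, (0.5,)*3) commonly used during GAN training, mapping [-1, 1] back to [0, 1]. A quick sanity check:

import torch
from torchvision import transforms

x = torch.rand(3, 8, 8)                            # image in [0, 1]
norm = transforms.Normalize([0.5] * 3, [0.5] * 3)  # [0, 1] -> [-1, 1]
denorm = transforms.Normalize([-1] * 3, [2] * 3)   # [-1, 1] -> [0, 1]
assert torch.allclose(denorm(norm(x)), x, atol=1e-6)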
Example #6
def visualize_model(model):
    model.eval()
    with torch.no_grad():
        for i, data in enumerate(data_loaders['val']):
            inputs = data['image']
            labels_classes = data['classes'].to(device)

            x_classes = model(inputs.to(device))
            x_classes = x_classes.view(-1, 2)
            _, preds_classes = torch.max(x_classes, 1)

            print(inputs.shape)
            plt.imshow(transforms.ToPILImage()(inputs.squeeze(0)))
            plt.title('predicted classes: {}\n ground-truth classes:{}'.format(
                CLASSES[preds_classes], CLASSES[labels_classes]))
            plt.show()
Example #7
def run_model(model_path, discrim_path):
    model = Deblurrer()
    model.load_state_dict(
        torch.load(model_path, map_location=torch.device('cpu')))
    model.eval()

    discriminator = Discriminator(3, 64)
    discriminator.load_state_dict(
        torch.load(discrim_path, map_location=torch.device('cpu')))
    discriminator.eval()

    dataset = LFWC(["../data/train/faces_blurred"], "../data/train/faces")
    #dataset = FakeData(size=1000, image_size=(3, 128, 128), transform=transforms.ToTensor())
    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=1,
                                              shuffle=True)
    for data in data_loader:
        blurred_img = Variable(data['blurred'])
        nonblurred = Variable(data['nonblurred'])

        # Should be near zero
        discrim_output_blurred = discriminator(blurred_img).view(
            -1).data.item()
        # Should be near one
        discrim_output_nonblurred = discriminator(nonblurred).view(
            -1).data.item()

        #im = Image.open(image_path)
        #transform = transforms.ToTensor()
        transformback = transforms.ToPILImage()
        plt.imshow(transformback(blurred_img[0]))
        plt.title('Blurred, Discrim value: ' + str(discrim_output_blurred))
        plt.show()
        plt.imshow(transformback(nonblurred[0]))
        plt.title('Non Blurred, Discrim value: ' +
                  str(discrim_output_nonblurred))
        plt.show()

        out = model(blurred_img)
        discrim_output_model = discriminator(out).view(-1).data.item()
        #print(out.shape)
        outIm = transformback(out[0])

        plt.imshow(outIm)
        plt.title('Model out, Discrim value: ' + str(discrim_output_model))
        plt.show()
Example #8
def visualize_model(model):
    model.eval()
    with torch.no_grad():
        for i, data in enumerate(data_loaders['val']):
            inputs = data['image']
            labels_species = data['species'].to(device)

            x_species = model(inputs.to(device))
            x_species = x_species.view(-1, 3)  # reshape to (N, num_species)
            _, preds_species = torch.max(
                x_species, 1)  # torch.max(tensor, dim) returns (values, indices)

            print(inputs.shape)
            plt.imshow(transforms.ToPILImage()(inputs.squeeze(0)))
            plt.title('predicted species: {}\n ground-truth species:{}'.format(
                SPECIES[preds_species], SPECIES[labels_species]))
            plt.show()
Example #9
    def __init__(self, stage, configs, tta=False, tta_size=48):
        self._stage = stage
        self._tta = tta
        self._tta_size = tta_size
        self._configs = configs

        self._image_size = (224, 224)
        self._data = pd.read_csv(
            os.path.join(configs['data_path'], '{}.csv'.format(stage)))

        self._path_list = self._data['filepath'].tolist()
        self._emotions = pd.get_dummies(self._data['emotions'])

        self._transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.ToTensor(),
        ])
Example #10
    def augmentation(self, data, gr_ids, policy):
        aug_imgs = []
        if "cifar" in C.get()["dataset"]:
            mean, std = _CIFAR_MEAN, _CIFAR_STD
        elif "svhn" in C.get()["dataset"]:
            mean, std = _SVHN_MEAN, _SVHN_STD
        # applied_policies = []
        for gr_id, img in zip(gr_ids, data):
            pil_img = transforms.ToPILImage()(UnNormalize()(img.cpu()))
            _aug = Augmentation(policy[int(gr_id)])
            aug_img = _aug(pil_img)
            aug_img = self.transform(aug_img)
            aug_imgs.append(aug_img)
            # applied_policy = _aug.policy # Todo
            # applied_policies.append(applied_policy)
        aug_imgs = torch.stack(aug_imgs)
        return aug_imgs.cuda()  #, applied_policies
Example #11
def prediction_to_img(pred):

    _, C, H, W = pred.shape
    pred = torch.reshape(pred, (C, H, W))
    _, indices = torch.max(pred, dim=0)

    indices = torch.reshape(indices, (1, H, W))
    output = torch.cat((indices, indices, indices))
    output_img = torch.zeros(3, H, W)

    for i in range(C):

        R, G, B = color_dict[i]
        color_mask = torch.cat((R*torch.ones((1, H, W)), G*torch.ones((1, H, W)), B*torch.ones((1, H, W))))
        output_img = torch.where(output == i, color_mask, output_img)

    return transforms.ToPILImage()(output_img/255).convert('RGB')
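prediction_to_img assumes a global color_dict mapping each class index to an RGB triple. A minimal usage sketch with a hypothetical three-class palette:

import torch
from torchvision import transforms

color_dict = {0: (0, 0, 0), 1: (255, 0, 0), 2: (0, 255, 0)}  # hypothetical palette
pred = torch.randn(1, 3, 64, 64)  # stand-in (N, C, H, W) network output
prediction_to_img(pred).save('mask.png')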
Example #12
def parse_augmentation(augmentation_spec, image_size):
    """ Loads the data augmentation configuration

  Args:
      augmentation_spec: DataAugmentation instance
      image_size: the output image size

  Returns: torchvision.transforms object with the corresponding augmentation

  """
    def gaussian_noise(x, std):
        """Helper function to add Gaussian noise.

        Args:
            x: input
            std: standard deviation for the normal distribution

        Returns: perturbed image
        """
        x += torch.randn(x.size()) * std
        return x

    def rescale(x):
        """Rescales the image between -1 and 1.

        Args:
            x: image

        Returns: rescaled image
        """
        return (x * 2) - 1

    _transforms = []
    if augmentation_spec.enable_gaussian_noise and \
        augmentation_spec.gaussian_noise_std > 0:
        f = partial(gaussian_noise, std=augmentation_spec.gaussian_noise_std)
        _transforms.append(transforms.Lambda(f))
    if augmentation_spec.enable_jitter and \
        augmentation_spec.jitter_amount > 0:
        _transforms.append(transforms.ToPILImage())
        amount = augmentation_spec.jitter_amount
        _transforms.append(transforms.RandomCrop(image_size, padding=amount))
    return _transforms
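The returned value is a plain list, so it still needs to be composed before use. A minimal usage sketch (assuming a hypothetical `spec` DataAugmentation instance with both options enabled; the pipeline takes a tensor, adds noise, then converts to PIL for the padded random crop):

aug = parse_augmentation(spec, image_size=84)
transform = transforms.Compose(aug + [transforms.ToTensor()])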
Example #13
def get_random_test_samples_dataloader(
    parameters: dict,
    nb_sample: int = 10,
    transform: Optional[Callable] = None,
    classes: Optional[List[int]] = None,
) -> Tuple[Dataset, DataLoader]:
    """ Return a random set of test samples """

    dataset_test_image = dataset_from_config(
        parameters, "test",
        transforms.Compose([
            transform,
            transforms.Normalize([-1], [2]),
            transforms.ToPILImage()
        ]))
    dataset_test_tensors = dataset_from_config(parameters, "test", transform)

    if classes is not None:
        y = dataset_test_image.y()
        y = torch.tensor(y)

        # Number of samples to choose per class (nb_sample / len(classes), +/- 1)
        split = (
            torch.arange(nb_sample + len(classes) - 1, nb_sample - 1, -1) //
            len(classes)).tolist()
        subset_indices = []
        for i, class_ in enumerate(classes):
            # Indices in the dataset corresponding to this class
            indices_for_class = torch.where(y == class_)[0]
            # Add random indices corresponding to this class
            subset_indices += indices_for_class[torch.randint(
                len(indices_for_class), (split[i], ))].tolist()
    else:
        subset_indices = [
            random.randrange(len(dataset_test_image)) for _ in range(nb_sample)
        ]

    subset_images = Subset(dataset_test_image, subset_indices)
    subset_tensors = Subset(dataset_test_tensors, subset_indices)
    dataloader_tensors = DataLoader(subset_tensors,
                                    batch_size=nb_sample,
                                    num_workers=0,
                                    shuffle=False)

    return subset_images, dataloader_tensors
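The split arithmetic distributes nb_sample across the classes as evenly as possible: each class gets nb_sample // len(classes) samples, plus one for the first nb_sample % len(classes) classes. A quick check of the formula:

import torch

nb_sample, n_classes = 10, 3
split = (torch.arange(nb_sample + n_classes - 1, nb_sample - 1, -1) // n_classes).tolist()
print(split)       # [4, 3, 3]
print(sum(split))  # 10 == nb_sample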
Example #14
    def __init__(self):
        self.net = cv2.dnn.readNetFromCaffe(
            os.path.join(FACE_ROOT_DIR, "deploy.prototxt.txt"),
            os.path.join(FACE_ROOT_DIR, "res10_300x300_ssd_iter_140000.caffemodel")
        )
        self.transform = transforms.Compose([transforms.ToPILImage(), transforms.ToTensor()])

        # load configs and set random seed
        self.configs = json.load(open(os.path.join(FACE_ROOT_DIR, "configs", "fer2013_config.json")))
        self.image_size = (self.configs["image_size"], self.configs["image_size"])

        self.model = resmasking_dropout1(in_channels=3, num_classes=7)
        self.model.cuda()
        self.state = torch.load(
            os.path.join(FACE_ROOT_DIR, "Z_resmasking_dropout1_rot30_2019Nov30_13.32")
        )
        self.model.load_state_dict(self.state["net"])
        self.model.eval()
Example #15
def get_image_dataloader(path_to_data, batch_size=16):
    my_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.ToPILImage(),
        transforms.RandomHorizontalFlip(),
        transforms.RandomGrayscale(),
        transforms.RandomRotation([-30, 30]),
        # transforms.Normalize([],[]) # get mean and std
        transforms.ToTensor(),
    ])
    dataset = ImageFolder(root=path_to_data,
                          transform=my_transforms,
                          is_valid_file=check_valid)
    print(len(dataset.classes))
    dataloader = DataLoader(dataset=dataset,
                            batch_size=batch_size,
                            shuffle=True)
    return dataloader, dataset
Example #16
def prepare_data(images, color_mode='BGR', new_shape=416, color=(127.5, 127.5, 127.5), mode='square'):
    images_ok = np.zeros((images.shape[0], new_shape, new_shape, 3), dtype=images[0].dtype)
    images_tensor = torch.zeros((images.shape[0], 3, new_shape, new_shape), dtype=torch.float32)
    for i in range(len(images)):
        if color_mode == 'BGR':
            images[i] = cv2.cvtColor(images[i], cv2.COLOR_BGR2RGB)
        elif color_mode == 'RGB':
            pass
        else:
            raise NotImplementedError
        images_ok[i], _, _, _ = letterbox(images[i], new_shape, color, mode)

        images_tensor[i] = transforms.Compose([
            transforms.ToPILImage(),
            transforms.ToTensor(),
        ])(images_ok[i])

    return images_tensor
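For an HWC uint8 array like images_ok[i], the ToPILImage -> ToTensor round trip is equivalent to ToTensor alone; both yield a float32 CHW tensor scaled to [0, 1]. A quick check:

import numpy as np
import torch
from torchvision import transforms

img = np.random.randint(0, 256, (416, 416, 3), dtype=np.uint8)
a = transforms.Compose([transforms.ToPILImage(), transforms.ToTensor()])(img)
b = transforms.ToTensor()(img)
assert torch.equal(a, b) and a.shape == (3, 416, 416)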
Example #17
def tensors_to_images(tensors, filenames, valid_data_path):
    quality_val = 90
    transform = transforms.ToPILImage()
    for i in range(len(tensors)):
        for volume in tensors[i]:
            volume = volume.cpu().numpy().transpose((1, 2, 0))
            for j in range(volume.shape[2]):
                channel = volume[:, :, j]
                minimum = np.min(channel)
                maximum = np.max(channel)
                volume[:, :,
                       j] = 255 * (channel - minimum) / (maximum - minimum)
            image = transform(np.uint8(volume))
            file_name = filenames[i][0].split('.')
            image.save(os.path.join(valid_data_path,
                                    file_name[0] + '_valid.jpg'),
                       'JPEG',
                       quality=quality_val)
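The per-channel min/max loop above can also be written without the inner Python loop; a sketch under the same assumptions (an HWC float volume with distinct per-channel min and max):

import numpy as np

volume = np.random.rand(128, 128, 3).astype(np.float32)  # stand-in volume
lo = volume.min(axis=(0, 1), keepdims=True)  # per-channel minima, shape (1, 1, C)
hi = volume.max(axis=(0, 1), keepdims=True)
scaled = np.uint8(255 * (volume - lo) / (hi - lo))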
Example #18
    def __init__(self, stage, configs, tta=False, tta_size=48):
        self._stage = stage
        self._configs = configs
        self._tta = tta
        self._tta_size = tta_size

        self._image_size = (configs["image_size"], configs["image_size"])

        self._data = pd.read_csv(
            os.path.join(configs["data_path"], "{}.csv".format(stage)))

        self._pixels = self._data["pixels"].tolist()
        self._emotions = pd.get_dummies(self._data["emotion"])

        self._transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.ToTensor(),
        ])
Example #19
def test(args):
    model = model_type(num_classes)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
    dataset = image_dataset(False, x_transforms, y_transforms)
    dataloaders = DataLoader(dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for x, label_y in dataloaders:
            predict_y = model(x)
            predict_y = utils.get_predict_image(predict_y[0])
            plt.subplot(1, 2, 1)
            plt.imshow(predict_y)
            plt.subplot(1, 2, 2)
            plt.imshow(transforms.ToPILImage()(label_y[0]))
            plt.pause(0.01)
        plt.show()
Example #20
def evaluate_metrics(model_path):
    model = Deblurrer()
    model.load_state_dict(
        torch.load(model_path, map_location=torch.device('cpu')))
    model.eval()
    dataset = LFWC(["../data/test/faces_blurred"], "../data/test/faces")
    #dataset = FakeData(size=1000, image_size=(3, 128, 128), transform=transforms.ToTensor())
    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=1,
                                              shuffle=True)
    count = 0
    avg0 = 0
    avg1 = 0
    avgs = 0
    avgs1 = 0
    for data in data_loader:
        blurred_img = Variable(data['blurred'])
        nonblurred = Variable(data['nonblurred'])
        #im = Image.open(image_path)
        #transform = transforms.ToTensor()
        transformback = transforms.ToPILImage()

        out = model(blurred_img)
        #print(out.shape)
        outIm = transformback(out[0])
        nonblurred = transformback(nonblurred[0])
        blurred = transformback(blurred_img[0])
        ps = psnr(outIm, nonblurred)
        avg0 += ps
        ps1 = psnr(blurred, nonblurred)
        avg1 += ps1
        similarity = ssim1(outIm, nonblurred)
        avgs += similarity
        sim1 = ssim1(blurred, nonblurred)
        avgs1 += sim1
        count += 1
    avg0 /= count
    avg1 /= count
    avgs /= count
    avgs1 /= count
    print(avg0)
    print(avg1)
    print(avgs)
    print(avgs1)
Example #21
def test_image():
    #model = Unet(3, 3)
    model = Unet(1, 1)
    model.load_state_dict(
        torch.load('ckp_xin/fd3model.pth', map_location='cpu'))
    model.eval()
    '''
    img = Image.open("data/aug/24.bmp")
    img = x_transforms(img)
    img = torch.unsqueeze(img,0)
    '''
    #img_x = pydicom.dcmread("data/aug/32.dcm")
    #img_x = WL(img_x,150,300)
    #img_x = Image.fromarray(img_x)
    img_x = Image.open("data/aug/76.bmp")
    img_x = img_x.convert('L')
    #img_x.save('data/aug/32dtp.bmp')
    #img_x.show()

    img_x = x_transforms(img_x)
    img_x = torch.unsqueeze(img_x, 0)

    labels = Image.open("data/aug/76_mask.bmp")
    labels = labels.convert('L')
    labels = y_transforms(labels)
    labels = torch.unsqueeze(labels, 0)

    out = model(img_x)
    print(IOU(out.to("cpu"), labels.to("cpu")).item())
    '''
    img_mask = Image.open("data/aug/166_mask.png")
    img_mask = y_transforms(img_mask)
    img_mask = torch.unsqueeze(img_mask,0)

    out = model(img)
    dice = dice_coeff(out,img_mask)
    print(dice.detach().numpy())
    '''

    trann = transforms.ToPILImage()
    out = torch.squeeze(out)
    out = trann(out)
    out.save("data/aug/76_maskfd3.bmp")
Example #22
def test():
    model = Unet(3, 1).to(device)
    model.load_state_dict(torch.load(args.ckp))
    liver_dataset = LiverDataset("data/test",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    with torch.no_grad():
        for x, _, x_path in tqdm(dataloaders):
            x_path = str(x_path).split("/")
            x = x.to(device)
            y = model(x)
            img_numpy = y[0].cpu().float().numpy()
            img_numpy = (np.transpose(img_numpy, (1, 2, 0)))
            img_numpy = (img_numpy >= 0.5) * 255
            img_out = img_numpy.astype(np.uint8)
            imgs = transforms.ToPILImage()(img_out)
            imgs.save('result/' + x_path[2][:-3])
Example #23
    def __init__(self, stage, fold_idx, configs):
        """ fold_idx: test fold """
        self._configs = configs
        self._stage = stage
        self._fold_idx = fold_idx
        self._data = []

        for fold_path in glob.glob("./saved/data/CK+/npy_folds/*.npy"):
            fold_name = os.path.basename(fold_path)
            if str(fold_idx) == fold_name[5:-4] and stage == "test":
                self._data = np.load(fold_path, allow_pickle=True).tolist()
            if str(fold_idx) != fold_name[5:-4] and stage == "train":
                self._data.extend(
                    np.load(fold_path, allow_pickle=True).tolist())

        self._image_size = (configs["image_size"], configs["image_size"])
        self._transform = transforms.Compose(
            [transforms.ToPILImage(),
             transforms.ToTensor()])
Example #24
def _test_process(model,
                  te_loader,
                  length,
                  save_pre: bool = False,
                  save_path: str = "") -> dict:
    cal_total_metrics = CalTotalMetric(num=length, beta_for_wfm=1)
    to_pil = transforms.ToPILImage()

    tqdm_iter = tqdm(enumerate(te_loader), total=len(te_loader), leave=False)
    for test_batch_id, test_data in tqdm_iter:
        tqdm_iter.set_description(f"{exp_name}: te=>{test_batch_id + 1}")
        with torch.no_grad():
            in_imgs, in_names, in_mask_paths = test_data
            in_imgs = in_imgs.cuda(non_blocking=True)
            outputs = model(in_imgs)
        outputs_np = outputs.sigmoid().cpu().detach()

        for item_id, out_item in enumerate(outputs_np):
            gimg_path = os.path.join(in_mask_paths[item_id])
            gt_img = Image.open(gimg_path).convert("L")
            out_img = to_pil(out_item).resize(gt_img.size)

            if save_pre:
                oimg_path = os.path.join(save_path, in_names[item_id] + ".png")
                out_img.save(oimg_path)

            gt_img = np.asarray(gt_img)
            out_img = np.asarray(out_img)

            # Normalize
            gt_img = gt_img / (gt_img.max() + 1e-8)
            gt_img = np.where(gt_img > 0.5, 1, 0)
            out_img_max = out_img.max()
            out_img_min = out_img.min()
            if out_img_max == out_img_min:
                out_img = out_img / 255
            else:
                out_img = (out_img - out_img_min) / (out_img_max - out_img_min)

            # Update the metric records
            cal_total_metrics.update(out_img, gt_img)
    results = cal_total_metrics.show()
    return results
Example #25
def decode(x, bottleneck):

    decoder = Decoder(int(bottleneck / 512))

    if torch.cuda.is_available():
        decoder = Decoder(int(bottleneck / 512)).cuda()
        decoder.load_state_dict(
            torch.load('./project/models/model_LSTM_' + str(bottleneck) +
                       '/decoder'))
    else:
        decoder.load_state_dict(
            torch.load('./project/models/model_LSTM_' + str(bottleneck) +
                       '/decoder',
                       map_location=torch.device('cpu')))

    decoder.eval()

    if torch.cuda.is_available():
        d_1 = (torch.zeros(1, 512, 16, 16).cuda(), torch.zeros(1, 512, 16,
                                                               16).cuda())
        d_2 = (torch.zeros(1, 512, 32, 32).cuda(), torch.zeros(1, 512, 32,
                                                               32).cuda())
        d_3 = (torch.zeros(1, 256, 64, 64).cuda(), torch.zeros(1, 256, 64,
                                                               64).cuda())
        d_4 = (torch.zeros(1, 128, 128,
                           128).cuda(), torch.zeros(1, 128, 128, 128).cuda())
    else:
        d_1 = (torch.zeros(1, 512, 16, 16), torch.zeros(1, 512, 16, 16))
        d_2 = (torch.zeros(1, 512, 32, 32), torch.zeros(1, 512, 32, 32))
        d_3 = (torch.zeros(1, 256, 64, 64), torch.zeros(1, 256, 64, 64))
        d_4 = (torch.zeros(1, 128, 128, 128), torch.zeros(1, 128, 128, 128))

    binary = np.unpackbits(x, axis=-1)
    binary = torch.from_numpy(binary).float() * 2 - 1
    result = torch.zeros(1, 3, 256, 256)
    for t in range(rnn_num):
        outputs, d_1, d_2, d_3, d_4 = decoder(binary[t], d_1, d_2, d_3, d_4)
        result = result + outputs
    toImage = transforms.ToPILImage()
    image = toImage(result.squeeze())

    return image
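The `binary` line maps packed bytes to a bipolar {-1, +1} code: np.unpackbits expands each uint8 into 8 bits, and `* 2 - 1` sends 0 -> -1 and 1 -> +1. For example:

import numpy as np
import torch

x = np.array([0b10110000], dtype=np.uint8)
bits = np.unpackbits(x, axis=-1)                  # [1, 0, 1, 1, 0, 0, 0, 0]
bipolar = torch.from_numpy(bits).float() * 2 - 1
print(bipolar)  # tensor([ 1., -1.,  1.,  1., -1., -1., -1., -1.])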
Example #26
    def show_batch(self, rows=3, imgsize=(20, 20), figsize=(10, 10)):
        with plt_inline():
            old_backend = matplotlib.get_backend()
            Xs, ys = next(iter(self.train_dl))
            Xs = Xs[:rows * rows]
            ys = ys[:rows * rows]
            axs = subplots(rows, rows, imgsize=imgsize, figsize=figsize)
            invnormalize = self.inv_normalize()
            for x, y, ax in zip(Xs, ys, axs.flatten()):
                x = x.cpu()
                x = invnormalize(x)
                #x = (1/(2*2.25)) * x / 0.25 + 0.5
                im = transforms.ToPILImage()(x).convert("RGB")
                im = transforms.Resize([100, 100])(im)
                ax.imshow(im)
                ax.set_title(f'y={y}')
            for ax in axs.flatten()[len(Xs):]:
                ax.axis('off')
            plt.tight_layout()
            plt.show()
Example #27
def superresolve(img, seed=2019):
    transform = transforms.Compose([transforms.ToTensor()])
    inputs = transform(img)
    if torch.cuda.is_available():
        inputs = inputs.cuda()
    with torch.no_grad():
        outputs = net(inputs.unsqueeze(0)).squeeze()
    torch.manual_seed(seed)
    noise = tdist.Normal(torch.tensor([0.0]), torch.tensor([0.1]))
    # sample noise on the same device as the output to avoid a device mismatch
    tmp = noise.sample(outputs.size()).squeeze(3).to(outputs.device)
    # zero the noise at even coordinates, i.e. the original low-res pixel grid
    # (assumes a 256x256 output, the 2x upscale the original nested loops hardcoded)
    tmp[:, ::2, ::2] = 0.0
    outputs = outputs + tmp
    toImage = transforms.ToPILImage()
    res = toImage(outputs.cpu())

    return res
Example #28
    def _predict_phase_(self):
        dataset, indices = self.get_predict_dataset()
        self.model.eval()
        with torch.no_grad():
            for idx in indices:
                if idx < 0 or idx >= len(dataset):
                    continue
                sample = dataset[idx]

                inputs = sample['image']
                inputs = inputs.expand(1, *inputs.size()).to(self.device)

                ground_truth = sample['classes']

                outputs = self.model(inputs)
                _, predicted = torch.max(outputs, 1)

                plt.imshow(transforms.ToPILImage()(inputs.squeeze(0)))
                plt.title('predicted classes: {}\nground-truth classes: {}'.format(
                    CLASSES[predicted.item()], CLASSES[ground_truth]))
                plt.show()
Example #29
    def __getitem__(self, idx):
        img_path = os.path.join('./data', self.dataframe.iloc[idx, 2])
        img = sio.imread(img_path, mode='RGB')
        img = transforms.ToPILImage()(img)
        if self.LAP:
            age, std = self.dataframe.iloc[idx, 0].astype(
                'float'), self.dataframe.iloc[idx, 3].astype('float')
        else:
            age = self.dataframe.iloc[idx, 0].astype('float')

        if not 0 <= age < 100:  # `age in range(0, 100)` fails for non-integer floats
            age = 0

        if self.transform is not None:
            img = self.transform(img)  # ndarray => torch.Tensor + Normalization

        if self.LAP:
            return img, age, std
        else:
            return img, age
Example #30
    def __init__(self, stage, configs, tta=False, tta_size=48):
        self._stage = stage
        self._configs = configs
        self._tta = tta
        self._tta_size = tta_size

        self._image_size = (configs['image_size'], configs['image_size'])

        if stage == 'train':
            self._data = pd.read_csv(
                os.path.join(configs['data_path'], 'train.npy'))
        elif stage == 'val':
            self._data = pd.read_csv(
                os.path.join(configs['data_path'], 'val.npy'))
        else:
            raise ValueError("stage must be 'train' or 'val'")

        self._transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.ToTensor(),
        ])