Example #1
    def __getitem__(self, index):
        path = os.path.join(self.data_path, self.samples[index])
        #img = default_loader(path)
        img = tif_loader(path)
        #print('Test ', np.array(img).shape)
        if self.random_crop:
            # tif_loader returns an H x W x C NumPy array, so crop it directly
            # rather than with the PIL-based transforms.
            h, w, _ = img.shape
            targetH, targetW = self.image_shape[0], self.image_shape[1]
            randomH = np.random.randint(0, h - targetH + 1)
            randomW = np.random.randint(0, w - targetW + 1)
            imgCrop = img[randomH:(randomH + targetH),
                          randomW:(randomW + targetW), :]
        else:
            # Fall back to a center crop of the array; Resize/RandomCrop from
            # torchvision expect a PIL image, not a NumPy array.
            h, w, _ = img.shape
            targetH, targetW = self.image_shape[0], self.image_shape[1]
            top = max((h - targetH) // 2, 0)
            left = max((w - targetW) // 2, 0)
            imgCrop = img[top:top + targetH, left:left + targetW, :]
        imgCrop = np.transpose(imgCrop, [2, 0, 1])  # HWC -> CHW
        imgTensor = torch.from_numpy(imgCrop)
        #img = transforms.ToTensor()(img)  # turn the image to a tensor
        img = normalize(imgTensor)
        #img = transfer2tensor(img)

        if self.return_name:
            return self.samples[index], img
        else:
            return img
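The example above relies on a tif_loader helper that is not included in the listing. A minimal sketch, under the assumption that it should return a float32 H x W x C NumPy array (the name and behaviour are inferred, not taken from the original project):

import numpy as np
from PIL import Image

def tif_loader(path):
    # Pillow reads single-frame TIFFs directly; multi-page stacks would need
    # extra handling (e.g. via the tifffile package).
    with Image.open(path) as im:
        arr = np.asarray(im, dtype=np.float32)
    if arr.ndim == 2:  # make the channel axis explicit
        arr = arr[:, :, None]
    return arr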
Example #2
    def __getitem__(self, index):
        file_idx = np.where(self.samples > index)[0][0]
        if file_idx > 0:
            index -= self.samples[file_idx - 1]
        with h5py.File(self.mouse_files[file_idx], 'r') as f:
            img = np.uint8(f['frames'][index] / 100 * 255)
            img = img[:, :, None]
            img = Image.fromarray(np.tile(img, (1, 1, 3)))

        if self.random_crop:
            imgw, imgh = img.size
            if imgh < self.image_shape[0] or imgw < self.image_shape[1]:
                img = transforms.Resize(min(self.image_shape))(img)
            img = transforms.RandomCrop(self.image_shape)(img)
        else:
            img = transforms.Resize(self.image_shape)(img)
            img = transforms.RandomCrop(self.image_shape)(img)

        img = transforms.ToTensor()(img)  # turn the image to a tensor
        img = normalize(img)

        if self.return_name:
            return self.mouse_files[file_idx], img
        else:
            return img
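Here self.samples is evidently an array of cumulative frame counts, so that np.where(self.samples > index) picks the HDF5 file containing the global index. A sketch of how such an array could be built (hypothetical constructor code, not part of the original):

import h5py
import numpy as np

def build_cumulative_counts(mouse_files):
    counts = []
    for path in mouse_files:
        with h5py.File(path, 'r') as f:
            counts.append(len(f['frames']))
    return np.cumsum(counts)  # e.g. array([1200, 2500, ...])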
Example #3
def _get_generated_image(
    x,
    mask=None,
):
    # global generated_image_idx
    global netG
    if mask is None:
        mask = np.zeros(x.shape, dtype=np.uint8)

    if len(x.shape) == 2:
        x = PIL.Image.fromarray(np.stack((x, ) * 3, axis=-1))
    elif len(x.shape) == 3 and x.shape[-1] == 3:
        x = PIL.Image.fromarray(x)
    else:
        raise ValueError(
            "expected a 2-D grayscale or an H x W x 3 RGB array, got shape {}".format(
                x.shape))
    mask = PIL.Image.fromarray(mask)
    x = transforms.Resize(config['image_shape'][:-1])(x)

    x = transforms.CenterCrop(config['image_shape'][:-1])(x)
    mask = transforms.Resize(config['image_shape'][:-1])(mask)
    mask = transforms.CenterCrop(config['image_shape'][:-1])(mask)
    x = transforms.ToTensor()(x)
    mask = transforms.ToTensor()(mask)[0].unsqueeze(dim=0)
    x = normalize(x)
    x = x * (1. - mask)
    x = x.unsqueeze(dim=0)
    mask = mask.unsqueeze(dim=0)

    if cuda:
        # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        x = x.cuda()
        mask = mask.cuda()

    # Inference
    x1, x2, offset_flow, feature = netG(x, mask)
    inpainted_result = x2 * mask + x * (1. - mask)
    if cuda:
        inpainted_result = inpainted_result.cpu()
    np_inpainted_result = np.rollaxis(
        np.uint8(np.squeeze(inpainted_result.detach().numpy(), axis=0)), 0, 3)
    assert np_inpainted_result.shape == (256, 256, 3)
    # return 255-np_inpainted_result

    # vutils.save_image(inpainted_result, args.output, padding=0, normalize=True)
    # from PIL import Image
    grid = vutils.make_grid(inpainted_result)
    # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
    ndarr = grid.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to(
        'cpu', torch.uint8).numpy()

    return ndarr, feature
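Several of these examples call a normalize helper on tensors produced by ToTensor. It is presumably the usual GAN-style mapping from [0, 1] to [-1, 1]; a minimal sketch (an assumption, not the project's own code):

def normalize(x):
    # map a tensor in [0, 1] to [-1, 1]
    return x * 2.0 - 1.0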
Example #4
    def train(self, X, Y, G_X, G_Y):
        '''
        :param X:   placeholder for X
        :param Y:   placeholder for Y
        :param G_X: tfrecords generator for X
        :param G_Y: tfrecords generator for Y
        :return:    None
        '''
        self.model(X).optimizer(Y)
        with tf.Session() as session:
            self.writer = tf.summary.FileWriter(logdir=self.writer_path,
                                                graph=session.graph)
            if not self.has_model():
                session.run(tf.global_variables_initializer())
            else:
                print("=> loading session from: {:s}".format(self.save_path))
                self.saver.restore(session, self.save_path)

            tf.train.start_queue_runners(sess=session,
                                         coord=tf.train.Coordinator())
            global_step = session.graph.get_tensor_by_name(
                "leaning_rate/global_step:0")
            for index in range(global_step.eval(), self._conf.total_batch):
                (samples, labels) = session.run([G_X, G_Y])
                # _X = np.reshape(G.normalize(samples), [self.batch_size, self.image_size * self.image_size * 3])
                (_X, _Y) = (utils.normalize(samples),
                            utils.reformat(labels, self._conf.label_size))

                (_, params, predict, loss, accuracy, output,
                 summary) = session.run([
                     self.optimizer, self.params_value, self.predict,
                     self.loss, self.accuracy, self.output, self.merged_summary
                 ],
                                        feed_dict={
                                            self.X: _X,
                                            self.Y: _Y
                                        })

                if index % self._conf.train["summary_steps"] == 0:
                    self.writer.add_summary(summary, index)
                    self.writer.flush()
                if index % self._conf.train["saving_steps"] == 0:
                    for i in range(len(self.params_key)):
                        print("=> {:s} -> {:s}".format(self.params_key[i],
                                                       str(params[i][0])))
                    print("=> step={:d}, loss={:.5f}, accuracy:{:.3f}%".format(
                        index, loss, accuracy))
                    if len(params) > 0:
                        print("\n\n\n")
                    self.saver.save(session, self.save_path)
Example #5
    def __getitem__(self, index):
        path = os.path.join(self.data_path, self.samples[index])
        img = default_loader(path)

        if self.random_crop:
            imgw, imgh = img.size
            if imgh < self.image_shape[0] or imgw < self.image_shape[1]:
                img = transforms.Resize(min(self.image_shape))(img)
            img = transforms.RandomCrop(self.image_shape)(img)
        else:
            img = transforms.Resize(self.image_shape)(img)
            img = transforms.RandomCrop(self.image_shape)(img)

        img = transforms.ToTensor()(img)  # turn the image to a tensor
        img = normalize(img)

        if self.return_name:
            return self.samples[index], img
        else:
            return img
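default_loader is assumed to behave like torchvision's folder loader: open the file with Pillow and convert it to a given mode (Example #15 below passes chan='L'). A minimal sketch under that assumption:

from PIL import Image

def default_loader(path, chan='RGB'):
    with open(path, 'rb') as f:
        img = Image.open(f)
        return img.convert(chan)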
Example #6
    def iterator_train(self, X, Y, iterator):
        self.model(X).optimizer(Y)
        with tf.Session() as session:
            self.writer = tf.summary.FileWriter(logdir=self.writer_path,
                                                graph=session.graph)
            if not self.has_model():
                session.run(tf.global_variables_initializer())
            else:
                print("=> loading session from: {:s}".format(self.save_path))
                self.saver.restore(session, self.save_path)

            global_step = session.graph.get_tensor_by_name(
                "leaning_rate/global_step:0")
            for index in range(global_step.eval(), self._conf.total_batch):
                for (count, step_count, samples, labels) in iterator():
                    (_X, _Y) = (utils.normalize(samples),
                                utils.reformat(labels, self._conf.label_size))

                    (_, params, predict, loss, accuracy, output,
                     summary) = session.run([
                         self.optimizer, self.params_value, self.predict,
                         self.loss, self.accuracy, self.output,
                         self.merged_summary
                     ],
                                            feed_dict={
                                                self.X: _X,
                                                self.Y: _Y
                                            })
                    if step_count % self._conf.train["summary_steps"] == 0:
                        self.writer.add_summary(summary, index)
                        self.writer.flush()
                    if step_count % self._conf.train["saving_steps"] == 0:
                        for i in range(len(self.params_key)):
                            print("=> {:s} -> {:s}".format(
                                self.params_key[i], str(params[i][0])))
                        print("=> step={:d}, loss={:.5f}, accuracy:{:.3f}%".
                              format(index, loss, accuracy))
                        if len(params) > 0:
                            print("\n\n\n")
                        self.saver.save(session, self.save_path)
def main():
    args = parser.parse_args()
    config = get_config(args.config)

    # CUDA configuration
    cuda = config['cuda']
    device_ids = config['gpu_ids']
    if cuda:
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
            str(i) for i in device_ids)
        device_ids = list(range(len(device_ids)))
        config['gpu_ids'] = device_ids
        cudnn.benchmark = True

    print("Arguments: {}".format(args))

    # Set random seed
    if args.seed is None:
        args.seed = random.randint(1, 10000)
    print("Random seed: {}".format(args.seed))
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if cuda:
        torch.cuda.manual_seed_all(args.seed)

    print("Configuration: {}".format(config))

    try:  # for unexpected error logging
        with torch.no_grad():   # enter no grad context
            if is_image_file(args.image):
                if args.mask and is_image_file(args.mask):
                    # Test a single masked image with a given mask
                    x = default_loader(args.image)
                    mask = default_loader(args.mask)
                    x = transforms.Resize(config['image_shape'][:-1])(x)
                    x = transforms.CenterCrop(config['image_shape'][:-1])(x)
                    mask = transforms.Resize(config['image_shape'][:-1])(mask)
                    mask = transforms.CenterCrop(
                        config['image_shape'][:-1])(mask)
                    x = transforms.ToTensor()(x)
                    mask = transforms.ToTensor()(mask)[0].unsqueeze(dim=0)
                    x = normalize(x)
                    x = x * (1. - mask)
                    x = x.unsqueeze(dim=0)
                    mask = mask.unsqueeze(dim=0)
                elif args.mask:
                    raise TypeError(
                        "{} is not an image file.".format(args.mask))
                else:
                    # Test a single ground-truth image with a random mask
                    ground_truth = default_loader(args.image)
                    ground_truth = transforms.Resize(
                        config['image_shape'][:-1])(ground_truth)
                    ground_truth = transforms.CenterCrop(
                        config['image_shape'][:-1])(ground_truth)
                    ground_truth = transforms.ToTensor()(ground_truth)
                    ground_truth = normalize(ground_truth)
                    ground_truth = ground_truth.unsqueeze(dim=0)
                    bboxes = random_bbox(
                        config, batch_size=ground_truth.size(0))
                    x, mask = mask_image(ground_truth, bboxes, config)

                # Set checkpoint path
                if not args.checkpoint_path:
                    checkpoint_path = os.path.join('checkpoints',
                                                   config['dataset_name'],
                                                   config['mask_type'] + '_' + config['expname'])
                else:
                    checkpoint_path = args.checkpoint_path

                # Define the trainer
                netG = Generator(config['netG'], cuda, device_ids)
                # Resume weight
                last_model_name = get_model_list(
                    checkpoint_path, "gen", iteration=args.iter)
                netG.load_state_dict(torch.load(last_model_name))
                model_iteration = int(last_model_name[-11:-3])
                print("Resume from {} at iteration {}".format(
                    checkpoint_path, model_iteration))

                if cuda:
                    netG = nn.parallel.DataParallel(
                        netG, device_ids=device_ids)
                    x = x.cuda()
                    mask = mask.cuda()

                # Inference
                x1, x2, offset_flow = netG(x, mask)
                inpainted_result = x2 * mask + x * (1. - mask)

                vutils.save_image(inpainted_result, args.output,
                                  padding=0, normalize=True)
                print("Saved the inpainted result to {}".format(args.output))
                if args.flow:
                    vutils.save_image(offset_flow, args.flow,
                                      padding=0, normalize=True)
                    print("Saved offset flow to {}".format(args.flow))
            else:
                raise TypeError("{} is not an image file.".format(args.image))
        # exit no grad context
    except Exception as e:  # for unexpected error logging
        print("Error: {}".format(e))
        raise e
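The random-mask branch above depends on random_bbox and mask_image. A rough sketch of what they are expected to do, assuming config carries 'image_shape' and 'mask_shape' entries as elsewhere in these examples (the real utilities may differ):

import torch

def random_bbox(config, batch_size=1):
    img_h, img_w = config['image_shape'][:2]
    mask_h, mask_w = config['mask_shape']
    top = torch.randint(0, img_h - mask_h + 1, (1,)).item()
    left = torch.randint(0, img_w - mask_w + 1, (1,)).item()
    return top, left, mask_h, mask_w

def mask_image(x, bbox, config):
    top, left, h, w = bbox
    mask = torch.zeros(1, 1, x.size(2), x.size(3))
    mask[:, :, top:top + h, left:left + w] = 1.
    return x * (1. - mask), mask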
Example #8
from utils.tools import get_word_to_index, tokenize, normalize
from utils.resourceManager import get_data
from utils.statsmanager import StatsManager

print("Getting data...")
data = get_data("train", "zh")
a_to_index = get_word_to_index([d[0] for d in data])
b_to_index = get_word_to_index([d[1] for d in data])
data = [(tokenize(a, a_to_index), tokenize(b, b_to_index), score)
        for a, b, score in data]

val_data = get_data("dev", "zh")
val_data = [(tokenize(a, a_to_index), tokenize(b, b_to_index), score)
            for a, b, score in val_data]

val_a_normalized, val_a_len = normalize([row[0] for row in val_data])
val_b_normalized, val_b_len = normalize([row[1] for row in val_data])
val_a = torch.tensor(val_a_normalized, dtype=torch.long)
val_b = torch.tensor(val_b_normalized, dtype=torch.long)
val_labels = torch.tensor([row[2] for row in val_data]).view(
    (len(val_data), 1))

print("Tokenized data")

model = LSTM(a_vocab_size=len(a_to_index),
             b_vocab_size=len(b_to_index),
             padding_index=0,
             lstms_in_out=((5, 5), (5, 5)),
             linear_layers=(10, 5),
             out_size=1,
             hidden_activation=nn.ReLU,
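The listing above is cut off mid-call. For context, the normalize used on the token lists appears to pad variable-length sequences to a common length and also return their original lengths; a hypothetical sketch:

def normalize(sequences, pad_index=0):
    max_len = max(len(s) for s in sequences)
    lengths = [len(s) for s in sequences]
    padded = [list(s) + [pad_index] * (max_len - len(s)) for s in sequences]
    return padded, lengths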
Example #9
    def test(self):
        netG=Generator().to(device)
        netD=Discriminator().to(device)
        path = os.path.join(self.dataset_path, 'test')
        print(path, 'PATH')
        os.getcwd()

        path2 = self.choose_net

        beta1 = 0.5
        beta2 = 0.999

        Testset = torchvision.datasets.DatasetFolder(root=path,
                                                     loader=npy_loader,
                                                     extensions=('.npy',))

        test_loader = torch.utils.data.DataLoader(dataset=Testset,
                                                  batch_size=1,
                                                  shuffle=False)


        g=0

        g_optimizer = torch.optim.Adam(netG.parameters(), 0.0001, [beta1, beta2])
        d_optimizer = torch.optim.Adam(netD.parameters(), 0.0001, [beta1, beta2])
        netG, g_optimizer = amp.initialize(netG, g_optimizer, opt_level='O1')
        netD, d_optimizer = amp.initialize(netD, d_optimizer, opt_level='O1')
        g_optimizer.zero_grad()
        d_optimizer.zero_grad()

        netG = torch.nn.DataParallel(netG, device_ids=[id], output_device=id)
        netD = torch.nn.DataParallel(netD, device_ids=[id], output_device=id)


        try_loading_file = True
        if try_loading_file:
            try:
                # loads only the network weights, NOT the optimizer state etc.
                netG.load_state_dict(
                    torch.load(os.path.join(path2, 'netG_synthetic.pt'),
                               map_location={'cuda:0': 'cpu'}))
                netD.load_state_dict(
                    torch.load(os.path.join(path2, 'netD_synthetic.pt'),
                               map_location={'cuda:0': 'cpu'}))
                print("loaded model from file")
            except Exception:
                print("loading model from file failed; created new model")

        c_dim = 2

        # =================================================================================== #
        #                                 5. Testing                                          #
        # =================================================================================== #

        """Translate images using StarGAN trained on a single dataset."""

        loss_metric = nn.MSELoss()
        with torch.no_grad():
             sum_dice = 0
             count = 0
             correct = 0
             total = 0
             total_auc = 0
             netD = netD.eval()
             netG = netG.train()
             total_rec = 0
             total_diff = 0
             total_var = 0
             total_var2 = 0
             sum_ssim = 0
             threshtot = 0
             count_diseased = 0
             count_healthy = 0

             for i, (X3, c_org) in enumerate(test_loader):

                 GT = -X3[:, 1, :, :]
                 thresh = threshold_otsu(np.array(abs(GT[0,:,:])))
                 threshtot+=thresh
                 x_real = torch.tensor(X3[:, :1, :, :]).to(device)
                 noise = torch.rand(x_real.shape).to(device)*0
                 x_real = (x_real + 0.05 * noise).half()
                 (_, out_cls) = netD(x_real,g)
                 _, predicted = torch.max(out_cls.data, 1)
                 correct += (predicted.cpu() == c_org).sum().item()
                 total+=1
                 avg_thresh=0.44195

                 c_trg_list = create_labels(c_org, c_dim)

                 # Translate images.
                 x_fake_list = [x_real]
                 for c_trg in c_trg_list:
                     x_fake_list.append((netG(x_real, c_trg)))

                 if c_org == 0:

                     # check whether index 2 is the diseased output (I think so);
                     # then always take the difference between diseased and original
                     diff = normalize(x_fake_list[0][0, 0, :, :]).cpu() - normalize(x_fake_list[1][0, 0, :, :]).cpu()

                     thresholded_images = np.double(abs(diff) > avg_thresh)*1
                     GTthresh = np.double(abs(GT )> avg_thresh) * 1
                     reconstruction = loss_metric(normalize(x_fake_list[1]), normalize(x_fake_list[0]))
                     varianz=diff.var()
                     total_rec += reconstruction
                     total_var+=varianz
                     count_healthy += 1

                 else:
                     # as above: difference between the original and the generated healthy image
                     diff = normalize(x_fake_list[0][0, 0, :, :]).cpu() - normalize(x_fake_list[1][0, 0, :, :]).cpu()

                     thresh = threshold_otsu(np.array(abs(diff)))
                     print(i, thresh, 2 * abs(thresh))
                     thresholded_images = np.double(abs(diff) > avg_thresh) * 1  # alternative: abs(diff) > 1 * abs(thresh)
                     GTthresh = np.double(abs(GT) > avg_thresh) * 1
                     region = loss_metric(diff, GT[0, :, :])
                     total_diff += region
                     varianz2 = (normalize(np.array(GT[0, :, :])) - normalize(np.array(diff))).var()
                     total_var2 += varianz2

                     count_diseased += 1
                     (output_DSC, avg) = eval_binary_classifier(np.array(GTthresh[0, :, :]), thresholded_images)
                     sum_dice += output_DSC['DSC']
                     count += 1

                     ssim_val = ssim(visualize(diff[None,None,...]), visualize(GT[None,...]), data_range=1, size_average=False)
                     sum_ssim+=ssim_val


                     pixel_wise_cls = np.array(torch.tensor(visualize(abs(diff))).view(1, -1))[0, :]
                     pixel_wise_gt = np.array(torch.tensor(GTthresh).view(1, -1))[0, :]

                     auc = roc_auc_score(pixel_wise_gt, pixel_wise_cls)
                     print('auc', i, auc)
                     total_auc += auc
                 if i%10==0:

                     plt.figure(i)      #plot results
                     ax = plt.subplot(2, 4, 1)
                     plt.imshow((normalize((x_fake_list[0][0, 0, :, :]))).cpu())
                     ax.title.set_text('original')
                     ax = plt.subplot(2, 4, 2)
                     plt.imshow(normalize(x_fake_list[1][0, 0, :, :]).cpu())
                     ax.title.set_text('label 0')
                     ax.axis('off')
                     ax = plt.subplot(2, 4, 3)
                     plt.imshow(normalize(x_fake_list[2][0, 0, :, :]).cpu())
                     ax.axis('off')
                     ax.title.set_text('label 1')

                     ax = plt.subplot(2, 4, 5)
                     plt.imshow(thresholded_images)
                     ax.title.set_text('thresholded difference')
                     ax = plt.subplot(2, 4, 6)
                     plt.imshow(GT[0, :, :])
                     ax.title.set_text('Ground Truth')
                     ax = plt.subplot(2, 4, 4)
                     plt.imshow(diff)
                     ax.title.set_text('difference')
                     if c_org != 0:
                        plt.suptitle(auc)
                     ax = plt.subplot(2, 4, 8)
                     plt.imshow(GTthresh[0, :, :])
                     ax.title.set_text('GTthresh')

        accuracy = 100 * correct / total
        avg_diff = total_diff / count_diseased
        avg_auc = total_auc / count_diseased
        avg_rec = total_rec / count_healthy
        avg_var = total_var / count_healthy
        avg_var2 = total_var2 / count_diseased
        avg_ssim = sum_ssim / count_diseased
        avg_dice = sum_dice / count
        print('average mse reconstruction error', avg_rec,
              'average mse in segmentation', avg_diff,
              'average Dice', avg_dice,
              'classification accuracy', accuracy,
              'AUROC', avg_auc,
              'SSIM', avg_ssim,
              'variance healthy', avg_var,
              'variance diseased', avg_var2)
        f = open('./descargan.txt', 'w')
        f.write('auroc ' + str(avg_auc) + '\n')  # average AUROC over the diseased cases
        f.write('MSE(a_h, r_h) ' + str(avg_rec) + '\n')
        f.write('variance reconstruction ' + str(avg_var) + '\n')
        f.write('variance difference ' + str(avg_var2) + '\n')
        f.write('Dice ' + str(avg_dice) + '\n')
        f.write('classification accuracy ' + str(accuracy) + '\n')
        f.write('MSE(gt, d) ' + str(avg_diff) + '\n')
        f.write('SSIM ' + str(avg_ssim) + '\n')
        f.close()
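create_labels is assumed to follow the StarGAN convention: for each of the c_dim target domains, build a batch of one-hot target labels. A sketch under that assumption (the project's own implementation may differ):

import torch

def create_labels(c_org, c_dim):
    c_trg_list = []
    for i in range(c_dim):
        c_trg = torch.zeros(c_org.size(0), c_dim)
        c_trg[:, i] = 1.
        c_trg_list.append(c_trg)
    return c_trg_list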
Example #10
def generateInpaintedImage(args, netG, imagePath):
    config = get_config(args.g_config)
    occlusions = []

    # CUDA configuration
    cuda = config['cuda']
    device_ids = config['gpu_ids']
    if cuda:
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
            str(i) for i in device_ids)
        device_ids = list(range(len(device_ids)))
        config['gpu_ids'] = device_ids
        cudnn.benchmark = True

    print("Arguments: {}".format(args))

    # Set random seed
    if args.seed is None:
        args.seed = random.randint(1, 10000)
    print("Random seed: {}".format(args.seed))
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if cuda:
        torch.cuda.manual_seed_all(args.seed)

    try:  # for unexpected error logging
        with torch.no_grad():  # enter no grad context
            if is_image_file(imagePath):
                if args.mask and is_image_file(args.mask):
                    # Test a multiple masked image with a given mask
                    x = default_loader(imagePath)
                    x = transforms.Resize([512, 1024])(x)

                    mask = default_loader(args.mask)
                    mask = transforms.Resize(config['image_shape'][:-1])(mask)
                    mask = transforms.CenterCrop(
                        config['image_shape'][:-1])(mask)
                    mask = transforms.ToTensor()(mask)[0].unsqueeze(dim=0)
                    mask = mask.unsqueeze(dim=0)

                    w, h = x.size
                    first = x.crop((0, 0, w // 3, h))
                    second = x.crop((w // 3, 0, ((w // 3) * 2) + 2, h))
                    third = x.crop(((w // 3) * 2, 0, w, h))

                    for y in [first, second, third]:
                        y = transforms.CenterCrop(
                            config['image_shape'][:-1])(y)
                        y = transforms.ToTensor()(y)
                        y = normalize(y)
                        y = y * (1. - mask)
                        occlusions.append(y)

                elif args.mask:
                    raise TypeError("{} is not an image file.".format(
                        args.mask))

                default_image = default_loader(imagePath)
                di_w, di_h = default_image.size

                for idx, occlusion in enumerate(occlusions):
                    if cuda:
                        occlusion = occlusion.cuda()
                        mask = mask.cuda()

                    # Inference
                    x1, x2, offset_flow = netG(occlusion, mask)
                    inpainted_result = x2 * mask + occlusion * (1. - mask)

                    inp_hw = config['image_shape'][1]

                    if idx == 0:
                        offset = ((di_w // 3 - inp_hw) // 2,
                                  (di_h - inp_hw) // 2)
                    elif idx == 1:
                        offset = ((di_w - inp_hw) // 2, (di_h - inp_hw) // 2)
                    elif idx == 2:
                        offset = ((((di_w - inp_hw) // 2) + (di_w // 3)),
                                  (di_h - inp_hw) // 2)

                    grid = vutils.make_grid(inpainted_result, normalize=True)

                    # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
                    ndarr = grid.mul_(255).add_(0.5).clamp_(0, 255).permute(
                        1, 2, 0).to('cpu', torch.uint8).numpy()
                    im = Image.fromarray(ndarr)

                    im = transforms.CenterCrop(config['mask_shape'])(im)
                    im = transforms.Resize(config['image_shape'][:-1])(im)
                    default_image.paste(im, offset)

                return default_image
            else:
                raise TypeError("{} is not an image file.".format(imagePath))
        # exit no grad context
    except Exception as e:  # for unexpected error logging
        print("Error: {}".format(e))
        raise e
def main():
    args = parser.parse_args()
    config = get_config(args.config)

    # CUDA configuration
    cuda = config['cuda']
    device_ids = config['gpu_ids']
    if cuda:
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
            str(i) for i in device_ids)
        device_ids = list(range(len(device_ids)))
        config['gpu_ids'] = device_ids
        cudnn.benchmark = True

    print("Arguments: {}".format(args))

    # Set random seed
    if args.seed is None:
        args.seed = random.randint(1, 10000)
    print("Random seed: {}".format(args.seed))
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if cuda:
        torch.cuda.manual_seed_all(args.seed)

    print("Configuration: {}".format(config))

    try:  # for unexpected error logging
        with torch.no_grad():  # enter no grad context
            file = dataset_files(args.test_root, "*.jpg")
            mask_file = dataset_files(args.mask_dir, "*.png")
            for j in range(len(mask_file)):
                for i in range(len(file)):
                    if is_image_file(file[i]):
                        if mask_file and is_image_file(mask_file[j]):
                            # Test a single masked image with a given mask
                            x = default_loader(file[i])
                            mask = default_loader(mask_file[j])
                            # x = cv2.cvtColor(cv2.imread(file[i]), cv2.COLOR_BGR2RGB)
                            # mask = cv2.cvtColor(cv2.imread(mask_file[j]), cv2.COLOR_BGR2RGB)
                            # x = cv2.resize(x, (config['image_shape'][0], config['image_shape'][1]))
                            # mask = cv2.resize(mask, (config['image_shape'][0], config['image_shape'][1]))
                            x = transforms.Resize(
                                config['image_shape'][:-1])(x)
                            x = transforms.CenterCrop(
                                config['image_shape'][:-1])(x)
                            # mask = transforms.Resize(config['image_shape'][:-1])(mask)
                            # mask = transforms.CenterCrop(config['image_shape'][:-1])(mask)
                            x = transforms.ToTensor()(x)
                            mask = transforms.ToTensor()(mask)[0].unsqueeze(
                                dim=0)
                            x = normalize(x)
                            x = x * (1. - mask)
                            x = x.unsqueeze(dim=0)
                            # x_raw = x
                            mask = mask.unsqueeze(dim=0)
                        elif mask_file[j]:
                            raise TypeError("{} is not an image file.".format(
                                mask_file[j]))
                        else:
                            # Test a single ground-truth image with a random mask
                            ground_truth = default_loader(file[i])
                            ground_truth = transforms.Resize(
                                config['image_shape'][:-1])(ground_truth)
                            ground_truth = transforms.CenterCrop(
                                config['image_shape'][:-1])(ground_truth)
                            ground_truth = transforms.ToTensor()(ground_truth)
                            ground_truth = normalize(ground_truth)
                            ground_truth = ground_truth.unsqueeze(dim=0)
                            bboxes = test_bbox(config,
                                               batch_size=ground_truth.size(0),
                                               t=50,
                                               l=50)
                            x, mask = mask_image(ground_truth, bboxes, config)

                        # Set checkpoint path
                        if not args.checkpoint_path:
                            checkpoint_path = os.path.join(
                                'checkpoints', config['dataset_name'],
                                config['mask_type'] + '_' + config['expname'])
                        else:
                            checkpoint_path = args.checkpoint_path

                        # Define the trainer
                        netG = Generator(config['netG'], cuda, device_ids)
                        # Resume weight
                        g_checkpoint = torch.load(f'{checkpoint_path}/gen.pt')
                        netG.load_state_dict(g_checkpoint)
                        # model_iteration = int(last_model_name[-11:-3])
                        print("Model Resumed".format(checkpoint_path))

                        if cuda:
                            netG = nn.parallel.DataParallel(
                                netG, device_ids=device_ids)
                            x = x.cuda()
                            mask = mask.cuda()

                        # Inference
                        x1, x2 = netG(x, mask)
                        inpainted_result = x2 * mask + x * (1. - mask)
                        inpainted_result_cpu = torch.Tensor.cpu(
                            inpainted_result).detach().permute(0, 2, 3, 1)
                        inpainted_result_cpu = np.asarray(
                            inpainted_result_cpu[0])
                        inpainted_result_cpu = cv2.normalize(
                            inpainted_result_cpu, inpainted_result_cpu, 0, 255,
                            cv2.NORM_MINMAX)

                        # cat_result = torch.cat([x, inpainted_result, ground_truth], dim=3).cuda()

                        vutils.save_image(inpainted_result,
                                          args.output_dir +
                                          'output_{}/'.format(j + 1) +
                                          'output_{}.png'.format(i),
                                          padding=0,
                                          normalize=True)
                        # cv2.imwrite(args.output_dir+ 'output_{}/'.format(j+1) + 'output_{}.png'.format(i), inpainted_result_cpu)
                        #             cv2.cvtColor(inpainted_result_cpu, cv2.COLOR_BGR2RGB))
                        print("{}th image saved".format(i))
                    else:
                        raise TypeError("{} is not an image file.".format(file[i]))
            # exit no grad context
    except Exception as e:  # for unexpected error logging
        print("Error: {}".format(e))
        raise e
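dataset_files is assumed to glob a directory for files matching a pattern and return a sorted list of paths; a minimal sketch (inferred, not the project's own code):

import glob
import os

def dataset_files(root, pattern):
    return sorted(glob.glob(os.path.join(root, pattern)))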
Example #12
    def test(self):
        path = os.path.join(self.dataset_path, 'test')
        print(path, 'PATH')

        path2 = self.choose_net

        netG = Generator().to(device)
        netD = Discriminator().to(device)

        p1 = np.array([np.array(p.shape).prod()
                       for p in netG.parameters()]).sum()
        p2 = np.array([np.array(p.shape).prod()
                       for p in netD.parameters()]).sum()
        print(p1, p2, p1 / (256 * 256))

        beta1 = 0.5
        beta2 = 0.999

        Testset = torchvision.datasets.DatasetFolder(root=path,
                                                     loader=npy_loader,
                                                     extensions=('.npy', ))

        test_loader = torch.utils.data.DataLoader(dataset=Testset,
                                                  batch_size=1,
                                                  shuffle=False)

        g_optimizer = torch.optim.Adam(netG.parameters(), 0.0001,
                                       [beta1, beta2])
        d_optimizer = torch.optim.Adam(netD.parameters(), 0.0001,
                                       [beta1, beta2])
        netG, g_optimizer = amp.initialize(netG, g_optimizer, opt_level='O1')
        netD, d_optimizer = amp.initialize(netD, d_optimizer, opt_level='O1')

        netG = torch.nn.DataParallel(netG, device_ids=[id], output_device=id)
        netD = torch.nn.DataParallel(netD, device_ids=[id], output_device=id)

        g = 0

        try_loading_file = True
        if try_loading_file:
            try:
                # loads only the network weights, NOT the optimizer state etc.
                netG.load_state_dict(
                    torch.load(os.path.join(path2, 'netG_chexpert.pt'),
                               map_location={'cuda:0': 'cpu'}))
                netD.load_state_dict(
                    torch.load(os.path.join(path2, 'netD_chexpert.pt'),
                               map_location={'cuda:0': 'cpu'}))
                print("loaded model from file")
            except Exception:
                print("loading model from file failed; created new model")

        c_dim = 2
        # =================================================================================== #
        #                                 5. Testing                                          #
        # =================================================================================== #
        """Translate images using StarGAN trained on a single dataset."""
        netD = netD.eval()
        total = 0
        correct = 0
        count_gesund = 0
        total_rec = 0
        total_var = 0
        loss_metric = nn.MSELoss()
        with torch.no_grad():
            long_pred = torch.zeros(0).long()
            long_cls = torch.zeros(0).long()
            long_score = torch.zeros(0)
            for i, (X, label_org) in enumerate(test_loader):
                if i < 500:

                    # Prepare input images and target domain labels.
                    x_real = np.array(X).astype(np.float32)
                    x_real = np.transpose(np.array(x_real), (0, 3, 1, 2))
                    x_real = torch.tensor(x_real).half().to(device)
                    x_real = normalize(x_real).to(device)

                    c_trg_list = create_labels(label_org, c_dim)
                    print('xreal', x_real.shape)
                    (_, out_cls) = netD(x_real, 0)
                    print('out_cls', out_cls)
                    _, predicted = torch.max(out_cls.data, 1)
                    y_score = out_cls[:, 1].cpu()
                    total += 1
                    correct += (predicted.cpu() == label_org).sum().item()
                    long_pred = torch.cat((long_pred, predicted.cpu()), dim=0)
                    long_cls = torch.cat((long_cls, label_org), dim=0)
                    long_score = torch.cat((long_score, y_score), dim=0)

                    # Translate images.
                    x_fake_list = [x_real]
                    for c_trg in c_trg_list:
                        x_fake_list.append((netG(x_real, c_trg)))

                    if label_org == 0:
                        text = 'original healthy'
                        reconstruction = loss_metric(x_fake_list[1],
                                                     x_fake_list[0])
                        total_rec += reconstruction
                        count_gesund += 1
                        diff = normalize(
                            x_fake_list[1][0, 0, :, :]).cpu() - normalize(
                                x_fake_list[0][0, 0, :, :]).cpu()
                        thresh = threshold_otsu(np.array(diff))
                        print(i, thresh)
                        varianz = diff.var()
                        total_var += varianz
                    else:
                        text = 'original diseased'
                        diff = -normalize(
                            x_fake_list[1][0, 0, :, :]).cpu() + normalize(
                                x_fake_list[0][0, 0, :, :]).cpu()
                        thresh = threshold_otsu(np.array(abs(diff)))
                        thresholded_images = np.double(
                            abs(diff) > 1 * abs(thresh))
                        print(i, thresh)

                    if i % 2 == 0 and label_org == 1:  #plot output images
                        img = torch.zeros(7, 256, 256)
                        img[0, :, :] = x_fake_list[0][0, 0, :, :].cpu()
                        img[1, :, :] = x_fake_list[1][0, 0, :, :].cpu()
                        img[2, :, :] = x_fake_list[2][0, 0, :, :].cpu()
                        img[3, :, :] = diff
                        img[4, :, :] = torch.tensor(thresholded_images)

                        plt.figure(i)
                        ax = plt.subplot(2, 3, 1)
                        plt.imshow((normalize(
                            (x_fake_list[0][0, 0, :, :]))).cpu())
                        ax.axis('off')
                        ax.title.set_text(text)
                        ax = plt.subplot(2, 3, 2)
                        plt.imshow(normalize(x_fake_list[1][0, 0, :, :]).cpu())
                        ax.title.set_text('generated healthy')
                        ax.axis('off')
                        ax = plt.subplot(2, 3, 3)
                        plt.imshow(normalize(x_fake_list[2][0, 0, :, :]).cpu())
                        ax.axis('off')
                        ax.title.set_text('generated diseased')

                        ax = plt.subplot(2, 3, 4)
                        plt.imshow(diff)
                        ax.title.set_text('difference')
                        ax.axis('off')

            accuracy = 100 * correct / total
            auc = roc_auc_score(long_cls, long_score)
            (kappa, upper, lower) = kappa_score(long_pred, long_cls)
            avg_rec = total_rec / count_gesund
            avg_var = total_var / count_gesund

        print('AUROC', auc, 'accuracy', accuracy, 'kappa', kappa,
              'mse reconstruction error', avg_rec, 'variance healthy', avg_var)
        f = open('./descargan.txt', 'w')
        f.write('auroc ' + str(auc) + '\n')
        f.write('accuracy ' + str(accuracy) + '\n')
        f.write('MSE(a_h, r_h) ' + str(avg_rec) + '\n')
        f.write('kappa ' + str(kappa) + '\n')
        f.write('variance reconstruction ' + str(avg_var) + '\n')
        f.close()
Example #13
        elif (label in [
                "Silence", "IPU", "SpeechActivity", "Overlap", "joint_laugh"
        ]):
            df[label] = sample_square(time_series[label], physio_index)

        elif label in [
                "Particles", "Discourses", "FilledBreaks", "Laughters",
                "Feedbacks", "UnionSocioItems"
        ]:
            df[label] = sample_cont_ts(time_series[label],
                                       physio_index,
                                       mode="binary")

        else:
            if label == "Signal":
                df[label] = ts.normalize(
                    sample_cont_ts(time_series[label], physio_index))
            else:
                df[label] = sample_cont_ts(time_series[label], physio_index)

    if args.left:
        for i in range(len(labels)):
            labels[i] += "_P"
    else:
        for i in range(len(labels)):
            labels[i] += "_I"
    df.columns = ["Time (s)"] + labels

    # save data
    df.to_pickle(output_filename_pkl)
Example #14
 if is_image_file(args.image):
     if args.mask and is_image_file(args.mask):
         # Test a single masked image with a given mask
         x = tif_loader(args.image)
         ## center crop
         x = x[110:366, 110:366, :]
         x = torch.from_numpy(x)
         mask = default_loader(args.mask)  ## 476 --> 256
         #x = x[110:366, 110:366,:]
         #x = transforms.Resize(config['image_shape'][:-1])(x)
         #x = transforms.CenterCrop(config['image_shape'][:-1])(x)
         mask = transforms.Resize(config['image_shape'][:-1])(mask)
         mask = transforms.CenterCrop(config['image_shape'][:-1])(mask)
         #x = transforms.ToTensor()(x)
         mask = transforms.ToTensor()(mask)[0].unsqueeze(dim=0)
         x = normalize(x)
         #x = x * (1. - mask)
         x = x.unsqueeze(dim=0)
         mask = mask.unsqueeze(dim=0)
     elif args.mask:
         raise TypeError("{} is not an image file.".format(args.mask))
     else:
         # Test a single ground-truth image with a random mask
         ground_truth = tif_loader(args.image)
         #ground_truth = transforms.Resize(config['image_shape'][:-1])(ground_truth)
         #ground_truth = transforms.CenterCrop(config['image_shape'][:-1])(ground_truth)
         #ground_truth = transfer2tensor(ground_truth)
         #ground_truth = transforms.ToTensor()(ground_truth)
         x = ground_truth[110:366, 110:366, :]
         x = np.transpose(x, [2, 0, 1])
         #print('Output min, max',x.min(),x.max())
Example #15
    def __getitem__(self, index):

        # first, load gt image
        gt_path = os.path.join(self.gt_path, self.samples[index])
        img = default_loader(gt_path, chan='L')

        # get original image for GT images
        # png_to_jpg_ext = self.samples[index]

        original_path = os.path.join(self.data_path, self.samples[index])
        original_path = original_path.replace('png', 'jpg')

        # but if the original image does not exist, skip it
        orig_img = default_loader(original_path)

        #img_path = os.path.join(self.data_path, self.gt_samples[index])

        #print("====img(labeled)")
        #print(img.size)

        if self.random_crop:
            # GT IMAGE

            # GT IMAGE -> 128*128*3 -----> 128*128*9

            imgw, imgh = img.size

            if imgh < self.image_shape[0] or imgw < self.image_shape[1]:
                img = transforms.Resize(min(self.image_shape))(img)
            img = transforms.RandomCrop(self.image_shape)(img)

            # ORIGINAL IMAGE
            imgw, imgh = orig_img.size
            if imgh < self.image_shape[0] or imgw < self.image_shape[1]:
                orig_img = transforms.Resize(min(self.image_shape))(orig_img)
            orig_img = transforms.RandomCrop(self.image_shape)(orig_img)

        else:
            img = transforms.Resize(self.image_shape)(img)
            img = transforms.RandomCrop(self.image_shape)(img)
            orig_img = transforms.Resize(self.image_shape)(orig_img)
            orig_img = transforms.RandomCrop(self.image_shape)(orig_img)

        # img : (0 ~ 15), we have to convert it to 16 channel
        #img = np.array(img, dtype=np.int32)

        img = np.array(img, dtype=np.uint8)

        #print("max : " + str(img.max()) + ", low : " + str(img.min()))
        #img = transforms.ToTensor()(img).int() # turn the image to a tensor
        img = torch.from_numpy(img).long()
        #print(">max : " + str(img.max()) + ", low : " + str(img.min()))
        #\img = normalize(img)

        orig_img = transforms.ToTensor()(
            orig_img)  # turn the image to a tensor
        orig_img = normalize(orig_img)

        raw_name = self.samples[index].split('.')[0]

        # =============================== OUTPUT IMAGE CAUTION SIZE!!!
        #target = np.zeros([self.n_classes, 128, 128])
        target = np.zeros([self.n_classes, 256, 256])
        for c in range(self.n_classes):
            target[c][img == c] = 1

        target = torch.from_numpy(target).float()

        #lbl_img = m.toimage(img, high=img.max(), low=img.min())
        #print(lbl_img.shape)

        # RETURN (NAME), GT, TARGET(ONE-HOT), ORIG
        if self.return_name:
            return raw_name, img, target, orig_img
        else:
            return img, target, orig_img
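The per-class loop that builds target can be written with F.one_hot; a small self-contained sketch of the same idea (assuming the label image holds class indices in [0, n_classes)):

import torch
import torch.nn.functional as F

def to_one_hot(label_img, n_classes):
    # label_img: (H, W) LongTensor of class indices -> (n_classes, H, W) float mask
    return F.one_hot(label_img, num_classes=n_classes).permute(2, 0, 1).float()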
Example #16
def generate(img, img_mask_path, model_path):
    with torch.no_grad():   # enter no grad context
        if img_mask_path and is_image_file(img_mask_path):
            # Test a single masked image with a given mask
            x = Image.fromarray(img)
            mask = default_loader(img_mask_path)
            x = transforms.Resize(config['image_shape'][:-1])(x)
            x = transforms.CenterCrop(config['image_shape'][:-1])(x)
            mask = transforms.Resize(config['image_shape'][:-1])(mask)
            mask = transforms.CenterCrop(config['image_shape'][:-1])(mask)
            x = transforms.ToTensor()(x)
            mask = transforms.ToTensor()(mask)[0].unsqueeze(dim=0)
            x = normalize(x)
            x = x * (1. - mask)
            x = x.unsqueeze(dim=0)
            mask = mask.unsqueeze(dim=0)
        elif img_mask_path:
            raise TypeError("{} is not an image file.".format(img_mask_path))
        else:
            # Test a single ground-truth image with a random mask
            #ground_truth = default_loader(img_path)
            ground_truth = img
            ground_truth = transforms.Resize(config['image_shape'][:-1])(ground_truth)
            ground_truth = transforms.CenterCrop(config['image_shape'][:-1])(ground_truth)
            ground_truth = transforms.ToTensor()(ground_truth)
            ground_truth = normalize(ground_truth)
            ground_truth = ground_truth.unsqueeze(dim=0)
            bboxes = random_bbox(config, batch_size=ground_truth.size(0))
            x, mask = mask_image(ground_truth, bboxes, config)

        # Set checkpoint path
        if not model_path:
            checkpoint_path = os.path.join('checkpoints',
                                           config['dataset_name'],
                                           config['mask_type'] + '_' + config['expname'])
        else:
            checkpoint_path = model_path

        # Define the trainer
        netG = Generator(config['netG'], cuda, device_ids)
        # Resume weight
        last_model_name = get_model_list(checkpoint_path, "gen", iteration=0)
        
        if cuda:
            netG.load_state_dict(torch.load(last_model_name))
        else:
            netG.load_state_dict(torch.load(last_model_name, map_location='cpu'))
                                 
        model_iteration = int(last_model_name[-11:-3])
        print("Resume from {} at iteration {}".format(checkpoint_path, model_iteration))

        if cuda:
            netG = nn.parallel.DataParallel(netG, device_ids=device_ids)
            x = x.cuda()
            mask = mask.cuda()

        # Inference
        x1, x2, offset_flow = netG(x, mask)
        inpainted_result = x2 * mask + x * (1. - mask)
        inpainted_result =  from_torch_img_to_numpy(inpainted_result, 'output.png', padding=0, normalize=True)

        return inpainted_result
    def run(self, iterator, is_true_test=False):
        if not self.net.has_model():
            print("=> Please train your model first!!")
            exit(1)

        # build graph
        self.net.model(self.X).optimizer(self.Y)
        with tf.Session() as session:
            # restore session
            print("=> loading session from: {:s}".format(self.net.save_path))
            self.net.saver.restore(session, self.net.save_path)

            # file iterator
            pre_output = []

            for (index, flag, name, X, Y, Q, tamper_rate, shape, offset_x,
                 offset_y, width, height) in iterator():
                (_X, _Y) = (utils.normalize(X),
                            utils.reformat(Y, self._conf.label_size))
                (params, softmax, loss, predict,
                 accuracy) = session.run([
                     self.net.params_value, self.net.output, self.net.loss,
                     self.net.predict, self.net.accuracy
                 ],
                                         feed_dict={
                                             self.X: _X,
                                             self.Y: _Y
                                         })
                # tmp = [softmax(*label_size), predict, label, confidence]
                tmp = np.zeros([256, 8], dtype=np.float32)
                tmp[:, 0:5] = softmax
                tmp[:, 5] = predict
                tmp[:, 6] = Y
                tmp[:, 7] = Q
                tmp = np.reshape(tmp, (-1, 8))
                pre_output.append(tmp)

                # save result into csv file
                if index == flag:
                    pre_output = np.array(pre_output, dtype=np.float32)
                    name = "{:s}_{:d}_{:d}".format(name, width, height)
                    df = pd.DataFrame(np.reshape(pre_output, (-1, 8)))
                    print(df.shape)

                    if not is_true_test:
                        csv_path = os.path.join(self.model_path, "pre-train",
                                                "{:s}.csv".format(name))
                    else:
                        csv_path = os.path.join(self.model_path,
                                                "pre-train-true",
                                                "{:s}.csv".format(name))
                    print("=> save pretrain at csv_path={:s}".format(csv_path))
                    df.to_csv(csv_path)
                    pre_output = []

                for i in range(len(predict)):
                    if predict[i] != Y[i]:
                        print(
                            "=> index={:d} p={:d} Y={:d} tamper_rate={:.5f} output={:s}"
                            .format(i, predict[i], Y[i], tamper_rate[i],
                                    softmax[i]))
                print(
                    "=> ({:d}, {:d}), loss={:.5f}, accuracy:{:.3f}% \n".format(
                        index, flag, loss, accuracy))
                if len(params) > 0: print("\n\n")

            print(
                "=> pretrain csv format={f1,f2,f3...,predict,label,texture_quality}, f1,f2 is CNN confidence for each model\n"
            )
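utils.reformat is assumed to one-hot encode the integer labels into a (batch, label_size) float array, matching how the softmax output is compared against Y; a minimal sketch under that assumption:

import numpy as np

def reformat(labels, label_size):
    labels = np.asarray(labels).astype(np.int64).ravel()
    one_hot = np.zeros((labels.shape[0], label_size), dtype=np.float32)
    one_hot[np.arange(labels.shape[0]), labels] = 1.0
    return one_hot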
Example #18
                g1 = (-x.grad)
                g2 = (-y.grad)
                disp2 = torch.cat((g1[None, ..., None], g2[None, ..., None]),
                                  3).float()
                disp2[disp2 != disp2] = 0  # zero out NaNs (NaN != NaN)
                print('disp2')
                vgrid = grid + disp2 / disp2.max() * 0.1

                output = torch.nn.functional.grid_sample(X, vgrid)

                Out = torch.tensor(output[0, 0, :, :].detach().numpy())

                diff = -X[0, 0, :, :] + Out

                X = normalize(X)
                plt.figure(1)
                plt.subplot(1, 3, 1)
                plt.imshow(X[0, 0, :, :])
                plt.title('input healthy')
                plt.axis('off')
                plt.subplot(1, 3, 2)
                output = normalize(Out)
                plt.imshow(Out)
                plt.title('output diseased')
                plt.axis('off')
                plt.subplot(1, 3, 3)
                plt.imshow((-diff), cmap='viridis')
                plt.title('Difference')
                plt.axis('off')
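The base grid added to disp2 before grid_sample is assumed to be the identity sampling grid in normalized [-1, 1] coordinates; a sketch for an N x C x H x W input:

import torch

def identity_grid(n, h, w):
    ys = torch.linspace(-1, 1, h)
    xs = torch.linspace(-1, 1, w)
    gy, gx = torch.meshgrid(ys, xs, indexing='ij')
    grid = torch.stack((gx, gy), dim=-1)  # (H, W, 2); x comes first, as grid_sample expects
    return grid.unsqueeze(0).repeat(n, 1, 1, 1)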