Example #1
 def evaluate(self, cfg, carray, issame, nrof_folds=5, tta=False):
     self.model.eval()
     idx = 0
     embeddings = np.zeros([len(carray), cfg.MODEL.HEADS.EMBEDDING_DIM])
     batch_size = cfg.SOLVER.IMS_PER_BATCH
     with torch.no_grad():
         while idx + batch_size <= len(carray):
             batch = torch.tensor(carray[idx:idx + batch_size])
             if tta:
                 flipped = hflip_batch(batch)
                 emb_batch = self.model(batch.to(self.device)) + self.model(
                     flipped.to(self.device))
                 # Move to CPU before writing into the NumPy buffer.
                 embeddings[idx:idx + batch_size] = l2_norm(emb_batch).cpu()
             else:
                 embeddings[idx:idx + batch_size] = self.model(
                     batch.to(self.device)).cpu()
             idx += batch_size
         if idx < len(carray):
             batch = torch.tensor(carray[idx:])
             if tta:
                 flipped = hflip_batch(batch)
                 emb_batch = self.model(batch.to(self.device)) + self.model(
                     flipped.to(self.device))
                 # Move to CPU before writing into the NumPy buffer.
                 embeddings[idx:] = l2_norm(emb_batch).cpu()
             else:
                 embeddings[idx:] = self.model(batch.to(self.device)).cpu()
     tpr, fpr, accuracy, best_thresholds = scores(embeddings, issame,
                                                  nrof_folds)
     # Render the ROC curve into an image tensor for the caller.
     buf = gen_plot(fpr, tpr)
     roc_curve = Image.open(buf)
     roc_curve_tensor = trans.ToTensor()(roc_curve)
     return accuracy.mean(), best_thresholds.mean(), roc_curve_tensor
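The `hflip_batch` and `l2_norm` helpers used above are not defined on this page. A minimal sketch of typical implementations, assuming NCHW float tensors (the actual project may define them differently):

import torch

def hflip_batch(batch):
    # Assumed NCHW layout: flip along the width dimension (dim 3).
    return torch.flip(batch, dims=[3])

def l2_norm(x, axis=1):
    # Normalize each embedding to unit L2 norm along the given axis.
    return x / torch.norm(x, p=2, dim=axis, keepdim=True)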
Example #2
    save_dir = os.path.join(
        "experiment",
        CONFIG.EXP_ID,
        "metric",
        CONFIG.TEST_ID
    )
    makedirs(save_dir)
    save_path = os.path.join(save_dir, "scores.json")
    print("Metric dst:", save_path)

    preds, gts = [], []
    for image_id, image, gt_label in tqdm(
        loader, total=len(loader), dynamic_ncols=True
    ):
        # Image
        image = image.to(device)

        pred = inference(image, model, CONFIG.MODEL.MSC_FACTORS)

        preds.append(pred)
        gts += list(gt_label.numpy())
        print("\n")
        pprint.pprint(scores(gts[-1:], preds[-1:], CONFIG.DATASET.N_CLASSES))

    # Pixel Accuracy, Mean Accuracy, Class IoU, Mean IoU, Freq Weighted IoU
    score = scores(gts, preds, n_class=CONFIG.DATASET.N_CLASSES)

    with open(save_path, "w") as f:
        json.dump(score, f, indent=4, sort_keys=True)
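The `scores` function itself is not shown on this page. A minimal sketch of the usual confusion-matrix implementation behind these segmentation metrics, assuming `gts` and `preds` are sequences of integer label maps of matching shape; note that the real implementations used by the examples differ in exact keys and return signature (Example #3 unpacks a (score, class_iou) tuple, Example #4 reads keys such as "Overall Acc"):

import numpy as np

def _fast_hist(label_true, label_pred, n_class):
    # Accumulate an n_class x n_class confusion matrix, ignoring labels
    # outside [0, n_class).
    mask = (label_true >= 0) & (label_true < n_class)
    return np.bincount(
        n_class * label_true[mask].astype(int) + label_pred[mask],
        minlength=n_class ** 2,
    ).reshape(n_class, n_class)

def scores(label_trues, label_preds, n_class):
    # Sketch only; key names and return format vary between the examples above.
    hist = np.zeros((n_class, n_class))
    for lt, lp in zip(label_trues, label_preds):
        hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
    acc = np.diag(hist).sum() / hist.sum()
    acc_cls = np.nanmean(np.diag(hist) / hist.sum(axis=1))
    iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    return {
        "Pixel Accuracy": acc,
        "Mean Accuracy": acc_cls,
        "Frequency Weighted IoU": fwavacc,
        "Mean IoU": np.nanmean(iu),
        "Class IoU": dict(zip(range(n_class), iu)),
    }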

Example #3
            # Legacy (pre-0.4) PyTorch idiom: refill a reusable input tensor and
            # wrap the batch and labels in autograd Variables.
            input.resize_as_(real_cpu).copy_(real_cpu)
            inputv = Variable(input)
            labelv_semantic = Variable(label_cpu)

            images = padder(inputv)
            feature_maps = net_features(images)
            outputs = net_segmenter(feature_maps)

            pred = np.squeeze(outputs.data.max(1)[1].cpu().numpy(), axis=1)
            gt = labelv_semantic.data.cpu().numpy()
            for gt_, pred_ in zip(gt, pred):
                gts.append(gt_)
                preds.append(pred_)

        print('\n' + '-' * 40)
        score, class_iou = scores(gts, preds, n_class=NUM_CLASSES)
        for k, v in score.items():
            print(k, v)
        print('-' * 20)
        for i in range(NUM_CLASSES):
            print(i, class_iou[i])
        print('-' * 40 + '\n')

    # do checkpointing
    if (epoch % 10 == 0) and (epoch > 0):
        torch.save(netG.state_dict(),
                   '%s/netG_epoch_%d.pth' % (opt.outf, epoch))
        torch.save(netD.state_dict(),
                   '%s/netD_epoch_%d.pth' % (opt.outf, epoch))
        torch.save(net_features.state_dict(),
                   '%s/net_features_epoch_%d.pth' % (opt.outf, epoch))
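The checkpoints written above can later be restored with the standard load_state_dict pattern; a minimal sketch, reusing the path layout from the snippet (netG, opt.outf and epoch come from the surrounding training script):

import torch

# Restore the generator saved at a given epoch; map_location keeps this
# working on CPU-only machines.
state = torch.load('%s/netG_epoch_%d.pth' % (opt.outf, epoch), map_location='cpu')
netG.load_state_dict(state)
netG.eval()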
Example #4
    def test_during_train(self, epoch, args):
        """Test SG-GAN"""
        # print(" [*] Running Test ...")

        sample_files = glob(
            './datasets/{}/*.*'.format(args.dataset_dir + '/testA')
        )  # glob('./datasets/{}/*.*'.format(self.dataset_dir + '/testA'))

        preds1 = []
        preds2 = []
        preds3 = []
        preds4 = []
        preds5 = []
        gts1 = []
        gts2 = []
        gts3 = []
        gts4 = []
        gts5 = []

        fake_img = []
        actual_image = []
        output_images = []

        plot_labels = True

        for sample_file in sample_files:
            # print('Processing image: ' + sample_file)

            #### [MODIFIED] to test metric functions ####
            #### sample_image = [load_test_data(sample_file, args.image_width, args.image_height)]

            #### [CHANGES]
            sample_image, seg_image, seg_mask_64, seg_mask_8 = load_test_data(
                sample_file, args.image_width, args.image_height)
            sample_image = [sample_image]
            seg_image = [seg_image]
            # seg_mask_64 = [seg_mask_64]
            seg_mask_8 = [seg_mask_8]

            seg_image = np.array(seg_image).astype(np.float32)
            seg_mask_8 = np.array(seg_mask_8).astype(np.float32)
            seg_mask_64 = np.expand_dims(seg_mask_64, axis=0)
            ####

            rescaled_sample = [
                tf.image.convert_image_dtype(sample, np.uint8)
                for sample in sample_image
            ]
            rescaled_sample = np.array(rescaled_sample).astype(np.float32)
            sample_image = np.array(sample_image).astype(np.float32)

            # Get fake image
            fake_A = self.generator(rescaled_sample)
            fake_img = fake_A

            sample_image = (sample_image * 2) - 1

            image_path = os.path.join(args.test_dir,
                                      os.path.basename(sample_file))
            real_image_copy = os.path.join(
                args.test_dir, "real_" + os.path.basename(sample_file))
            # save_images(sample_image, [1, 1], real_image_copy)
            save_images(fake_img, [1, 1], image_path)

            # Convert both the real sample and the generated output to images.
            actual_image = get_img(sample_image, [1, 1])
            fake_img = get_img(fake_A, [1, 1])

            output_images.append(fake_img)

            lt1, lp1 = scores_seg_fake(seg_image, fake_img)
            preds1 += list(lp1)
            gts1 += list(lt1)

        print("score")
        score = scores(gts1, preds1, n_class=args.segment_class)
        score_df = pd.DataFrame(score)

        print("\n[*] ------------")
        print("[*] Test scores:\n")

        with train_summary_writer.as_default():
            tf.summary.scalar('Overall Accuracy',
                              score["Overall Acc"],
                              step=epoch)
            tf.summary.scalar('Mean Accuracy', score["Mean Acc"], step=epoch)
            tf.summary.scalar('Frequency Weighted Accuracy',
                              score["FreqW Acc"],
                              step=epoch)
            tf.summary.scalar('Mean IoU', score["Mean IoU"], step=epoch)

        ########
        # if plot_labels:
        #     title="[*] Labels: seg_image | fake_img"
        #     name1="seg_image"
        #     name2="fake_image"
        #     for lt, lp in zip(gts1, preds1):
        #         plot_tensors(lt, lp, title, name1, name2)

        # print("---------------------------")
        # print("lt: seg_img || lp: fake_img")
        # print(score_df)

        # ########
        # if plot_labels:
        #     title="[*] Labels: seg_class_mask | crf(sample_image)"
        #     name1="seg_class_mask"
        #     name2="crf(sample_image, seg_class_mask)"
        #     for lt, lp in zip(gts2, preds2):
        #         plot_tensors(lt, lp, title, name1, name2)

        # print("---------------------------")
        # print("lt: seg_mask || lp: crf(test sample)")
        # print(score_crf_df)

        # ########
        # if plot_labels:
        #     title="[*] Labels: fake_img | crf(sample_image, seg_mask)"
        #     name1="fake_img"
        #     name2="crf(sample_image, seg_mask)"
        #     for lt, lp in zip(gts3, preds3):
        #         plot_tensors(lt, lp, title, name1, name2)

        # print("-------------------------------------")
        # print("lt: fake_img || lp: crf(sample_image, seg_mask)")
        # print(score_crf_2_df)

        # #########
        # if plot_labels:
        #     title="[*] Labels: seg_image | fake_img"
        #     name1="seg_image"
        #     name2="da_fake"
        #     for lt, lp in zip(gts4, preds4):
        #         plot_tensors(lt, lp, title, name1, name2)

        # print("----------------------------")
        # print("lt: seg_image || lp: da_fake")
        # print(score_d_df)

        # #########
        # if plot_labels:
        #     title="[*] Labels: seg_mask | crf(sample_image, fake_img)"
        #     name1="seg_mask"
        #     name2="crf(sample_image, fake_img)"
        #     for lt, lp in zip(gts5, preds5):
        #         plot_tensors(lt, lp, title, name1, name2)

        # print("----------------------------")
        # print("lt: seg_mask | lp: crf(sample_image, fake_img)")
        # print(score_crf_3_df)
        # print("Making multiple image tensor:", len(output_images))

        if (len(output_images) <= 1):
            return output_images[0]
        else:
            # Stacking all generated test images along the batch axis is
            # equivalent to the original pairwise concatenation loop.
            return tf.concat(output_images, axis=0)
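The image tensor returned by test_during_train can be logged next to the scalar summaries already written inside the method; a minimal usage sketch, assuming the caller holds the same train_summary_writer, that `model` is the object exposing this method, and that the returned tensor is a 4-D [batch, height, width, channels] batch with values in a range tf.summary.image accepts:

import tensorflow as tf

output_tensor = model.test_during_train(epoch, args)
with train_summary_writer.as_default():
    # Write up to four of the generated test images for this epoch.
    tf.summary.image("test_fakes", output_tensor, step=epoch, max_outputs=4)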