    def log(self, m: ProGANMetrics):
        if self.tensorboard is not None:
            writer = self.tensorboard

            writer.add_scalar("Loss/Generator", m.g_loss, m.step)
            writer.add_scalar("Loss/Discriminator", m.d_loss, m.step)
            writer.add_scalar("Accuracy/D_x", m.D_x, m.step)
            writer.add_scalar("Accuracy/D_G_z", m.D_G_z, m.step)
            writer.add_scalar("Accuracy/Label", m.d_acc, m.step)
            writer.add_scalar("ADA/p", m.ada_p, m.step)
            writer.add_scalar("ADA/r_t", m.ada_r, m.step)
            writer.add_images("Images/Real", colour_labels(m.real_imgs),
                              m.step)
            writer.add_images("Images/Fake", colour_labels(m.gen_imgs), m.step)

        time = timestamp()
        print(
            f"[{time}] "
            f"[Depth {m.depth}] "
            f"[Epoch {m.epoch}/{self.n_epochs}] "
            f"[D loss: {m.d_loss:.4f}, acc: {100 * m.d_acc:.2f}%] "
            f"[G loss: {m.g_loss:.4f}] "
            f"[p: {m.ada_p:.2f}] "
            f"[r_t: {m.ada_r:.2f}]",
            flush=True,
        )
    def log_val(self, m: UNetTrainMetrics):
        if self.tensorboard is not None:
            writer = self.tensorboard
            writer.add_scalar("Loss/Validation", m.loss, m.step)

        time = timestamp()
        print(bold(f"[{time}]\t" f"[Loss: {m.loss:.4f}]\t"))
    def log_val(self, m: ResNetValidateMetrics):
        if self.tensorboard is not None:
            writer = self.tensorboard
            writer.add_scalar("Loss/Validation", m.loss, m.step)
            writer.add_scalar("Accuracy/Validation", m.acc, m.step)

        time = timestamp()
        print(
            bold(f"[{time}] [Validate]\t"
                 f"[Loss: {m.loss:.4f}]\t"
                 f"[Accuracy: {m.acc:.2f}]"))
    def log_train(self, m: ResNetTrainMetrics):
        if self.tensorboard is not None:
            writer = self.tensorboard
            writer.add_scalar("Loss/Training", m.loss, m.step)
            writer.add_scalar("Accuracy/Training", m.acc, m.step)

        time = timestamp()
        print(f"[{time}]\t"
              f"[Training {m.epoch}]\t"
              f"[Loss: {m.loss:.4f}]\t"
              f"[Accuracy: {m.acc:.2f}]")
    def log_train(self, m: UNetTrainMetrics):
        if self.tensorboard is not None:
            writer = self.tensorboard
            writer.add_scalar("Loss/Training", m.loss, m.step)

            writer.add_images("Images", m.images, m.step)
            writer.add_images(
                "Masks/true", colour_labels(m.masks_true), m.step, dataformats="NCHW"
            )
            writer.add_images(
                "Masks/pred",
                colour_labels(m.masks_pred),
                m.step,
                dataformats="NCHW",
            )

        time = timestamp()
        print(f"[{time}]\t" f"[{m.epoch}]\t" f"[Loss: {m.loss:.4f}]\t")
def main():
    img_size = 512
    batch_size = 64

    opt = get_args()
    name = opt.name

    output_path = Path(opt.out_dir)
    output_path.mkdir(parents=True, exist_ok=True)

    device = get_device()

    model_path = (Path(opt.out_dir) / "resnet" / name / "checkpoints" /
                  "model_latest.pth")
    model = load_small_retina_model(model_path)
    model = model.to(device)

    if opt.dataset == "eyepacs":
        val_dataset = HDF5EyePACS(train=False)
    elif opt.dataset == "test":
        transform = T.Compose([T.Resize(img_size), T.ToTensor()])
        val_dataset = GradingDataset(image_transform=transform,
                                     mode=GradingDataset.TEST)
    elif opt.dataset == "val":
        transform = T.Compose([T.Resize(img_size), T.ToTensor()])
        val_dataset = GradingDataset(
            image_transform=transform,
            mode=GradingDataset.VALIDATION,
        )
    elif opt.dataset == "copypaste":
        transform = T.Compose([T.Resize(img_size), T.ToTensor()])
        val_dataset = CopyPasteDataset(image_transform=transform,
                                       return_label=False)
    else:
        transform = T.Compose([T.Resize(img_size), T.ToTensor()])
        val_dataset = SyntheticDataset(
            name=opt.dataset,
            image_transform=transform,
            return_inst=False,
            return_image=False,
            return_label=False,
        )

    val_loader = DataLoader(
        val_dataset,
        batch_size=batch_size,
        num_workers=8,
        pin_memory=True,
        shuffle=False,
    )

    tta_transform = T.RandomAffine(degrees=360, translate=(0.1, 0.1))
    n_val_samples = len(val_dataset)
    predictions = np.empty(n_val_samples, dtype=int)
    actual = np.empty(n_val_samples, dtype=int)
    for i, batch in enumerate(tqdm(val_loader)):
        images, grades = batch["transformed"], batch["grade"]
        images = images.to(device)
        grades = grades.to(device)

        if opt.tta:
            tta_preds = torch.empty((opt.tta_runs, images.shape[0], 5),
                                    dtype=torch.float,
                                    device=device)
            for run in range(opt.tta_runs):
                # Augment the original batch afresh on each TTA run rather
                # than compounding transforms across runs.
                augmented = tta_transform(images)
                with torch.no_grad():
                    outputs = model(augmented)
                tta_preds[run, :, :] = outputs
            tta_preds = torch.mean(tta_preds, dim=0)
            preds = torch.argmax(tta_preds, dim=1)
        else:
            with torch.no_grad():
                outputs = model(images)
            preds = torch.argmax(outputs, dim=1)

        batch_start = i * batch_size
        batch_end = batch_start + images.shape[0]
        predictions[batch_start:batch_end] = preds.cpu().numpy()
        actual[batch_start:batch_end] = grades.cpu().numpy()

    metrics = {
        "accuracy": accuracy_score(actual, predictions),
        "precision": precision_score(actual, predictions, average="macro"),
        "recall": recall_score(actual, predictions, average="macro"),
        "f1": f1_score(actual, predictions, average="macro"),
        "kappa": quadratic_kappa(actual, predictions),
        "tta": opt.tta,
        "tta_runs": opt.tta_runs,
    }
    print("Accuracy: ", metrics["accuracy"])
    print("Precision: ", metrics["precision"])
    print("Recall: ", metrics["recall"])
    print("F1: ", metrics["f1"])
    print("Cohen's", metrics["kappa"])

    time = timestamp()

    # Save options and evaluation metrics.
    with open(output_path / f"metrics-{time}.json", "w") as f:
        json.dump({**vars(opt), **metrics}, f, indent=4)
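# Sketch (assumption): `quadratic_kappa` used above is not defined in this
# snippet. A plausible implementation is quadratically weighted Cohen's kappa
# from scikit-learn; the helper name below is illustrative only.
from sklearn.metrics import cohen_kappa_score


def quadratic_kappa_example(y_true, y_pred):
    """Quadratically weighted Cohen's kappa between true and predicted grades."""
    return cohen_kappa_score(y_true, y_pred, weights="quadratic")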
def main():
    # TODO(sonjoonho): Add argument parsing for options.

    out_dir = "results/"
    img_size = 512
    batch_size = 64

    opt = get_args()
    name = opt.name

    output_path = Path(opt.out_dir) / name
    output_path.mkdir(parents=True, exist_ok=True)

    device = get_device()

    model_path = (Path(out_dir) / "resnet_labels" / name / "checkpoints" /
                  "model_latest.pth")
    model = load_label_model(model_path)
    model = model.to(device)

    transform = T.Compose([
        T.Resize(img_size, interpolation=InterpolationMode.NEAREST),
        T.ToTensor(),
    ])

    if opt.dataset == "real":
        test_dataset = CombinedDataset(
            label_transform=transform,
            return_image=False,
            return_inst=False,
            return_transformed=False,
            mode=CombinedDataset.VALIDATION,
        )
        test_dataset.df = test_dataset.df[test_dataset.df["Source"] == "FGADR"]
    elif opt.dataset == "copypaste":
        test_dataset = CopyPasteDataset(
            label_transform=transform,
            return_transformed=False,
        )
    else:
        test_dataset = SyntheticDataset(
            opt.dataset,
            label_transform=transform,
            return_image=False,
            return_inst=False,
            return_transformed=False,
        )

    test_loader = DataLoader(
        test_dataset,
        batch_size=batch_size,
        num_workers=8,
        pin_memory=True,
        shuffle=False,
    )

    n_val_samples = len(test_dataset)
    predictions = np.empty(n_val_samples, dtype=int)
    actual = np.empty(n_val_samples, dtype=int)

    print(f"Validation samples: {n_val_samples}")

    for i, batch in enumerate(tqdm(test_loader)):
        images, grades = batch["label"], batch["grade"]
        images = get_label_semantics(images)

        images = images.to(device)
        grades = grades.to(device)

        with torch.no_grad():
            outputs = model(images)
        preds = torch.argmax(outputs, dim=1)

        batch_start = i * batch_size
        batch_end = batch_start + images.shape[0]
        predictions[batch_start:batch_end] = preds.cpu().numpy()
        actual[batch_start:batch_end] = grades.cpu().numpy()

    metrics = {
        "accuracy": accuracy_score(actual, predictions),
        "precision": precision_score(actual, predictions, average="macro"),
        "recall": recall_score(actual, predictions, average="macro"),
        "f1": f1_score(actual, predictions, average="macro"),
        "kappa": quadratic_kappa(actual, predictions),
        "tta": opt.tta,
        "tta_runs": opt.tta_runs,
    }
    print("Accuracy: ", metrics["accuracy"])
    print("Precision: ", metrics["precision"])
    print("Recall: ", metrics["recall"])
    print("F1: ", metrics["f1"])
    print("Cohen's", metrics["kappa"])

    time = timestamp()

    # Save options and evaluation metrics.
    with open(output_path / f"metrics-{time}.json", "w") as f:
        json.dump({**vars(opt), **metrics}, f, indent=4)
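# Sketch (assumption): `get_device` and `timestamp` are project helpers that
# are imported elsewhere; the stand-ins below illustrate plausible behaviour
# and may differ from the real implementations.
from datetime import datetime

import torch


def get_device_example() -> torch.device:
    """Use CUDA when available, otherwise fall back to the CPU."""
    return torch.device("cuda" if torch.cuda.is_available() else "cpu")


def timestamp_example() -> str:
    """Filesystem-friendly timestamp, e.g. '2021-06-01_12-30-05'."""
    return datetime.now().strftime("%Y-%m-%d_%H-%M-%S")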