Example #1
def main():
    opt = get_args()
    print(opt)

    device = get_device()
    print("Device count", torch.cuda.device_count())

    if opt.seed > -1:
        set_seed(opt.seed)

    output_path = Path("results") / "resnet" / opt.name
    output_path.mkdir(parents=True, exist_ok=True)

    # Save options.
    with open(output_path / "opt.json", "w") as f:
        json.dump(vars(opt), f, indent=4)

    logger = ResNetLogger(opt.name, opt.n_epochs, opt.tensorboard)

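    # Train all layers from scratch: no pretrained weights and no
    # feature-extraction-only freezing.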
    use_pretrained = False
    feature_extract = False
    n_classes = 5

    # Freeze layers if we're only using it for feature extraction.
    model = create_small_retina_model(use_pretrained,
                                      feature_extract,
                                      n_classes,
                                      load_name=opt.load_name)
    model = model.to(device)

    train(
        model=model,
        num_epochs=opt.n_epochs,
        log_interval=opt.log_interval,
        val_interval=opt.val_interval,
        batch_size=opt.batch_size,
        img_size=opt.img_size,
        lr=opt.lr,
        logger=logger,
        device=device,
        feature_extract=feature_extract,
        use_hdf5=opt.use_hdf5,
        synthetic_name=opt.synthetic_name,
        n_synthetic=opt.n_synthetic,
        use_real=opt.use_real,
    )

    checkpoint_path = output_path / "checkpoints"
    checkpoint_path.mkdir(parents=True, exist_ok=True)

    torch.save(model.state_dict(), checkpoint_path / "model_latest.pth")

    print("Finished!")
Example #2
def main():
    opt = get_args()
    # TODO(sonjoonho): Save options.

    if opt.seed > 0:
        set_seed(opt.seed)

    output_path = Path("results") / "resnet_labels" / opt.name
    output_path.mkdir(parents=True, exist_ok=True)

    logger = ResNetLogger(opt.name, opt.n_epochs, opt.tensorboard)

    use_pretrained = False
    feature_extract = False

    device = get_device()

    # Freeze layers if we're only using it for feature extraction.
    model = create_label_model(use_pretrained, feature_extract)
    model = model.to(device)

    train(
        model=model,
        num_epochs=opt.n_epochs,
        log_interval=opt.log_interval,
        val_interval=opt.val_interval,
        batch_size=opt.batch_size,
        img_size=opt.img_size,
        lr=opt.lr,
        logger=logger,
        device=device,
        feature_extract=feature_extract,
        use_synthetic=opt.use_synthetic,
    )

    checkpoint_path = output_path / "checkpoints"
    checkpoint_path.mkdir(parents=True, exist_ok=True)

    torch.save(model.state_dict(), checkpoint_path / "model_latest.pth")
Example #3
def main():
    img_size = 512
    batch_size = 64

    opt = get_args()
    name = opt.name

    output_path = Path(opt.out_dir)
    output_path.mkdir(parents=True, exist_ok=True)

    device = get_device()

    model_path = (Path(opt.out_dir) / "resnet" / name / "checkpoints" /
                  "model_latest.pth")
    model = load_small_retina_model(model_path)
    model = model.to(device)

    transform = T.Compose([T.Resize(img_size), T.ToTensor()])

    if opt.dataset == "eyepacs":
        val_dataset = HDF5EyePACS(train=False)
    elif opt.dataset == "test":
        val_dataset = GradingDataset(image_transform=transform,
                                     mode=GradingDataset.TEST)
    elif opt.dataset == "val":
        val_dataset = GradingDataset(
            image_transform=transform,
            mode=GradingDataset.VALIDATION,
        )
    elif opt.dataset == "copypaste":
        val_dataset = CopyPasteDataset(image_transform=transform,
                                       return_label=False)
    else:
        val_dataset = SyntheticDataset(
            name=opt.dataset,
            image_transform=transform,
            return_inst=False,
            return_image=False,
            return_label=False,
        )

    val_loader = DataLoader(
        val_dataset,
        batch_size=batch_size,
        num_workers=8,
        pin_memory=True,
        shuffle=False,
    )

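    # Test-time augmentation: average the model's outputs over several
    # randomly augmented copies of each batch.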
    tta_transform = T.RandomAffine(degrees=360, translate=(0.1, 0.1))
    n_val_samples = len(val_dataset)
    predictions = np.empty(n_val_samples, dtype=int)
    actual = np.empty(n_val_samples, dtype=int)
    for i, batch in enumerate(tqdm(val_loader)):
        images, grades = batch["transformed"], batch["grade"]
        images = images.to(device)
        grades = grades.to(device)

        if opt.tta:
            tta_preds = torch.empty((opt.tta_runs, images.shape[0], 5),
                                    dtype=float).to(device)
            for run in range(opt.tta_runs):
                augmented = tta_transform(images)
                with torch.no_grad():
                    outputs = model(augmented)
                tta_preds[run, :, :] = outputs
            tta_preds = torch.mean(tta_preds, dim=0)
            preds = torch.argmax(tta_preds, dim=1)
        else:
            with torch.no_grad():
                outputs = model(images)
            preds = torch.argmax(outputs, dim=1)

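        # Write this batch's predictions and ground-truth grades into the
        # preallocated arrays.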
        predictions[i * batch_size:i * batch_size +
                    images.shape[0]] = preds.cpu().numpy()
        actual[i * batch_size:i * batch_size +
               images.shape[0]] = grades.cpu().numpy()

    metrics = {
        "accuracy": accuracy_score(actual, predictions),
        "precision": precision_score(actual, predictions, average="macro"),
        "recall": recall_score(actual, predictions, average="macro"),
        "f1": f1_score(actual, predictions, average="macro"),
        "kappa": quadratic_kappa(actual, predictions),
        "tta": opt.tta,
        "tta_runs": opt.tta_runs,
    }
    print("Accuracy: ", metrics["accuracy"])
    print("Precision: ", metrics["precision"])
    print("Recall: ", metrics["recall"])
    print("F1: ", metrics["f1"])
    print("Cohen's", metrics["kappa"])

    time = timestamp()

    # Save metrics.
    with open(output_path / f"metrics-{time}.json", "w") as f:
        json.dump(metrics, f, indent=4)
Example #4
def main():
    # TODO(sonjoonho): Add argument parsing for options.

    out_dir = "results/"
    img_size = 512
    batch_size = 64

    opt = get_args()
    name = opt.name

    output_path = Path(opt.out_dir) / name
    output_path.mkdir(parents=True, exist_ok=True)

    device = get_device()

    model_path = (Path(out_dir) / "resnet_labels" / name / "checkpoints" /
                  "model_latest.pth")
    model = load_label_model(model_path)
    model = model.to(device)

    transform = T.Compose([
        T.Resize(img_size, interpolation=InterpolationMode.NEAREST),
        T.ToTensor(),
    ])

    if opt.dataset == "real":
        test_dataset = CombinedDataset(
            label_transform=transform,
            return_image=False,
            return_inst=False,
            return_transformed=False,
            mode=CombinedDataset.VALIDATION,
        )
        test_dataset.df = test_dataset.df[test_dataset.df["Source"] == "FGADR"]
    elif opt.dataset == "copypaste":
        test_dataset = CopyPasteDataset(
            label_transform=transform,
            return_transformed=False,
        )
    else:
        test_dataset = SyntheticDataset(
            opt.dataset,
            label_transform=transform,
            return_image=False,
            return_inst=False,
            return_transformed=False,
        )

    test_loader = DataLoader(
        test_dataset,
        batch_size=batch_size,
        num_workers=8,
        pin_memory=True,
        shuffle=False,
    )

    n_val_samples = len(test_dataset)
    predictions = np.empty(n_val_samples, dtype=int)
    actual = np.empty(n_val_samples, dtype=int)

    print(f"Validation samples: {n_val_samples}")

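    # Classify each batch of label maps and record predictions alongside the
    # true grades.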
    for i, batch in enumerate(tqdm(test_loader)):
        images, grades = batch["label"], batch["grade"]
        images = get_label_semantics(images)

        images = images.to(device)
        grades = grades.to(device)

        with torch.no_grad():
            outputs = model(images)
        preds = torch.argmax(outputs, dim=1)

        predictions[i * batch_size:i * batch_size +
                    images.shape[0]] = preds.cpu().numpy()
        actual[i * batch_size:i * batch_size +
               images.shape[0]] = grades.cpu().numpy()

    metrics = {
        "accuracy": accuracy_score(actual, predictions),
        "precision": precision_score(actual, predictions, average="macro"),
        "recall": recall_score(actual, predictions, average="macro"),
        "f1": f1_score(actual, predictions, average="macro"),
        "kappa": quadratic_kappa(actual, predictions),
        "tta": opt.tta,
        "tta_runs": opt.tta_runs,
    }
    print("Accuracy: ", metrics["accuracy"])
    print("Precision: ", metrics["precision"])
    print("Recall: ", metrics["recall"])
    print("F1: ", metrics["f1"])
    print("Cohen's", metrics["kappa"])

    time = timestamp()

    # Save metrics.
    with open(output_path / f"metrics-{time}.json", "w") as f:
        json.dump(metrics, f, indent=4)
Example #5
def main():
    opt = get_args()

    img_size = 512
    batch_size = 1

    device = get_device()

    which_labels = sorted([Labels[l] for l in opt.lesions], key=lambda x: x.value)
    n_classes = len(which_labels) + 1

    model = create_model(opt.name, n_classes)
    model.eval()
    model.to(device)

    image_transform, label_transform, _ = make_transforms(img_size)

    if opt.dataset == "test":
        dataset = CombinedDataset(
            image_transform=image_transform,
            label_transform=label_transform,
            mode=CombinedDataset.TEST,
        )
    elif opt.dataset == "val":
        dataset = CombinedDataset(
            image_transform=image_transform,
            label_transform=label_transform,
            mode=CombinedDataset.VALIDATION,
        )
    elif opt.dataset == "copypaste":
        dataset = CopyPasteDataset(
            image_transform=image_transform, label_transform=label_transform
        )
    else:
        dataset = SyntheticDataset(
            name=opt.dataset,
            image_transform=image_transform,
            label_transform=label_transform,
        )

    val_loader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=8,
        pin_memory=True,
    )

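    # Running totals for per-batch metrics (batch_size is 1, so effectively
    # per image).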
    total_dice = 0
    total_precision = 0
    total_recall = 0
    n_val = 0
    for batch in tqdm(val_loader):
        images, masks_true = batch["transformed"], batch["label"]
        images = images.to(device=device, dtype=torch.float32)

        n_val += 1

        masks_true = get_labels(which_labels, masks_true)[:, :-1, :, :]

        masks_true = masks_true.to(device=device, dtype=torch.float)

        masks_true = torch.argmax(masks_true, dim=1, keepdim=True)

        with torch.no_grad():
            masks_pred = model(images)

        masks_pred = F.softmax(masks_pred, dim=1)
        masks_pred = (masks_pred > 0.5).float()[:, :-1, :, :]
        masks_pred = torch.argmax(masks_pred, dim=1, keepdim=True)

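        # Save the current batch's predicted and ground-truth masks for
        # inspection (overwritten on every iteration).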
        save_image(colour_labels_flat(masks_pred), "pred.png")
        save_image(colour_labels_flat(masks_true), "true.png")

        batch_precision, batch_recall, batch_f1 = compute_precision_recall_f1(
            masks_pred, masks_true
        )

        total_dice += batch_f1
        total_precision += batch_precision
        total_recall += batch_recall

    dice = total_dice / n_val
    precision = total_precision / n_val
    recall = total_recall / n_val
    print(opt.name)
    print(f"Precision: {precision}")
    print(f"Recall: {recall}")
    print(f"Dice: {dice}")
    print(f"N Val {n_val}")
Example #6
def main():
    opt = get_args()
    print(opt)
    set_seed(213)

    if opt.conditional:
        n_classes = 5
    else:
        n_classes = None

    device = get_device()
    print(f"Device count: {torch.cuda.device_count()}")

    output_path = Path(opt.output_dir) / "progan" / opt.name
    output_path.mkdir(parents=True, exist_ok=True)

    # Save options.
    with open(output_path / "opt.json", "w") as f:
        json.dump(vars(opt), f, indent=4)

    n_channels = len(opt.lesions) + 1

    # The initial resolution will be 2**2 = 4.
    # The final resolution will be 2**9 = 512.
    start_depth = 2
    final_depth = 9
    generator = Generator(
        depth=final_depth,
        n_channels=n_channels,
        latent_size=opt.latent_dim,
        n_classes=n_classes,
    )
    discriminator = Discriminator(
        depth=final_depth,
        num_channels=n_channels,
        latent_size=opt.latent_dim,
        n_classes=n_classes,
    )

    generator = DataParallel(generator)
    discriminator = DataParallel(discriminator)

    generator.to(device)
    discriminator.to(device)

    transform = transforms.Compose(
        [
            transforms.Resize(opt.img_size, InterpolationMode.NEAREST),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.ToTensor(),
        ],
    )
    dataset = CombinedDataset(
        return_inst=False,
        return_image=False,
        return_transformed=False,
        label_transform=transform,
    )
    if opt.use_copypaste:
        synthetic_dataset = CopyPasteDataset(label_transform=transform)
        dataset = ConcatDataset((dataset, synthetic_dataset))

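    # Progressive-growing schedule: one entry per resolution stage, training
    # longer with smaller batches as the resolution doubles from 4x4 to 512x512.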
    depth_epochs = [20, 40, 60, 80, 100, 120, 140, 160]
    batch_sizes = [512, 256, 128, 64, 32, 16, 8, 4]

    n_stages = final_depth - start_depth + 1
    assert len(depth_epochs) == n_stages
    assert len(batch_sizes) == n_stages

    total_epochs = sum(depth_epochs)
    logger = ProGANLogger(opt.name, total_epochs, opt.tensorboard)

    start_time = time.time()

    train(
        generator,
        discriminator,
        dataset,
        batch_sizes,
        device,
        output_path,
        opt.lr_g,
        opt.lr_d,
        depth_epochs,
        opt.n_critic,
        opt.n_gen,
        opt.clip_gradient,
        opt.label_smoothing,
        opt.latent_dim,
        n_classes,
        opt.sample_interval,
        opt.chkpt_interval,
        logger,
        opt.log_step,
        opt.lesions,
        opt.use_ada,
        start_depth,
        final_depth,
    )

    logger.close()

    end_time = time.time()
    # Throw away fractional seconds since we don't need that level of precision.
    execution_time = int(end_time - start_time)
    print(f"Finished in {format_seconds(execution_time)}")
Example #7
def main():
    opt = get_args()
    print(opt)

    device = get_device()
    print("Device count", torch.cuda.device_count())

    if opt.seed > -1:
        set_seed(opt.seed)

    output_path = Path("results") / "unet" / opt.name
    checkpoint_path = output_path / "checkpoints"
    checkpoint_path.mkdir(parents=True, exist_ok=True)

    # Save options.
    with open(output_path / "opt.json", "w") as f:
        json.dump(vars(opt), f, indent=4)

    which_labels = sorted([Labels[l] for l in opt.lesions],
                          key=lambda x: x.value)
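    # One segmentation class per selected lesion, plus one for the background.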
    n_classes = len(which_labels) + 1

    model = create_model(opt.load_name, n_classes)
    model = model.to(device=device)

    train_loader, val_loader = make_dataloaders(
        opt.img_size,
        opt.n_synthetic,
        opt.batch_size,
        opt.synthetic_name,
    )

    n_train = len(train_loader.dataset)
    n_val = len(val_loader.dataset)

    print(f"""
        Name:            {opt.name}
        Epochs:          {opt.n_epochs}
        Training size:   {n_train}
        Validation size: {n_val}
        Real size:       {opt.n_real}
        Synthetic size:  {opt.n_synthetic}
        Labels:          {which_labels}
        """)

    logger = UNetLogger(opt.name, opt.n_epochs, opt.tensorboard)

    train(
        model=model,
        epochs=opt.n_epochs,
        lr=opt.lr,
        device=device,
        log_interval=opt.log_interval,
        val_interval=opt.val_interval,
        checkpoint_path=checkpoint_path,
        train_loader=train_loader,
        val_loader=val_loader,
        logger=logger,
        labels=which_labels,
    )

    logger.close()

    print("Finished!")
Example #8
def main():
    opt = get_args()
    print(opt)
    set_seed(213)
    n_classes = 5

    device = get_device()

    output_path = Path(opt.output_dir) / "acgan" / opt.name
    output_path.mkdir(parents=True, exist_ok=True)

    # Save options.
    with open(output_path / "opt.json", "w") as f:
        json.dump(vars(opt), f, indent=4)

    n_channels = len(opt.lesions) + 1

    generator = Generator(n_channels, opt.img_size, n_classes, opt.latent_dim)
    discriminator = Discriminator(n_channels, opt.img_size, n_classes)

    generator.to(device)
    discriminator.to(device)

    # Initialize weights.
    generator.apply(weights_init_normal)
    discriminator.apply(weights_init_normal)

    transform = transforms.Compose([
        transforms.Resize(opt.img_size, InterpolationMode.NEAREST),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.ToTensor(),
    ])
    dataset = CombinedDataset(
        return_inst=False,
        return_image=False,
        return_transformed=False,
        return_filename=False,
        label_transform=transform,
    )
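    # Optionally restrict the training data to samples whose Source matches
    # opt.filter_dataset.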
    if opt.filter_dataset:
        dataset.df = dataset.df[dataset.df["Source"] == opt.filter_dataset]

    if opt.use_copypaste:
        synthetic_dataset = CopyPasteDataset(label_transform=transform)
        dataset = ConcatDataset((dataset, synthetic_dataset))

    dataloader = DataLoader(
        dataset,
        batch_size=opt.batch_size,
        shuffle=True,
        drop_last=True,
    )

    logger = ACGANLogger(opt.name, opt.n_epochs, opt.tensorboard)

    start_time = time.time()

    print(f"Train size: {len(dataset)}")

    train(
        generator,
        discriminator,
        dataloader,
        device,
        output_path,
        opt.lr_g,
        opt.lr_d,
        opt.n_epochs,
        opt.n_critic,
        opt.n_gen,
        opt.clip_gradient,
        opt.label_smoothing,
        opt.latent_dim,
        n_classes,
        opt.sample_interval,
        opt.val_interval,
        opt.chkpt_interval,
        logger,
        opt.log_step,
        opt.lesions,
        opt.use_ada,
    )

    logger.close()

    end_time = time.time()
    # Throw away fractional seconds since we don't need that level of precision.
    execution_time = int(end_time - start_time)
    print(f"Finished in {format_seconds(execution_time)}")