batch_size = 32
num_workers = 12

# Dataset-specific per-channel statistics (the negative means suggest non-RGB,
# e.g. dB-scaled, imagery); max_pixel_value=1.0 indicates the inputs are
# already float-valued.
mean = [-17.398721187929123, -10.020421713800838, -12.10841437771272]
std = [6.290316422115964, 5.776936185931195, 5.795418280085563]
max_value = 1.0
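
# Statistics like the above are typically computed once over the training
# images; a minimal sketch (hypothetical helper; assumes each image is an
# HxWxC float array):
import numpy as np

def compute_channel_stats(images):
    # Pool all pixels of all images into one (num_pixels, C) array
    pixels = np.concatenate([img.reshape(-1, img.shape[-1]) for img in images], axis=0)
    return pixels.mean(axis=0).tolist(), pixels.std(axis=0).tolist()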

transforms = A.Compose(
    [A.Normalize(mean=mean, std=std, max_pixel_value=max_value),
     ToTensorV2()])

# Only the validation loader is needed for inference
_, data_loader, _ = get_train_val_loaders(
    train_ds,
    val_ds,
    train_transforms=transforms,
    val_transforms=transforms,
    batch_size=batch_size,
    num_workers=num_workers,
    val_batch_size=batch_size,
    pin_memory=True,
)

prepare_batch = inference_prepare_batch_f32

# Image denormalization function to plot predictions with images
img_denormalize = partial(denormalize, mean=mean, std=std)
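
# `denormalize` is imported from the project's helpers; a minimal sketch of
# what it presumably does (inverting A.Normalize on a CHW tensor):
import torch

def denormalize_sketch(t, mean, std, max_pixel_value=1.0):
    mean = torch.tensor(mean, dtype=t.dtype, device=t.device).view(-1, 1, 1)
    std = torch.tensor(std, dtype=t.dtype, device=t.device).view(-1, 1, 1)
    return (t * std + mean) * max_pixel_value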

#################### Model ####################

model = FPN(encoder_name='se_resnext50_32x4d', classes=2, encoder_weights=None)
run_uuid = "5230c20f609646cb9870a211036ea5cb"
weights_filename = "best_model_67_val_miou_bg=0.7574240313552584.pth"
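
# A minimal sketch of how run_uuid and weights_filename are typically consumed
# (assumption: the checkpoint is stored as an MLflow run artifact; the exact
# retrieval mechanism is not shown in the source):
import mlflow
import torch

client = mlflow.tracking.MlflowClient()
local_path = client.download_artifacts(run_uuid, weights_filename)
model.load_state_dict(torch.load(local_path, map_location="cpu"))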
Example #2
# NOTE: the start of this example is truncated in the source; the opening of
# the training pipeline below is a hedged reconstruction (the A.OneOf grouping
# is an assumption; only A.Flip() survives the truncation):
train_transforms = A.Compose([
    A.OneOf([
        A.Flip(),
    ]),
    A.Normalize(mean=mean, std=std, max_pixel_value=max_value),
    ToTensorV2()
])

val_transforms = A.Compose(
    [A.Normalize(mean=mean, std=std, max_pixel_value=max_value),
     ToTensorV2()])

train_loader, val_loader, train_eval_loader = get_train_val_loaders(
    train_ds,
    val_ds,
    train_transforms=train_transforms,
    val_transforms=val_transforms,
    batch_size=batch_size,
    num_workers=num_workers,
    val_batch_size=val_batch_size,
    pin_memory=True,
    train_sampler=train_sampler,
    limit_train_num_samples=100 if debug else None,
    limit_val_num_samples=100 if debug else None)

accumulation_steps = 2

prepare_batch = prepare_batch_fp32
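
# A minimal sketch of how accumulation_steps is consumed in the training step
# (assumption: an ignite-style update function; `model`, `optimizer`,
# `criterion`, and `device` are defined elsewhere in the config):
def train_update_fn(engine, batch):
    model.train()
    x, y = prepare_batch(batch, device=device, non_blocking=True)
    loss = criterion(model(x), y) / accumulation_steps
    loss.backward()
    # Step the optimizer only every accumulation_steps iterations
    if engine.state.iteration % accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()
    return loss.item()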


# Image denormalization function to plot predictions with images
def img_denormalize(nimg):
    img = denormalize(nimg, mean=mean, std=std)
    # Keep only the first three channels (RGB) for plotting
    return img[(0, 1, 2), :, :]
Example #3
val_transforms = A.Compose(
    [
        A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT),
        A.Normalize(mean=mean, std=std),
        ignore_mask_boundaries,
        ToTensor(),
    ]
)
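
# `ignore_mask_boundaries` is a plain callable used as a pipeline step; a
# minimal sketch of what it presumably does (remapping Pascal VOC's 255
# "void"/boundary label; the exact signature is an assumption based on how
# older albumentations versions invoke Compose steps):
def ignore_mask_boundaries_sketch(force_apply=False, **kwargs):
    kwargs["mask"][kwargs["mask"] == 255] = 0  # treat boundary pixels as background
    return kwargs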


train_loader, val_loader, train_eval_loader = get_train_val_loaders(
    root_path=data_path,
    train_transforms=train_transforms,
    val_transforms=val_transforms,
    batch_size=batch_size,
    num_workers=num_workers,
    val_batch_size=val_batch_size,
    limit_train_num_samples=100 if debug else None,
    limit_val_num_samples=100 if debug else None,
)

prepare_batch = prepare_batch_fp32

# Image denormalization function to plot predictions with images
img_denormalize = partial(denormalize, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))

# ##############################
# Setup models
# ##############################

num_classes = 21
Example #4
# NOTE: the start of this example is truncated in the source; the training
# pipeline is assumed to close as follows:
train_transforms = A.Compose([
    # ... (augmentations truncated in the source) ...
])

val_transforms = A.Compose([
    A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT),
    A.Normalize(mean=mean, std=std),
    ignore_mask_boundaries,
    ToTensor(),
])

train_loader, val_loader, train_eval_loader = get_train_val_loaders(
    root_path=data_path,
    train_transforms=train_transforms,
    val_transforms=val_transforms,
    batch_size=batch_size,
    num_workers=num_workers,
    val_batch_size=val_batch_size,
    with_sbd=sbd_data_path,
    train_sampler='distributed',
    val_sampler='distributed',
    limit_train_num_samples=100 if debug else None,
    limit_val_num_samples=100 if debug else None,
    random_seed=seed)

prepare_batch = prepare_batch_fp32

# Image denormalization function to plot predictions with images
img_denormalize = partial(denormalize,
                          mean=(0.485, 0.456, 0.406),
                          std=(0.229, 0.224, 0.225))

# ##############################
Example #5
# NOTE: the start of this example is truncated in the source; the training
# pipeline is assumed to close as follows:
train_transforms = A.Compose([
    # ... (augmentations truncated in the source) ...
    ToTensor(),
])

val_transforms = A.Compose([
    # https://github.com/facebookresearch/FixRes/blob/b27575208a7c48a3a6e0fa9efb57baa4021d1305/imnet_resnet50_scratch/transforms.py#L76
    A.Resize(int((256 / 224) * val_crop_size), int((256 / 224) * val_crop_size)),
    A.CenterCrop(val_crop_size, val_crop_size),
    A.Normalize(mean=mean, std=std),
    ToTensor(),
])
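
# The (256 / 224) factor reproduces the standard ImageNet eval protocol
# (resize to 256, center-crop 224) at an arbitrary crop size: for
# val_crop_size = 320 this resizes to int((256 / 224) * 320) = 365 before the
# 320x320 center crop.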

train_loader, val_loader, train_eval_loader = get_train_val_loaders(
    data_path,
    train_transforms=train_transforms,
    val_transforms=val_transforms,
    batch_size=batch_size,
    num_workers=num_workers,
    val_batch_size=batch_size,
)

# Image denormalization function to plot predictions with images
img_denormalize = partial(denormalize, mean=mean, std=std)

# ##############################
# Setup Model
# ##############################

model = resnet50(pretrained=False)

# ##############################
# Setup Solver
Example #6
val_transforms = A.Compose([
    # https://github.com/facebookresearch/FixRes/blob/b27575208a7c48a3a6e0fa9efb57baa4021d1305/imnet_resnet50_scratch/transforms.py#L76
    A.Resize(int((256 / 224) * val_crop_size), int((256 / 224) * val_crop_size)),
    A.CenterCrop(val_crop_size, val_crop_size),
    A.Normalize(mean=mean, std=std),
    ToTensor(),
])

train_loader, val_loader, train_eval_loader = get_train_val_loaders(
    data_path,
    train_transforms=train_transforms,
    val_transforms=val_transforms,
    batch_size=batch_size,
    num_workers=num_workers,
    val_batch_size=batch_size,
    pin_memory=True,
    train_sampler="distributed",
    val_sampler="distributed",
)
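
# With train_sampler="distributed", each process reads its own shard; correct
# shuffling then requires re-seeding the sampler every epoch. A minimal sketch
# (assumption: the loader wraps torch.utils.data.distributed.DistributedSampler;
# num_epochs is hypothetical):
for epoch in range(num_epochs):
    train_loader.sampler.set_epoch(epoch)
    # ... run one training epoch over train_loader ...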

# Image denormalization function to plot predictions with images
img_denormalize = partial(denormalize, mean=mean, std=std)

# ##############################
# Setup Model
# ##############################

model = resnet50(pretrained=False)
Example #7
# NOTE: the start of this example is truncated in the source; the validation
# pipeline is assumed to open as below, and `train_transform_fn` (used further
# down) is defined in the truncated part.
val_transforms = Compose([
    CenterCrop(img_size, img_size),
    Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
    ignore_mask_boundaries,
    ToTensor(),
])
val_transform_fn = lambda dp: val_transforms(**dp)
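
# The datasets are assumed to yield dict datapoints such as
# {"image": ..., "mask": ...}; albumentations' Compose takes keyword
# arguments, hence the `**dp` unpacking:
#   out = val_transform_fn({"image": img, "mask": mask})  # -> transformed dict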

batch_size = 8
num_workers = 10
non_blocking = True

train_loader, val_loader, train_eval_loader = get_train_val_loaders(
    root_path=data_path,
    train_transforms=train_transform_fn,
    val_transforms=val_transform_fn,
    batch_size=batch_size,
    num_workers=num_workers,
    val_batch_size=batch_size * 2,
    limit_train_num_samples=250 if debug else None,
    limit_val_num_samples=250 if debug else None,
    random_seed=seed)

prepare_batch = prepare_batch_fp32

val_interval = 5

num_classes = 21
model = DeepLabV3(build_resnet18_backbone, num_classes=num_classes)

criterion = nn.CrossEntropyLoss()

lr = 0.007 / 4.0 * batch_size
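
# Linear LR scaling rule: the base rate 0.007 is assumed to be tuned for a
# batch size of 4, so with batch_size = 8 above this yields lr = 0.014.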
Example #8
data_dir = Path('/media/wwymak/Storage/spacenet/AOI_3_Paris_Train')
image_dir = data_dir / 'RGB-PanSharpen'
mask_dir = data_dir / 'masks'
summary_data_filepath = data_dir / 'summaryData' / 'AOI_3_Paris_Train_Building_Solutions.csv'

mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)


train_transforms = get_train_augmentation()
val_transforms = get_validation_augmentation()


train_loader, val_loader, train_eval_loader = get_train_val_loaders(
    summary_data_filepath=summary_data_filepath,
    train_transforms=train_transforms,
    val_transforms=val_transforms,
    batch_size=batch_size,
    num_workers=num_workers,
    limit_train_num_samples=100 if debug else None,
    limit_val_num_samples=100 if debug else None)

prepare_batch = prepare_batch_fp32

# Image denormalization function to plot predictions with images
img_denormalize = partial(denormalize, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))

# ##############################
# Setup models
# ##############################

num_classes = 1
# model = deeplabv3_resnet101(num_classes=num_classes)
model, preprocessing_function = unet_resnet('efficientnet-b0')
batch_size = 4
num_workers = 12

if __name__ == "__main__":
    model, preprocessing_fn = unet_resnet(ENCODER)
    runner = SupervisedRunner(device="cuda:0", input_key="image", input_target_key="mask", model=model)

    summary_data_df = pd.read_csv(summary_data)
    ground_truth_data_df = pd.read_csv(ground_truth_data)

    train_loader, val_loader, test_loader = get_train_val_loaders(
        summary_data_filepath=summary_data,
        train_transforms=get_train_augmentation(is_display=False, image_size=512, normalisation_fn=None),
        val_transforms=get_validation_augmentation(is_display=False, image_size=512, normalisation_fn=None),
        batch_size=batch_size,
        num_workers=num_workers, drop_empty_images=False
    )

    predictions_all = []
    image_filepaths_all = []
    image_ids_all = []

    # Run inference on every split and collect logits with their filepaths
    for loader in [train_loader, val_loader, test_loader]:
        predictions = np.vstack([
            batch["logits"].cpu().numpy()
            for batch in runner.predict_loader(loader=loader, resume=f"{logdir}/checkpoints/best.pth")
        ])
        image_filepaths = loader.dataset.image_filepath_list
        predictions_all.append(predictions)
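        image_filepaths_all.extend(image_filepaths)
        # The source is truncated here; deriving image ids from the filepaths
        # below is an assumption (filepaths are assumed to be pathlib.Path objects).
        image_ids_all.extend(fp.stem for fp in image_filepaths)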