Code Example #1
File: loader.py  Project: chicm/clouds
def get_test_loader(encoder_type, batch_size=16):
    if encoder_type.startswith('myunet'):
        encoder_type = 'resnet50'

    preprocessing_fn = smp.encoders.get_preprocessing_fn(
        encoder_type, 'imagenet')

    sub = pd.read_csv(os.path.join(settings.DATA_DIR, 'sample_submission.csv'))
    sub['label'] = sub['Image_Label'].apply(lambda x: x.split('_')[1])
    sub['im_id'] = sub['Image_Label'].apply(lambda x: x.split('_')[0])

    test_ids = sub['Image_Label'].apply(
        lambda x: x.split('_')[0]).drop_duplicates().values

    test_dataset = CloudDataset(
        df=sub,
        datatype='test',
        img_ids=test_ids,
        transforms=get_validation_augmentation(),
        preprocessing=get_preprocessing(preprocessing_fn))
    test_loader = DataLoader(test_dataset,
                             batch_size=batch_size,
                             shuffle=False,
                             num_workers=24)

    return test_loader
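
A hedged sketch of how the returned loader is typically consumed for inference (the model definition, device handling, and the 0.5 threshold below are illustrative assumptions, not part of loader.py):

import numpy as np
import torch
import segmentation_models_pytorch as smp

# illustrative model; in practice the trained weights would be loaded here
model = smp.Unet('resnet50', encoder_weights=None, classes=4, activation=None)
model.eval()

test_loader = get_test_loader('resnet50', batch_size=16)
all_masks = []
with torch.no_grad():
    for batch in test_loader:
        # the dataset may yield (image, mask) pairs; take the image tensor either way
        images = batch[0] if isinstance(batch, (list, tuple)) else batch
        probs = torch.sigmoid(model(images)).cpu().numpy()  # (B, 4, H, W) probabilities
        all_masks.append(probs > 0.5)  # illustrative threshold
all_masks = np.concatenate(all_masks, axis=0)
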
Code Example #2
    def Predict(self, img_path, vis=True):
        '''
        User function: Run inference on an image and optionally visualize it.
        The output mask is saved as output_mask.npy.

        Args:
            img_path (str): Relative path to the image file
            vis (bool): If True, predicted mask is displayed.

        Returns:
            None. The predicted mask is saved to output_mask.npy.
        '''
        dirPath = "tmp_test"

        if (os.path.isdir(dirPath)):
            shutil.rmtree(dirPath)

        os.mkdir(dirPath)
        os.mkdir(dirPath + "/img_dir")
        os.mkdir(dirPath + "/gt_dir")

        os.system("cp " + img_path + " " + dirPath + "/img_dir")
        os.system("cp " + img_path + " " + dirPath + "/gt_dir")

        x_test_dir = dirPath + "/img_dir"
        y_test_dir = dirPath + "/gt_dir"

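        # round each image dimension up to the nearest multiple of 32 (e.g. 500 -> 512),
        # so the shape stays divisible through the encoder's downsampling stages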
        if (self.system_dict["params"]["image_shape"][0] % 32 != 0):
            self.system_dict["params"]["image_shape"][0] += (
                32 - self.system_dict["params"]["image_shape"][0] % 32)

        if (self.system_dict["params"]["image_shape"][1] % 32 != 0):
            self.system_dict["params"]["image_shape"][1] += (
                32 - self.system_dict["params"]["image_shape"][1] % 32)

        preprocess_input = sm.get_preprocessing(
            self.system_dict["params"]["backbone"])
        test_dataset = Dataset(
            x_test_dir,
            y_test_dir,
            self.system_dict["params"]["classes_dict"],
            classes_to_train=self.system_dict["params"]["classes_to_train"],
            augmentation=get_validation_augmentation(
                self.system_dict["params"]["image_shape"][0],
                self.system_dict["params"]["image_shape"][1]),
            preprocessing=get_preprocessing(preprocess_input),
        )

        test_dataloader = Dataloder(test_dataset, batch_size=1, shuffle=False)

        image, gt_mask = test_dataset[0]
        image = np.expand_dims(image, axis=0)
        pr_mask = self.system_dict["local"]["model"].predict(image).round()
        np.save("output_mask.npy", pr_mask)

        if (vis):
            visualize(
                image=denormalize(image.squeeze()),
                pr_mask=pr_mask[..., 0].squeeze(),
            )
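
A hedged usage sketch for Predict (the instance name gtf and the image path below are illustrative, not from the original class):

import numpy as np

gtf.Predict("sample.jpg", vis=False)   # runs inference and writes output_mask.npy
pr_mask = np.load("output_mask.npy")   # rounded per-class mask, batch dimension included
print(pr_mask.shape)
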
Code Example #3
def main():
    """Parameters initialization and starting attention model training """
    # read command line arguments
    args = get_parser().parse_args()

    # set random seed
    seed_everything(args.seed)

    # paths to dataset
    train_path = osp.join(args.dataset_path, 'train')
    test_path = osp.join(args.dataset_path, 'test')

    # declare Unet model with two output classes (occluders and their shadows)
    model = smp.Unet(encoder_name=args.encoder, classes=2, activation='sigmoid',)
    # replace the first convolutional layer so the model accepts a 4-channel input tensor
    model.encoder.conv1 = nn.Conv2d(4, 64, kernel_size=(7, 7), stride=(2, 2), \
                                    padding=(3, 3), bias=False)

    # declare datasets
    train_dataset = ARDataset(train_path, augmentation=get_training_augmentation(args.img_size), \
        preprocessing=get_preprocessing(),)

    valid_dataset = ARDataset(test_path, augmentation=get_validation_augmentation(args.img_size), \
        preprocessing=get_preprocessing(),)

    # declare loaders
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, \
                              shuffle=True, num_workers=args.num_workers)

    valid_loader = DataLoader(valid_dataset, batch_size=args.batch_size, \
                              shuffle=False, num_workers=args.num_workers)

    # declare loss function, optimizer and metric
    loss = smp.utils.losses.DiceLoss()
    metric = smp.utils.metrics.IoU(threshold=args.iou_th)
    optimizer = torch.optim.Adam([dict(params=model.parameters(), lr=args.lr),])

    # tensorboard
    writer = SummaryWriter()

    # device
    device = torch.device(args.device if torch.cuda.is_available() else 'cpu')

    # start training
    train(
        writer=writer,
        n_epoch=args.n_epoch,
        train_loader=train_loader,
        valid_loader=valid_loader,
        model_path=args.model_path,
        model=model,
        loss=loss,
        metric=metric,
        optimizer=optimizer,
        device=device
    )
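
seed_everything is not defined in this snippet; a typical implementation of such a reproducibility helper (an assumption, not necessarily this project's exact code) is:

import os
import random

import numpy as np
import torch

def seed_everything(seed: int = 42):
    # fix every common source of randomness so training runs are reproducible
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
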
Code Example #4
def main(args):
    """
    Main code for creating the segmentation-only submission file. All masks are
    converted to either "" or RLEs

    Args:
        args (argparse.Namespace): parsed command-line arguments (the result of parse_args)
    Returns:
        None
    """
    torch.cuda.empty_cache()
    gc.collect()

    attention_type = None if args.attention_type == "None" else args.attention_type
    model = smp.Unet(encoder_name=args.encoder,
                     encoder_weights=None,
                     classes=4,
                     activation=None,
                     attention_type=attention_type)
    # setting up the test I/O
    preprocessing_fn = smp.encoders.get_preprocessing_fn(
        args.encoder, "imagenet")
    # setting up the train/val split with filenames
    train, sub, _ = setup_train_and_sub_df(args.dset_path)
    test_ids = sub["ImageId_ClassId"].apply(
        lambda x: x.split("_")[0]).drop_duplicates().values
    # datasets/data loaders
    test_dataset = SteelDataset(
        args.dset_path,
        df=sub,
        datatype="test",
        im_ids=test_ids,
        transforms=get_validation_augmentation(),
        preprocessing=get_preprocessing(preprocessing_fn))
    test_loader = DataLoader(test_dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=0)

    loaders = {"test": test_loader}
    # loading the pickled class_params if they exist
    class_params_path = os.path.join(args.dset_path, "class_params.pickle")
    if os.path.exists(class_params_path):
        print(f"Loading {class_params_path}...")
        # Load data (deserialize)
        with open(class_params_path, "rb") as handle:
            class_params = pickle.load(handle)
    else:
        class_params = "default"

    create_submission(args.checkpoint_path,
                      model=model,
                      loaders=loaders,
                      sub=sub,
                      class_params=class_params)
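
create_submission writes each mask as either an empty string or an RLE, as the docstring says; the encoder itself is not shown here. A minimal sketch of the run-length encoding these competitions conventionally use (column-major pixel order, 1-indexed run starts) would be:

import numpy as np

def mask_to_rle(mask: np.ndarray) -> str:
    # binary (H, W) mask -> "start length start length ...", or "" for an empty mask
    pixels = mask.T.flatten()                    # column-major order, per the competition format
    pixels = np.concatenate([[0], pixels, [0]])  # pad so runs touching the borders are detected
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]                      # turn run-end positions into run lengths
    return " ".join(str(x) for x in runs)
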
Code Example #5
File: test.py  Project: Everypixel/arshadowgan-like
def test_arshadowgan():
    """Test ARShadowGAN-like architecture """
    # parse command line arguments
    args = get_parser().parse_args()

    # create folders
    if not osp.exists(args.result_path):
        os.makedirs(args.result_path)

    # dataset and dataloader declaration
    dataset = ARDataset(args.dataset_path,
                        augmentation=get_validation_augmentation(
                            args.img_size),
                        preprocessing=get_preprocessing(),
                        is_train=False)
    dataloader = DataLoader(dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.num_workers)

    # define device
    device = torch.device(args.device if torch.cuda.is_available() else 'cpu')

    # define model
    model = ARShadowGAN(encoder_att=args.attention_encoder,
                        encoder_SG=args.SG_encoder,
                        model_path_attention=args.path_att,
                        model_path_SG=args.path_SG,
                        device=device)

    # switch the model to evaluation mode
    model.eval()

    # inference
    counter = 0
    for i, data in enumerate(tqdm(dataloader)):
        tensor_att = torch.cat(
            (data[0][:, :3], torch.unsqueeze(data[1][:, -1], dim=1)),
            dim=1).to(device)
        tensor_SG = torch.cat(
            (data[2][:, :3], torch.unsqueeze(data[3][:, -1], dim=1)),
            dim=1).to(device)

        with torch.no_grad():
            result, output_mask1 = model(tensor_att, tensor_SG)

        for j in range(result.shape[0]):  # iterate over the actual batch (the last batch may be smaller)
            counter += 1
            output_image = np.uint8(
                127.5 * (result.cpu().numpy()[j].transpose(1, 2, 0) + 1.0))
            output_image = cv2.cvtColor(output_image, cv2.COLOR_BGR2RGB)
            cv2.imwrite(osp.join(args.result_path,
                                 str(counter) + '.png'), output_image)
Code Example #6
File: loader.py  Project: chicm/clouds
def get_train_val_loaders(encoder_type, batch_size=16, pseudo_label=False):
    if encoder_type.startswith('myunet'):
        encoder_type = 'resnet50'
    preprocessing_fn = smp.encoders.get_preprocessing_fn(
        encoder_type, 'imagenet')
    train, train_ids, valid_ids = prepare_df()
    train['pseudo'] = 0
    pseudo_imgs = set()
    if pseudo_label:
        train_pseudo = prepare_df(
            train_file=f'{settings.DATA_DIR}/sub_blend_1111_1.csv',
            pseudo_label=True)
        train_pseudo['pseudo'] = 1
        pseudo_ids = train_pseudo.im_id.unique().tolist()
        print(pseudo_ids[:10])
        pseudo_imgs = set(pseudo_ids)
        train_ids.extend(pseudo_ids)
        train = pd.concat([train, train_pseudo])
        print(train.head())
        print(train_pseudo.head())
        print(train.shape)
        print(len(train_ids))

    num_workers = 24
    train_dataset = CloudDataset(
        df=train,
        datatype='train',
        img_ids=train_ids,
        transforms=get_training_augmentation(),
        preprocessing=get_preprocessing(preprocessing_fn),
        pseudo_imgs=pseudo_imgs)
    valid_dataset = CloudDataset(
        df=train,
        datatype='valid',
        img_ids=valid_ids,
        transforms=get_validation_augmentation(),
        preprocessing=get_preprocessing(preprocessing_fn))

    train_loader = DataLoader(train_dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=num_workers)
    valid_loader = DataLoader(valid_dataset,
                              batch_size=batch_size,
                              shuffle=False,
                              num_workers=num_workers)

    train_loader.num = len(train_ids)
    valid_loader.num = len(valid_ids)

    loaders = {"train": train_loader, "valid": valid_loader}
    return loaders
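
The loaders dict returned here follows the Catalyst convention used by the training scripts elsewhere in this collection; a sketch of how it might be consumed (the model, loss and optimizer choices below are illustrative, not part of loader.py):

import torch
import segmentation_models_pytorch as smp
from catalyst.dl import SupervisedRunner

loaders = get_train_val_loaders('resnet50', batch_size=16)
model = smp.Unet('resnet50', encoder_weights='imagenet', classes=4, activation=None)
criterion = smp.utils.losses.BCEDiceLoss(eps=1.)
optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)
runner = SupervisedRunner()
runner.train(model=model, criterion=criterion, optimizer=optimizer,
             loaders=loaders, logdir='./logs/clouds', num_epochs=20, verbose=True)
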
Code Example #7
def get_data_loaders(bs=8, num_workers=0, shuffle=True, ts=0.2):
    train_df, img_2_ohe_vector = get_df()
    train_imgs, val_imgs = train_test_split(train_df['Image'].values,
                            test_size=ts,
                            stratify=train_df['Class'].map(lambda x: str(sorted(list(x)))),
                            random_state=42)
    print(train_imgs)
    print(val_imgs)
    print(len(train_imgs))
    print(len(val_imgs))
    train_dataset = CloudDataset(img_2_ohe_vector, img_ids=train_imgs,
                                 transforms=get_training_augmentation())
    train_loader = DataLoader(train_dataset, batch_size=bs,
                              shuffle=shuffle, num_workers=num_workers)

    val_dataset = CloudDataset(img_2_ohe_vector, img_ids=val_imgs,
                                 transforms=get_validation_augmentation())
    val_loader = DataLoader(val_dataset, batch_size=bs,
                              shuffle=False, num_workers=num_workers)  # keep validation order fixed

    return train_loader, val_loader
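
The stratify argument above turns each image's set of cloud classes into a single string key, so train_test_split keeps the class-combination distribution similar in both splits. A tiny illustration (the class values are made up):

classes = {'Sugar', 'Fish'}        # hypothetical multi-label set for one image
key = str(sorted(list(classes)))   # -> "['Fish', 'Sugar']", a stable stratification key
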
Code Example #8
def main(args):
    """
    Main code for creating the classification submission file. "No mask" predictions are left blank,
    and predictions for masks will be "1".

    Args:
        args (argparse.Namespace): parsed command-line arguments (the result of parse_args)
    Returns:
        None
    """
    torch.cuda.empty_cache()
    gc.collect()

    model = ResNet34(pre=None, num_classes=4, use_simple_head=True)
    # setting up the test I/O
    preprocessing_fn = smp.encoders.get_preprocessing_fn("resnet34", "imagenet")
    train, sub, _ = setup_train_and_sub_df(args.dset_path)
    test_ids = sub["ImageId_ClassId"].apply(lambda x: x.split("_")[0]).drop_duplicates().values
    # datasets/data loaders
    test_dataset = ClassificationSteelDataset(
                                              args.dset_path, df=sub, datatype="test", im_ids=test_ids,
                                              transforms=get_validation_augmentation(),
                                              preprocessing=get_preprocessing(preprocessing_fn)
                                             )
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0)

    loaders = {"test": test_loader}
    # loading the pickled class_params if they exist
    class_params_path = os.path.join(args.dset_path, "class_params_classification.pickle")
    if os.path.exists(class_params_path):
        print(f"Loading {class_params_path}...")
        # Load data (deserialize)
        with open(class_params_path, "rb") as handle:
            class_params = pickle.load(handle)
    else:
        class_params = "default"

    create_submission(args.checkpoint_path, model=model, loaders=loaders,
                      sub=sub, class_params=class_params)
Code Example #9
def main(args):
    """
    Main code for training a classification model.

    Args:
        args (argparse.Namespace): parsed command-line arguments (the result of parse_args)
    Returns:
        None
    """
    # Reading in the .csv files
    train = pd.read_csv(os.path.join(args.dset_path, "train.csv"))
    sub = pd.read_csv(os.path.join(args.dset_path, "sample_submission.csv"))

    # setting up the train and submission dataframes (plus per-image mask counts)
    train, sub, id_mask_count = setup_train_and_sub_df(args.dset_path)
    # setting up the train/val split with filenames
    seed_everything(args.split_seed)
    train_ids, valid_ids = train_test_split(id_mask_count["im_id"].values,
                                            random_state=args.split_seed,
                                            stratify=id_mask_count["count"],
                                            test_size=args.test_size)
    # setting up the classification model
    ENCODER_WEIGHTS = "imagenet"
    DEVICE = "cuda"
    model = ResNet34(pre=ENCODER_WEIGHTS, num_classes=4, use_simple_head=True)

    preprocessing_fn = smp.encoders.get_preprocessing_fn(
        "resnet34", ENCODER_WEIGHTS)

    # Setting up the I/O
    train_dataset = ClassificationSteelDataset(
        args.dset_path,
        df=train,
        datatype="train",
        im_ids=train_ids,
        transforms=get_training_augmentation(),
        preprocessing=get_preprocessing(preprocessing_fn),
    )
    valid_dataset = ClassificationSteelDataset(
        args.dset_path,
        df=train,
        datatype="valid",
        im_ids=valid_ids,
        transforms=get_validation_augmentation(),
        preprocessing=get_preprocessing(preprocessing_fn),
    )

    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.num_workers)
    valid_loader = DataLoader(valid_dataset,
                              batch_size=args.batch_size,
                              shuffle=False,
                              num_workers=args.num_workers)

    loaders = {"train": train_loader, "valid": valid_loader}
    # everything is saved here (i.e. weights + stats)
    logdir = "./logs/segmentation"

    # model, criterion, optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)
    scheduler = ReduceLROnPlateau(optimizer, factor=0.15, patience=2)
    criterion = smp.utils.losses.BCEDiceLoss(eps=1.)
    runner = SupervisedRunner()

    runner.train(model=model,
                 criterion=criterion,
                 optimizer=optimizer,
                 scheduler=scheduler,
                 loaders=loaders,
                 callbacks=[
                     DiceCallback(),
                     EarlyStoppingCallback(patience=5, min_delta=0.001)
                 ],
                 logdir=logdir,
                 num_epochs=args.num_epochs,
                 verbose=True)
    utils.plot_metrics(
        logdir=logdir,
        # specify which metrics we want to plot
        metrics=["loss", "dice", "lr", "_base/lr"])
Code Example #10
    def Setup(self):
        '''
        User function: Setup all the parameters

        Args:
            None

        Returns:
            None
        '''
        preprocess_input = sm.get_preprocessing(
            self.system_dict["params"]["backbone"])
        # define network parameters
        self.system_dict["local"]["n_classes"] = 1 if len(
            self.system_dict["dataset"]["train"]
            ["classes_to_train"]) == 1 else (
                len(self.system_dict["dataset"]["train"]["classes_to_train"]) +
                1)  # case for binary and multiclass segmentation
        activation = 'sigmoid' if self.system_dict["local"][
            "n_classes"] == 1 else 'softmax'

        # create model
        if (self.system_dict["params"]["model"] == "Unet"):
            self.system_dict["local"]["model"] = sm.Unet(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)
        elif (self.system_dict["params"]["model"] == "FPN"):
            self.system_dict["local"]["model"] = sm.FPN(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)
        elif (self.system_dict["params"]["model"] == "Linknet"):
            self.system_dict["local"]["model"] = sm.Linknet(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)
        elif (self.system_dict["params"]["model"] == "PSPNet"):
            self.system_dict["local"]["model"] = sm.PSPNet(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)

        # define optimizer
        optim = keras.optimizers.Adam(self.system_dict["params"]["lr"])

        # Segmentation Models losses can be combined with '+' and scaled by an integer or float factor
        dice_loss = sm.losses.DiceLoss()
        focal_loss = sm.losses.BinaryFocalLoss() if self.system_dict["local"][
            "n_classes"] == 1 else sm.losses.CategoricalFocalLoss()
        total_loss = dice_loss + (1 * focal_loss)

        # actually, total_loss can be imported directly from the library; the code above just shows how to combine losses manually
        # total_loss = sm.losses.binary_focal_dice_loss # or sm.losses.categorical_focal_dice_loss

        metrics = [
            sm.metrics.IOUScore(threshold=0.5),
            sm.metrics.FScore(threshold=0.5)
        ]

        # compile the Keras model with the defined optimizer, loss and metrics
        self.system_dict["local"]["model"].compile(optim, total_loss, metrics)

        # Dataset for train images
        train_dataset = Dataset(
            self.system_dict["dataset"]["train"]["img_dir"],
            self.system_dict["dataset"]["train"]["mask_dir"],
            self.system_dict["dataset"]["train"]["classes_dict"],
            classes_to_train=self.system_dict["dataset"]["train"]
            ["classes_to_train"],
            augmentation=get_training_augmentation(),
            preprocessing=get_preprocessing(preprocess_input),
        )

        if (self.system_dict["params"]["image_shape"][0] % 32 != 0):
            self.system_dict["params"]["image_shape"][0] += (
                32 - self.system_dict["params"]["image_shape"][0] % 32)

        if (self.system_dict["params"]["image_shape"][1] % 32 != 0):
            self.system_dict["params"]["image_shape"][1] += (
                32 - self.system_dict["params"]["image_shape"][1] % 32)

        # Dataset for validation images
        if (self.system_dict["dataset"]["val"]["status"]):
            valid_dataset = Dataset(
                self.system_dict["dataset"]["val"]["img_dir"],
                self.system_dict["dataset"]["val"]["mask_dir"],
                self.system_dict["dataset"]["train"]["classes_dict"],
                classes_to_train=self.system_dict["dataset"]["train"]
                ["classes_to_train"],
                augmentation=get_validation_augmentation(
                    self.system_dict["params"]["image_shape"][0],
                    self.system_dict["params"]["image_shape"][1]),
                preprocessing=get_preprocessing(preprocess_input),
            )
        else:
            valid_dataset = Dataset(
                self.system_dict["dataset"]["train"]["img_dir"],
                self.system_dict["dataset"]["train"]["mask_dir"],
                self.system_dict["dataset"]["train"]["classes_dict"],
                classes_to_train=self.system_dict["dataset"]["train"]
                ["classes_to_train"],
                augmentation=get_validation_augmentation(
                    self.system_dict["params"]["image_shape"][0],
                    self.system_dict["params"]["image_shape"][1]),
                preprocessing=get_preprocessing(preprocess_input),
            )

        self.system_dict["local"]["train_dataloader"] = Dataloder(
            train_dataset,
            batch_size=self.system_dict["params"]["batch_size"],
            shuffle=True)
        self.system_dict["local"]["valid_dataloader"] = Dataloder(
            valid_dataset, batch_size=1, shuffle=False)
Code Example #11
train_ids, valid_ids = train_test_split(id_mask_count['img_id'].values, random_state=42, stratify=id_mask_count['count'], test_size=0.1)
test_ids = sub['Image_Label'].apply(lambda x: x.split('_')[0]).drop_duplicates().values

print("creating preprocessing module...")

ENCODER = 'resnet50'
ENCODER_WEIGHTS = 'imagenet'
preprocessing_fn = smp.encoders.get_preprocessing_fn(ENCODER, ENCODER_WEIGHTS)

print("creating data loader...")

num_workers = 4
bs = 16

train_dataset = CloudDataset(path = path, df=train, datatype='train', img_ids=train_ids, transforms = utils.get_training_augmentation(), preprocessing = utils.get_preprocessing(preprocessing_fn))
valid_dataset = CloudDataset(path = path, df=train, datatype='valid', img_ids=valid_ids, transforms = utils.get_validation_augmentation(), preprocessing = utils.get_preprocessing(preprocessing_fn))

train_loader = DataLoader(train_dataset, batch_size=bs, shuffle=True, num_workers=num_workers)
valid_loader = DataLoader(valid_dataset, batch_size=bs, shuffle=False, num_workers=num_workers)

loaders = {
    "train": train_loader,
    "valid": valid_loader
}

print("setting for training...")

ACTIVATION = None
model = smp.DeepLabV3Plus(
    encoder_name=ENCODER, 
    encoder_weights=ENCODER_WEIGHTS, 
Code Example #12
def main(args):
    """
    Main code for training a U-Net with a user-defined encoder.
    Args:
        args (argparse.Namespace): parsed command-line arguments (the result of parse_args)
    Returns:
        None
    """
    # setting up the train and submission dataframes (plus per-image mask counts)
    train, sub, id_mask_count = setup_train_and_sub_df(args.dset_path)
    # setting up the train/val split with filenames
    seed_everything(args.split_seed)
    train_ids, valid_ids = train_test_split(id_mask_count["im_id"].values,
                                            random_state=args.split_seed,
                                            stratify=id_mask_count["count"],
                                            test_size=args.test_size)
    # setting up model (U-Net with ImageNet Encoders)
    ENCODER_WEIGHTS = "imagenet"
    DEVICE = "cuda"

    attention_type = None if args.attention_type == "None" else args.attention_type
    model = smp.Unet(encoder_name=args.encoder,
                     encoder_weights=ENCODER_WEIGHTS,
                     classes=4,
                     activation=None,
                     attention_type=attention_type)
    preprocessing_fn = smp.encoders.get_preprocessing_fn(
        args.encoder, ENCODER_WEIGHTS)

    # Setting up the I/O
    train_dataset = SteelDataset(
        args.dset_path,
        df=train,
        datatype="train",
        im_ids=train_ids,
        transforms=get_training_augmentation(),
        preprocessing=get_preprocessing(preprocessing_fn),
        use_resized_dataset=args.use_resized_dataset)
    valid_dataset = SteelDataset(
        args.dset_path,
        df=train,
        datatype="valid",
        im_ids=valid_ids,
        transforms=get_validation_augmentation(),
        preprocessing=get_preprocessing(preprocessing_fn),
        use_resized_dataset=args.use_resized_dataset)

    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.num_workers)
    valid_loader = DataLoader(valid_dataset,
                              batch_size=args.batch_size,
                              shuffle=False,
                              num_workers=args.num_workers)

    loaders = {"train": train_loader, "valid": valid_loader}
    # everything is saved here (i.e. weights + stats)
    logdir = "./logs/segmentation"

    # model, criterion, optimizer
    # separate learning rates for the decoder and encoder parameter groups
    optimizer = torch.optim.Adam([
        {
            "params": model.decoder.parameters(),
            "lr": args.decoder_lr
        },
        {
            "params": model.encoder.parameters(),
            "lr": args.encoder_lr
        },
    ])
    scheduler = ReduceLROnPlateau(optimizer, factor=0.15, patience=2)
    criterion = smp.utils.losses.BCEDiceLoss(eps=1.)
    runner = SupervisedRunner()

    callbacks_list = [
        DiceCallback(),
        EarlyStoppingCallback(patience=5, min_delta=0.001),
    ]
    if args.checkpoint_path != "None":  # hacky way to say no checkpoint callback but eh what the heck
        ckpoint_p = Path(args.checkpoint_path)
        fname = ckpoint_p.name
        resume_dir = str(ckpoint_p.parents[0]
                         )  # everything in the path besides the base file name
        print(
            f"Loading {fname} from {resume_dir}. Checkpoints will also be saved in {resume_dir}."
        )
        callbacks_list = callbacks_list + [
            CheckpointCallback(resume=fname, resume_dir=resume_dir),
        ]

    runner.train(model=model,
                 criterion=criterion,
                 optimizer=optimizer,
                 scheduler=scheduler,
                 loaders=loaders,
                 callbacks=callbacks_list,
                 logdir=logdir,
                 num_epochs=args.num_epochs,
                 verbose=True)
Code Example #13
def main():
    """Parameters initialization and starting SG model training """
    # read command line arguments
    args = get_parser().parse_args()

    # set random seed
    seed_everything(args.seed)

    # paths to dataset
    train_path = osp.join(args.dataset_path, 'train')
    test_path = osp.join(args.dataset_path, 'test')

    # declare generator and discriminator models
    generator = Generator_with_Refin(args.encoder)
    discriminator = Discriminator(input_shape=(3,args.img_size,args.img_size))

    # load weights
    if args.gen_weights != '':
        generator.load_state_dict(torch.load(args.gen_weights))
        print('Generator weights loaded!')

    if args.discr_weights != '':
        discriminator.load_state_dict(torch.load(args.discr_weights))
        print('Discriminator weights loaded!')

    # declare datasets
    train_dataset = ARDataset(train_path,
                              augmentation=get_training_augmentation(args.img_size),
                              augmentation_images=get_image_augmentation(),
                              preprocessing=get_preprocessing(),)

    valid_dataset = ARDataset(test_path,
                              augmentation=get_validation_augmentation(args.img_size),
                              preprocessing=get_preprocessing(),)

    # declare loaders
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
    valid_loader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)

    # declare loss functions, optimizers and scheduler
    l2loss = nn.MSELoss()
    perloss = ContentLoss(feature_extractor="vgg16", layers=("relu3_3", ))
    GANloss = nn.MSELoss()

    optimizer_G = torch.optim.Adam([dict(params=generator.parameters(), lr=args.lr_G),])
    optimizer_D = torch.optim.Adam([dict(params=discriminator.parameters(), lr=args.lr_D),])

    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer_G, mode='min', factor=0.9, patience=args.patience)

    # device
    device = torch.device(args.device if torch.cuda.is_available() else 'cpu')

    # tensorboard
    writer = SummaryWriter()

    # start training
    train(
        generator=generator,
        discriminator=discriminator,
        device=device,
        n_epoch=args.n_epoch,
        optimizer_G=optimizer_G,
        optimizer_D=optimizer_D,
        train_loader=train_loader,
        valid_loader=valid_loader,
        scheduler=scheduler,
        losses=[l2loss, perloss, GANloss],
        models_paths=[args.Gmodel_path, args.Dmodel_path],
        bettas=[args.betta1, args.betta2, args.betta3],
        writer=writer,
    )
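
The train helper called above is not shown. Under the assumption that the three losses are blended with the betta weights in the order they are passed, one generator update inside it might look roughly like this sketch (tensor shapes, model signatures and batch unpacking are all assumptions):

import torch

def generator_step(generator, discriminator, batch, losses, bettas, optimizer_G, device):
    # hypothetical single optimization step for the generator
    l2loss, perloss, GANloss = losses
    betta1, betta2, betta3 = bettas
    inputs, target = batch[0].to(device), batch[1].to(device)

    optimizer_G.zero_grad()
    fake = generator(inputs)
    pred_fake = discriminator(fake)
    loss_G = (betta1 * l2loss(fake, target)
              + betta2 * perloss(fake, target)
              + betta3 * GANloss(pred_fake, torch.ones_like(pred_fake)))
    loss_G.backward()
    optimizer_G.step()
    return loss_G.item()
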
Code Example #14
File: train.py  Project: iliailmer/kaggle_clouds
                             path=path,
                             datatype='train',
                             preload=False,
                             img_ids=train_ids,
                             filter_bad_images=True,
                             transforms=get_training_augmentation(
                                 size=(args.size, args.size * 2), p=0.5),
                             preprocessing=get_preprocessing(preprocessing_fn))
valid_dataset = CloudDataset(df=train,
                             image_size=(args.size, args.size * 2),
                             path=path,
                             datatype='valid',
                             preload=False,
                             img_ids=valid_ids,
                             filter_bad_images=True,
                             transforms=get_validation_augmentation(
                                 (args.size, args.size * 2)),
                             preprocessing=get_preprocessing(preprocessing_fn))

train_loader = DataLoader(train_dataset,
                          batch_size=bs,
                          shuffle=True,
                          num_workers=num_workers)
valid_loader = DataLoader(valid_dataset,
                          batch_size=bs,
                          shuffle=False,
                          num_workers=num_workers)

loaders = {"train": train_loader, "valid": valid_loader}

runner = SupervisedRunner(model=model,
                          device='cuda',
Code Example #15
def get_train_val_loaders(encoder_type, batch_size=16, ifold=0):
    if encoder_type.startswith('myunet'):
        encoder_type = 'resnet50'
    preprocessing_fn = smp.encoders.get_preprocessing_fn(encoder_type, 'imagenet')
    train, train_ids, valid_ids = prepare_df(ifold=ifold)
    print('val:', valid_ids[:10])
    num_workers = 24
    train_dataset = CloudDataset(df=train, datatype='train', img_ids=train_ids, transforms = get_training_augmentation(), preprocessing=get_preprocessing(preprocessing_fn))
    valid_dataset = CloudDataset(df=train, datatype='valid', img_ids=valid_ids, transforms = get_validation_augmentation(), preprocessing=get_preprocessing(preprocessing_fn))

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)

    train_loader.num = len(train_ids)
    valid_loader.num = len(valid_ids)

    loaders = {
        "train": train_loader,
        "valid": valid_loader
    }
    return loaders
Code Example #16
path = './dataset/'
num_workers = 0
bs = 1

sub = pd.read_csv(os.path.join(path, 'sample_submission.csv'))
sub['label'] = sub['Image_Label'].apply(lambda x: x.split('_')[1])
sub['im_id'] = sub['Image_Label'].apply(lambda x: x.split('_')[0])
test_ids = sub['Image_Label'].apply(
    lambda x: x.split('_')[0]).drop_duplicates().values

test_dataset = CloudDataset(
    path=path,
    df=sub,
    datatype='test',
    img_ids=test_ids,
    transforms=utils.get_validation_augmentation(),
    preprocessing=utils.get_preprocessing(preprocessing_fn))
test_loader = DataLoader(test_dataset,
                         batch_size=bs,
                         shuffle=False,
                         num_workers=num_workers)

print("loading state dict...")
model_path = 'best_dice_model.pth'
model = torch.load(model_path)

print("predicting...")

sigmoid = lambda x: 1 / (1 + np.exp(-x))
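# per-class post-processing parameters, assumed to be (probability threshold, minimum mask size in pixels)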
class_params = {
    0: (0.65, 10000),
Code Example #17
    num_classes = len(select_class_rgb_values)

    model, preprocessing_fn = get_deeplab_model(num_classes, cfg.MODEL.encoder)

    # Get train and val dataset instances
    train_dataset = LandCoverDataset(
        train_df,
        augmentation=get_training_augmentation(cfg.TRAIN.augment_type),
        preprocessing=get_preprocessing(preprocessing_fn),
        class_rgb_values=select_class_rgb_values,
    )

    valid_dataset = LandCoverDataset(
        val_df,
        augmentation=get_validation_augmentation(),
        preprocessing=get_preprocessing(preprocessing_fn),
        class_rgb_values=select_class_rgb_values,
    )

    if cfg.DEBUG:
        # when only debugging the code, restrict train and val to 10 samples each
        train_dataset = Subset(train_dataset, [n for n in range(10)])
        valid_dataset = Subset(valid_dataset, [n for n in range(10)])

    # Get train and val data loaders
    train_loader = DataLoader(train_dataset,
                              batch_size=cfg.TRAIN.batch_size,
                              shuffle=True,
                              drop_last=True,
                              pin_memory=True,