Example #1
def validate(model, val_dataset, config):
    loaders = collections.OrderedDict()
    val_loader = utils.get_loader(
        val_dataset,
        open_fn=lambda x: {
            "input_audio": x[-1],
            "input_video": x[1],
            "targets": x[0]
        },
        batch_size=config.batch_size,
        num_workers=config.workers,
        shuffle=False,
    )

    loaders["valid"] = val_loader

    runner = SupervisedRunner(
        input_key=["input_audio",
                   "input_video"])  # parameters of the model in forward(...)
    runner.infer(
        model,
        loaders,
        callbacks=collections.OrderedDict({
            "snr_callback": SNRCallback(),
            "sdr_callback": SDRCallback()
        }),
        verbose=True,
    )
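SNRCallback and SDRCallback are project-specific metric callbacks that are not shown in this listing. A minimal sketch of the signal-to-noise-ratio computation such a callback would presumably wrap (the function name and tensor shapes are assumptions, not part of the original code):

import torch

def batch_snr_db(estimate: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    # Assumed metric: SNR in dB between the reconstructed and reference signals,
    # averaged over the batch dimension.
    noise = target - estimate
    ratio = target.pow(2).sum(dim=-1) / noise.pow(2).sum(dim=-1).clamp_min(1e-8)
    return (10 * torch.log10(ratio)).mean()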
Example #2
def generate_test_preds(class_params):

    preprocessing_fn = smp.encoders.get_preprocessing_fn(ENCODER, ENCODER_WEIGHTS)
    dummy_dataset = CloudDataset(df=sub, datatype='test', img_ids=test_ids[:1],  transforms=get_validation_augmentation(),
                                preprocessing=get_preprocessing(preprocessing_fn))
    dummy_loader = DataLoader(dummy_dataset, batch_size=1, shuffle=False, num_workers=0)

    model = smp.FPN(
        encoder_name=ENCODER,
        encoder_weights=ENCODER_WEIGHTS,
        classes=4,
        activation=ACTIVATION,
    )
    runner = SupervisedRunner(model)

    # HACK: We are loading a few examples from our dummy loader so catalyst will properly load the weights
    # from our checkpoint
    loaders = {"test": dummy_loader}
    runner.infer(
        model=model,
        loaders=loaders,
        callbacks=[
            CheckpointCallback(
                resume=f"{logdir}/checkpoints/best.pth"),
            InferCallback()
        ],
    )

    # Now we do real inference on the full dataset
    test_dataset = CloudDataset(df=sub, datatype='test', img_ids=test_ids,  transforms=get_validation_augmentation(),
                                preprocessing=get_preprocessing(preprocessing_fn))
    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=0)

    encoded_pixels = []
    image_id = 0
    for i, test_batch in enumerate(tqdm.tqdm(test_loader)):
        runner_out = runner.predict_batch({"features": test_batch[0].cuda()})['logits'].cpu().detach().numpy()
        for batch in runner_out:
            for probability in batch:

                # probability = probability.cpu().detach().numpy()
                if probability.shape != (350, 525):
                    probability = cv2.resize(probability, dsize=(525, 350), interpolation=cv2.INTER_LINEAR)
                predict, num_predict = post_process(sigmoid(probability), class_params[image_id % 4][0],
                                                    class_params[image_id % 4][1])
                if num_predict == 0:
                    encoded_pixels.append('')
                else:
                    r = mask2rle(predict)
                    encoded_pixels.append(r)
                image_id += 1

    print("Saving submission...")
    sub['EncodedPixels'] = encoded_pixels
    sub.to_csv('submission.csv', columns=['Image_Label', 'EncodedPixels'], index=False)
    print("Saved.")
Example #3
def generate_valid_preds(args):

    train_ids, valid_ids, logdir = args
    preprocessing_fn = smp.encoders.get_preprocessing_fn(ENCODER, 'imagenet')
    valid_dataset = CloudDataset(
        df=train,
        datatype='valid',
        img_ids=valid_ids,
        transforms=get_validation_augmentation(),
        preprocessing=get_preprocessing(preprocessing_fn))
    valid_loader = DataLoader(valid_dataset,
                              batch_size=1,
                              shuffle=False,
                              num_workers=0)

    model = smp.Unet(
        encoder_name=ENCODER,
        encoder_weights=ENCODER_WEIGHTS,
        classes=4,
        activation=ACTIVATION,
    )

    runner = SupervisedRunner()
    # Generate validation predictions
    loaders = {"infer": valid_loader}
    runner.infer(
        model=model,
        loaders=loaders,
        callbacks=[
            CheckpointCallback(resume=f"{logdir}/checkpoints/best.pth"),
            InferCallback()
        ],
    )

    valid_preds = np.load('data/valid_preds.npy')

    for im_id, preds in zip(valid_ids,
                            runner.callbacks[0].predictions["logits"]):

        preds = preds.transpose((1, 2, 0))
        preds = cv2.resize(preds, (525, 350))
        preds = preds.transpose((2, 0, 1))

        indexes = train.index[train['im_id'] == im_id]
        valid_preds[indexes[0]] = preds[0]  # fish
        valid_preds[indexes[1]] = preds[1]  # flower
        valid_preds[indexes[2]] = preds[2]  # gravel
        valid_preds[indexes[3]] = preds[3]  # sugar

    np.save('data/valid_preds.npy', valid_preds)

    return True
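generate_valid_preds assumes data/valid_preds.npy already exists before it is loaded and updated. A plausible one-time initialization, with the shape inferred from how the array is indexed above (one 350x525 map per Image_Label row of train); this is an assumption, not code from the original project:

import numpy as np

valid_preds = np.zeros((len(train), 350, 525), dtype=np.float32)
np.save('data/valid_preds.npy', valid_preds)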
Example #4
def run_validation(data,
                   valid_path,
                   image_size,
                   batch_size,
                   splits,
                   fold_idx,
                   model,
                   exp_name,
                   labels,
                   ttatype=None):
    logdir = 'logs/{}_fold{}/'.format(exp_name, fold_idx)
    valid_data = data.loc[splits['test_idx'][fold_idx], :]
    model.load_state_dict(
        torch.load(os.path.join(logdir,
                                'checkpoints/best.pth'))['model_state_dict'])
    model.eval()
    if ttatype == 'd4':
        model = tta.TTAWrapper(model, tta.d4_image2label)
    elif ttatype == 'fliplr_image2label':
        model = tta.TTAWrapper(model, tta.fliplr_image2label)
    runner = SupervisedRunner(model=model)
    val_dataset = EyeDataset(dataset_path=valid_path,
                             labels=data.loc[splits['test_idx'][fold_idx],
                                             labels].values,
                             ids=data.loc[splits['test_idx'][fold_idx],
                                          'id'].values,
                             albumentations_tr=aug_val(image_size))
    val_loader = DataLoader(val_dataset,
                            num_workers=8,
                            pin_memory=False,
                            batch_size=batch_size,
                            shuffle=False)
    loaders = collections.OrderedDict()
    loaders["valid"] = val_loader
    #predictions = runner.predict_loader(loaders["valid"], resume=f"{logdir}/checkpoints/best.pth")
    runner.infer(model=model, loaders=loaders, callbacks=[InferCallback()])
    predictions = runner.callbacks[0].predictions['logits']
    probabilities = softmax(torch.from_numpy(predictions), dim=1).numpy()
    for idx in range(probabilities.shape[0]):
        if all(probabilities[idx, :] < 0.5):
            probabilities[idx, 0] = 1.0
    predicted_labels = pd.DataFrame(probabilities, columns=labels)
    predicted_labels['id'] = data.loc[splits['test_idx'][fold_idx],
                                      'id'].values
    predicted_labels.loc[:, 'group'] = predicted_labels.id.apply(
        lambda x: x.split('_')[0])
    valid_data.loc[:, 'group'] = valid_data.id.apply(lambda x: x.split('_')[0])
    valid_data_groupped = valid_data.groupby(['group']).aggregate(
        dict(zip(labels, ['max'] * (len(labels)))))
    predicted_labels_groupped = predicted_labels.groupby(['group']).aggregate(
        dict(zip(labels, ['max'] * (len(labels)))))
    return (valid_data_groupped, predicted_labels_groupped)
def main():
    test = pd.read_csv(
        '/home/yuko/kaggle_understanding_cloud_organization/src/data_process/data/sample_submission.csv'
    )

    test['label'] = test['Image_Label'].apply(lambda x: x.split('_')[-1])
    test['im_id'] = test['Image_Label'].apply(
        lambda x: x.replace('_' + x.split('_')[-1], ''))

    test['img_label'] = test.EncodedPixels.apply(lambda x: 0
                                                 if x is np.nan else 1)

    img_label = test.groupby('im_id')['img_label'].agg(list).reset_index()

    test_id = np.array(img_label.im_id)

    test_dataset = CloudClassDataset(datatype='test',
                                     img_ids=test_id,
                                     transforms=get_validation_augmentation(),
                                     preprocessing=ort_get_preprocessing())

    test_loader = DataLoader(test_dataset,
                             batch_size=8,
                             shuffle=False,
                             num_workers=16)

    loaders = {"infer": test_loader}

    for fold in range(5):
        runner = SupervisedRunner()
        clf_model = ResNet()

        checkpoint = torch.load(
            f'/home/yuko/kaggle_understanding_cloud_organization/src/class/segmentation/fold_{fold}/checkpoints/best.pth'
        )
        clf_model.load_state_dict(checkpoint['model_state_dict'])
        clf_model.eval()
        runner.infer(
            model=clf_model,
            loaders=loaders,
            callbacks=[InferCallback()],
        )
        callbacks_num = 0
        pred = runner.callbacks[callbacks_num].predictions["logits"]

        df_pred = pd.DataFrame(
            [pred[:, 0], pred[:, 1], pred[:, 2], pred[:, 3]]).T
        df_pred.to_csv(
            f'/home/yuko/kaggle_understanding_cloud_organization/src/class/segmentation/pred_{fold}.csv'
        )
Example #6
def generate_test_preds(args):

    valid_dice, class_params = args

    test_preds = np.zeros((len(sub), 350, 525), dtype=np.float32)

    for i in range(NFOLDS):
        logdir = LOG_DIR_BASE + str(i)

        preprocessing_fn = smp.encoders.get_preprocessing_fn(
            ENCODER, ENCODER_WEIGHTS)
        dummy_dataset = CloudDataset(
            df=sub,
            datatype='test',
            img_ids=test_ids[:1],
            transforms=get_validation_augmentation(),
            preprocessing=get_preprocessing(preprocessing_fn))
        dummy_loader = DataLoader(dummy_dataset,
                                  batch_size=1,
                                  shuffle=False,
                                  num_workers=0)

        model = smp.Unet(
            encoder_name=ENCODER,
            encoder_weights=ENCODER_WEIGHTS,
            classes=4,
            activation=ACTIVATION,
        )
        runner = SupervisedRunner(model)

        # HACK: We are loading a few examples from our dummy loader so catalyst will properly load the weights
        # from our checkpoint
        loaders = {"test": dummy_loader}
        runner.infer(
            model=model,
            loaders=loaders,
            callbacks=[
                CheckpointCallback(resume=f"{logdir}/checkpoints/best.pth"),
                InferCallback()
            ],
        )

        # Now we do real inference on the full dataset
        test_dataset = CloudDataset(
            df=sub,
            datatype='test',
            img_ids=test_ids,
            transforms=get_validation_augmentation(),
            preprocessing=get_preprocessing(preprocessing_fn))
        test_loader = DataLoader(test_dataset,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=0)

        image_id = 0
        for batch_index, test_batch in enumerate(tqdm.tqdm(test_loader)):
            runner_out = runner.predict_batch(
                {"features":
                 test_batch[0].cuda()})['logits'].cpu().detach().numpy()
            for preds in runner_out:

                preds = preds.transpose((1, 2, 0))
                preds = cv2.resize(
                    preds,
                    (525, 350))  # height and width are backward in cv2...
                preds = preds.transpose((2, 0, 1))

                idx = batch_index * 4
                test_preds[idx + 0] += sigmoid(preds[0]) / NFOLDS  # fish
                test_preds[idx + 1] += sigmoid(preds[1]) / NFOLDS  # flower
                test_preds[idx + 2] += sigmoid(preds[2]) / NFOLDS  # gravel
                test_preds[idx + 3] += sigmoid(preds[3]) / NFOLDS  # sugar

    # Convert ensembled predictions to RLE predictions
    encoded_pixels = []
    for image_id, preds in enumerate(test_preds):

        predict, num_predict = post_process(preds,
                                            class_params[image_id % 4][0],
                                            class_params[image_id % 4][1])
        if num_predict == 0:
            encoded_pixels.append('')
        else:
            r = mask2rle(predict)
            encoded_pixels.append(r)

    print("Saving submission...")
    sub['EncodedPixels'] = encoded_pixels
    sub.to_csv('unet_submission_{}.csv'.format(valid_dice),
               columns=['Image_Label', 'EncodedPixels'],
               index=False)
    print("Saved.")
runner.train(model=model,
             criterion=criterion,
             optimizer=optimizer,
             scheduler=scheduler,
             loaders=loaders,
             callbacks=[
                 DiceCallback(),
                 EarlyStoppingCallback(patience=5, min_delta=0.001)
             ],
             logdir=logdir,
             num_epochs=num_epochs,
             verbose=True)
utils.plot_metrics(
    logdir=logdir,
    # specify which metrics we want to plot
    metrics=["loss", "dice", 'lr', '_base/lr'])
encoded_pixels = []
loaders = {"infer": valid_loader}
runner.infer(
    model=model,
    loaders=loaders,
    callbacks=[
        CheckpointCallback(resume=f"{logdir}/checkpoints/best.pth"),
        InferCallback()
    ],
)
valid_masks = []
probabilities = np.zeros((2220, 350, 525))
for i, (batch, output) in enumerate(
        tqdm.tqdm(zip(valid_dataset,
                      runner.callbacks[0].predictions["logits"]))):
    image, mask = batch
    for m in mask:
        if m.shape != (350, 525):
            m = cv2.resize(m, dsize=(525, 350), interpolation=cv2.INTER_LINEAR)
        valid_masks.append(m)

    for j, probability in enumerate(output):
Example #8
runner.train(
    model=model,
    criterion=criterion,
    optimizer=optimizer,
    loaders=loaders,
    logdir=logdir,
    num_epochs=num_epochs,
    check=True
)

# # Setup 8 - loader inference

# In[ ]:

from catalyst.dl.callbacks import InferCallback
loaders = collections.OrderedDict([("infer", loaders["train"])])
runner.infer(
    model=model, loaders=loaders, callbacks=[InferCallback()], check=True
)

# In[ ]:

runner.callbacks[0].predictions["logits"].shape

# # Setup 9 - batch inference

# In[ ]:

features, targets = next(iter(loaders["infer"]))

# In[ ]:

features.shape
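The batch-inference cell stops after inspecting features.shape. Following the predict_batch pattern used in the other examples, the next cell would presumably look like this sketch:

# In[ ]:

# Run one batch through the trained model and inspect the raw logits
# ("features" is SupervisedRunner's default input key; .cuda() mirrors the
# other examples and assumes the model lives on the GPU).
runner.predict_batch({"features": features.cuda()})["logits"].shape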
Example #9
                                          'in_channels': 3
                                          })}
model = models[args.model.lower()][0](**models[args.model.lower()][1])
encoded_pixels = []
loaders = {"infer": valid_loader}
logdir = f'./logs/{args.model}/fold_{args.fold}'
gc.collect()
runner = SupervisedRunner(model=model,
                          device='cuda',
                          input_key='image',
                          input_target_key='mask')
runner.infer(
    model=model,
    loaders=loaders,
    callbacks=[
        CheckpointCallback(
            resume=f"{logdir}/checkpoints/best.pth"),
        InferCallback()
    ],
    # fp16={"opt_level": "O1"},
)
valid_masks = []
probabilities = np.zeros((2220, 350, 525))
for i, (batch, output) in enumerate(tqdm.tqdm(zip(
        valid_dataset, runner.callbacks[0].predictions["logits"]))):
    gc.collect()
    image, mask = batch
    for m in mask:
        if m.shape != (350, 525):
            m = cv2.resize(m, dsize=(525, 350), interpolation=cv2.INTER_LINEAR)
        valid_masks.append(m)
Example #10
def find_class_params(args):
    runner = SupervisedRunner()
    model = create_model(args.encoder_type)
    valid_loader = get_train_val_loaders(args.encoder_type,
                                         batch_size=args.batch_size)['valid']

    encoded_pixels = []
    loaders = {"infer": valid_loader}
    runner.infer(
        model=model,
        loaders=loaders,
        callbacks=[CheckpointCallback(resume=args.ckp),
                   InferCallback()],
    )
    print(runner.callbacks)
    valid_masks = []
    probabilities = np.zeros((2220, 350, 525))
    for i, (batch, output) in enumerate(
            tqdm(
                zip(valid_loader.dataset,
                    runner.callbacks[0].predictions["logits"]))):
        image, mask = batch
        for m in mask:
            if m.shape != (350, 525):
                m = cv2.resize(m,
                               dsize=(525, 350),
                               interpolation=cv2.INTER_LINEAR)
            valid_masks.append(m)

        for j, probability in enumerate(output):
            if probability.shape != (350, 525):
                probability = cv2.resize(probability,
                                         dsize=(525, 350),
                                         interpolation=cv2.INTER_LINEAR)
            probabilities[i * 4 + j, :, :] = probability

    class_params = {}
    for class_id in range(4):
        print(class_id)
        attempts = []
        for t in range(0, 100, 5):
            t /= 100
            #for ms in [0, 100, 1200, 5000, 10000]:
            for ms in [5000, 10000, 15000, 20000, 22500, 25000, 30000]:

                masks = []
                for i in range(class_id, len(probabilities), 4):
                    probability = probabilities[i]
                    predict, num_predict = post_process(
                        sigmoid(probability), t, ms)
                    masks.append(predict)

                d = []
                for i, j in zip(masks, valid_masks[class_id::4]):
                    if (i.sum() == 0) & (j.sum() == 0):
                        d.append(1)
                    else:
                        d.append(dice(i, j))

                attempts.append((t, ms, np.mean(d)))

        attempts_df = pd.DataFrame(attempts,
                                   columns=['threshold', 'size', 'dice'])

        attempts_df = attempts_df.sort_values('dice', ascending=False)
        print(attempts_df.head())
        best_threshold = attempts_df['threshold'].values[0]
        best_size = attempts_df['size'].values[0]

        class_params[class_id] = (best_threshold, best_size)
    print(class_params)
    return class_params, runner
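The threshold search above scores candidate masks with a dice helper that is not shown. A sketch of the usual implementation (empty-vs-empty pairs are already handled by the caller, but the empty_score default keeps the function safe on its own):

import numpy as np

def dice(img1, img2, empty_score=1.0):
    # Dice coefficient between two binary masks.
    img1 = np.asarray(img1).astype(bool)
    img2 = np.asarray(img2).astype(bool)
    total = img1.sum() + img2.sum()
    if total == 0:
        return empty_score
    return 2.0 * np.logical_and(img1, img2).sum() / total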
Example #11
def generate_test_preds(ensemble_info):

    test_preds = np.zeros((len(sub), 350, 525), dtype=np.float32)
    num_models = len(ensemble_info)

    for model_info in ensemble_info:

        class_params = model_info['class_params']
        encoder = model_info['encoder']
        model_type = model_info['model_type']
        logdir = model_info['logdir']

        preprocessing_fn = smp.encoders.get_preprocessing_fn(
            encoder, ENCODER_WEIGHTS)

        model = None
        if model_type == 'unet':
            model = smp.Unet(
                encoder_name=encoder,
                encoder_weights=ENCODER_WEIGHTS,
                classes=4,
                activation=ACTIVATION,
            )
        elif model_type == 'fpn':
            model = smp.FPN(
                encoder_name=encoder,
                encoder_weights=ENCODER_WEIGHTS,
                classes=4,
                activation=ACTIVATION,
            )
        else:
            raise NotImplementedError("We only support FPN and UNet")

        runner = SupervisedRunner(model)

        # HACK: We are loading a few examples from our dummy loader so catalyst will properly load the weights
        # from our checkpoint
        dummy_dataset = CloudDataset(
            df=sub,
            datatype='test',
            img_ids=test_ids[:1],
            transforms=get_validation_augmentation(),
            preprocessing=get_preprocessing(preprocessing_fn))
        dummy_loader = DataLoader(dummy_dataset,
                                  batch_size=1,
                                  shuffle=False,
                                  num_workers=0)
        loaders = {"test": dummy_loader}
        runner.infer(
            model=model,
            loaders=loaders,
            callbacks=[
                CheckpointCallback(resume=f"{logdir}/checkpoints/best.pth"),
                InferCallback()
            ],
        )

        # Now we do real inference on the full dataset
        test_dataset = CloudDataset(
            df=sub,
            datatype='test',
            img_ids=test_ids,
            transforms=get_validation_augmentation(),
            preprocessing=get_preprocessing(preprocessing_fn))
        test_loader = DataLoader(test_dataset,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=0)

        image_id = 0
        for batch_index, test_batch in enumerate(tqdm.tqdm(test_loader)):
            runner_out = runner.predict_batch(
                {"features":
                 test_batch[0].cuda()})['logits'].cpu().detach().numpy()

            # Apply TTA transforms
            v_flip = test_batch[0].flip(dims=(2, ))
            h_flip = test_batch[0].flip(dims=(3, ))
            v_flip_out = runner.predict_batch({"features": v_flip.cuda()
                                               })['logits'].cpu().detach()
            h_flip_out = runner.predict_batch({"features": h_flip.cuda()
                                               })['logits'].cpu().detach()
            # Undo transforms
            v_flip_out = v_flip_out.flip(dims=(2, )).numpy()
            h_flip_out = h_flip_out.flip(dims=(3, )).numpy()
            # Get average
            tta_avg_out = (v_flip_out + h_flip_out) / 2

            # Combine with original predictions
            beta = 0.4  # From fastai TTA
            runner_out = (beta) * runner_out + (1 - beta) * tta_avg_out

            for preds in runner_out:

                preds = preds.transpose((1, 2, 0))
                preds = cv2.resize(
                    preds,
                    (525, 350))  # height and width are backward in cv2...
                preds = preds.transpose((2, 0, 1))

                idx = batch_index * 4
                test_preds[idx + 0] += sigmoid(preds[0]) / num_models  # fish
                test_preds[idx + 1] += sigmoid(preds[1]) / num_models  # flower
                test_preds[idx + 2] += sigmoid(preds[2]) / num_models  # gravel
                test_preds[idx + 3] += sigmoid(preds[3]) / num_models  # sugar

    # Convert ensembled predictions to RLE predictions
    encoded_pixels = []
    for image_id, preds in enumerate(test_preds):

        predict, num_predict = post_process(preds,
                                            class_params[image_id % 4][0],
                                            class_params[image_id % 4][1])
        if num_predict == 0:
            encoded_pixels.append('')
        else:
            r = mask2rle(predict)
            encoded_pixels.append(r)

    print("Saving submission...")
    sub['EncodedPixels'] = encoded_pixels
    sub.to_csv('ensembled_submission.csv',
               columns=['Image_Label', 'EncodedPixels'],
               index=False)
    print("Saved.")
def main():
    fold_path = args.fold_path
    fold_num = args.fold_num
    model_name = args.model_name
    train_csv = args.train_csv
    sub_csv = args.sub_csv
    encoder = args.encoder
    num_workers = args.num_workers
    batch_size = args.batch_size
    log_path = args.log_path
    is_tta = args.is_tta
    test_batch_size = args.test_batch_size
    attention_type = args.attention_type
    print(log_path)

    train = pd.read_csv(train_csv)
    train['label'] = train['Image_Label'].apply(lambda x: x.split('_')[-1])
    train['im_id'] = train['Image_Label'].apply(lambda x: x.replace('_' + x.split('_')[-1], ''))

    val_fold = pd.read_csv(f'{fold_path}/valid_file_fold_{fold_num}.csv')
    valid_ids = np.array(val_fold.file_name)

    attention_type = None if attention_type == 'None' else attention_type

    encoder_weights = 'imagenet'

    if model_name == 'Unet':
        model = smp.Unet(
            encoder_name=encoder,
            encoder_weights=encoder_weights,
            classes=CLASS,
            activation='softmax',
            attention_type=attention_type,
        )
    if model_name == 'Linknet':
        model = smp.Linknet(
            encoder_name=encoder,
            encoder_weights=encoder_weights,
            classes=CLASS,
            activation='softmax',
        )
    if model_name == 'FPN':
        model = smp.FPN(
            encoder_name=encoder,
            encoder_weights=encoder_weights,
            classes=CLASS,
            activation='softmax',
        )
    if model_name == 'ORG':
        model = Linknet_resnet18_ASPP()


    preprocessing_fn = smp.encoders.get_preprocessing_fn(encoder, encoder_weights)

    valid_dataset = CloudDataset(df=train,
                                 datatype='valid',
                                 img_ids=valid_ids,
                                 transforms=get_validation_augmentation(),
                                 preprocessing=get_preprocessing(preprocessing_fn))

    valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    loaders = {"infer": valid_loader}
    runner = SupervisedRunner()

    checkpoint = torch.load(f"{log_path}/checkpoints/best.pth")
    model.load_state_dict(checkpoint['model_state_dict'])
    model.eval()
    transforms = tta.Compose(
        [
            tta.HorizontalFlip(),
            tta.VerticalFlip(),
        ]
    )
    model = tta.SegmentationTTAWrapper(model, transforms)
    runner.infer(
        model=model,
        loaders=loaders,
        callbacks=[InferCallback()],
    )
    callbacks_num = 0

    valid_masks = []
    probabilities = np.zeros((valid_dataset.__len__() * CLASS, IMG_SIZE[0], IMG_SIZE[1]))

    # ========
    # val predict
    #

    for batch in tqdm(valid_dataset):  # per-class predicted values
        _, mask = batch
        for m in mask:
            m = resize_img(m)
            valid_masks.append(m)

    for i, output in enumerate(tqdm(runner.callbacks[callbacks_num].predictions["logits"])):
        for j, probability in enumerate(output):
            probability = resize_img(probability)  # the probability (prediction) for each class is extracted here; j should run 0..3
            probabilities[i * CLASS + j, :, :] = probability

    # ========
    # search best size and threshold
    #

    class_params = {}
    for class_id in range(CLASS):
        attempts = []
        for threshold in range(20, 90, 5):
            threshold /= 100
            for min_size in [10000, 15000, 20000]:
                masks = class_masks(class_id, probabilities, threshold, min_size)
                dices = class_dices(class_id, masks, valid_masks)
                attempts.append((threshold, min_size, np.mean(dices)))

        attempts_df = pd.DataFrame(attempts, columns=['threshold', 'size', 'dice'])
        attempts_df = attempts_df.sort_values('dice', ascending=False)

        print(attempts_df.head())

        best_threshold = attempts_df['threshold'].values[0]
        best_size = attempts_df['size'].values[0]

        class_params[class_id] = (best_threshold, best_size)

    # ========
    # gc
    #
    torch.cuda.empty_cache()
    gc.collect()

    # ========
    # predict
    #
    sub = pd.read_csv(sub_csv)
    sub['label'] = sub['Image_Label'].apply(lambda x: x.split('_')[-1])
    sub['im_id'] = sub['Image_Label'].apply(lambda x: x.replace('_' + x.split('_')[-1], ''))

    test_ids = sub['Image_Label'].apply(lambda x: x.split('_')[0]).drop_duplicates().values

    test_dataset = CloudDataset(df=sub,
                                datatype='test',
                                img_ids=test_ids,
                                transforms=get_validation_augmentation(),
                                preprocessing=get_preprocessing(preprocessing_fn))

    encoded_pixels = get_test_encoded_pixels(test_dataset, runner, class_params, test_batch_size)
    sub['EncodedPixels'] = encoded_pixels

    # ========
    # val dice
    #

    val_Image_Label = []
    for i, row in val_fold.iterrows():
        val_Image_Label.append(row.file_name + '_Fish')
        val_Image_Label.append(row.file_name + '_Flower')
        val_Image_Label.append(row.file_name + '_Gravel')
        val_Image_Label.append(row.file_name + '_Sugar')

    val_encoded_pixels = get_test_encoded_pixels(valid_dataset, runner, class_params, test_batch_size)
    val = pd.DataFrame(val_encoded_pixels, columns=['EncodedPixels'])
    val['Image_Label'] = val_Image_Label

    sub.to_csv(f'./sub/sub_{model_name}_fold_{fold_num}_{encoder}.csv', columns=['Image_Label', 'EncodedPixels'], index=False)
    val.to_csv(f'./val/val_{model_name}_fold_{fold_num}_{encoder}.csv', columns=['Image_Label', 'EncodedPixels'], index=False)
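This script also leans on resize_img, class_masks, and class_dices, which are defined elsewhere. Sketches of what they presumably do, modeled on the equivalent inline code in Examples #10 and #16 (IMG_SIZE is assumed to be (350, 525); post_process and dice are the helpers sketched earlier):

import cv2

def resize_img(img, size=(350, 525)):
    # Resize a single-channel map to the submission resolution if needed.
    if img.shape != size:
        img = cv2.resize(img, dsize=(size[1], size[0]), interpolation=cv2.INTER_LINEAR)
    return img

def class_masks(class_id, probabilities, threshold, min_size):
    # Post-process every probability map belonging to one class.
    return [post_process(probabilities[i], threshold, min_size)[0]
            for i in range(class_id, len(probabilities), CLASS)]

def class_dices(class_id, masks, valid_masks):
    # Dice of each predicted mask against its ground-truth counterpart;
    # a pair of empty masks counts as a perfect score of 1.
    dices = []
    for pred, true in zip(masks, valid_masks[class_id::CLASS]):
        if pred.sum() == 0 and true.sum() == 0:
            dices.append(1)
        else:
            dices.append(dice(pred, true))
    return dices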
Example #13
def main():

    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--seed', type=int, default=1234, help='Random seed')
    arg('--model-name',
        type=str,
        default='seresnext101',
        help='String model name used for saving')
    arg('--run-root',
        type=Path,
        default=Path('../results'),
        help='Directory for saving model')
    arg('--data-root', type=Path, default=Path('../data'))
    arg('--image-size', type=int, default=224, help='Image size for training')
    arg('--batch-size',
        type=int,
        default=16,
        help='Batch size during training')
    arg('--fold', type=int, default=0, help='Validation fold')
    arg('--n-epochs', type=int, default=10, help='Epoch to run')
    arg('--learning-rate',
        type=float,
        default=1e-3,
        help='Initial learning rate')
    arg('--step', type=int, default=1, help='Current training step')
    arg('--patience', type=int, default=4)
    arg('--criterion', type=str, default='bce', help='Criterion')
    arg('--optimizer', default='Adam', help='Name of the optimizer')
    arg('--continue_train', type=bool, default=False)
    arg('--checkpoint',
        type=str,
        default=Path('../results'),
        help='Checkpoint file path')
    arg('--workers', type=int, default=2)
    arg('--debug', type=bool, default=True)
    args = parser.parse_args()

    set_seed(args.seed)
    """
    
    SET PARAMS
    
    """
    args.debug = True
    ON_KAGGLE = configs.ON_KAGGLE
    N_CLASSES = configs.NUM_CLASSES
    args.image_size = configs.SIZE
    args.data_root = configs.DATA_ROOT
    use_cuda = cuda.is_available()
    fold = args.fold
    num_workers = args.workers
    num_epochs = args.n_epochs
    batch_size = args.batch_size
    learning_rate = args.learning_rate
    """

    LOAD DATA
    
    """
    print(os.listdir(args.data_root))
    folds = pd.read_csv(args.data_root / 'folds.csv')
    train_root = args.data_root / 'train'

    if args.debug:
        folds = folds.head(50)
    train_fold = folds[folds['fold'] != fold]
    valid_fold = folds[folds['fold'] == fold]
    check_fold(train_fold, valid_fold)

    def get_dataloader(df: pd.DataFrame, image_transform) -> DataLoader:
        """
        Calls dataloader to load Imet Dataset
        """
        return DataLoader(
            ImetDataset(train_root, df, image_transform),
            shuffle=True,
            batch_size=batch_size,
            num_workers=num_workers,
        )

    train_loader = get_dataloader(train_fold, image_transform=albu_transform)
    valid_loader = get_dataloader(valid_fold, image_transform=valid_transform)
    print('{} items in train, {} in valid'.format(len(train_loader.dataset),
                                                  len(valid_loader.dataset)))
    loaders = OrderedDict()
    loaders["train"] = train_loader
    loaders["valid"] = valid_loader
    """
    
    MODEL
    
    """
    model = seresnext101(num_classes=N_CLASSES)
    if use_cuda:
        model = model.cuda()

    criterion = nn.BCEWithLogitsLoss()
    optimizer = Adam(model.parameters(), lr=learning_rate)
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                               factor=0.5,
                                               patience=args.patience)
    """
    
    MODEL RUNNER
    
    """
    # call an instance of the model runner
    runner = SupervisedRunner()
    # logs folder
    current_time = datetime.now().strftime('%b%d_%H_%M')
    prefix = f'{current_time}_{args.model_name}'
    logdir = os.path.join(args.run_root, prefix)
    os.makedirs(logdir, exist_ok=False)

    print('\tTrain session    :', prefix)
    print('\tOn KAGGLE      :', ON_KAGGLE)
    print('\tDebug          :', args.debug)
    print('\tClasses number :', N_CLASSES)
    print('\tModel          :', args.model_name)
    print('\tParameters     :', model.parameters())
    print('\tImage size     :', args.image_size)
    print('\tEpochs         :', num_epochs)
    print('\tWorkers        :', num_workers)
    print('\tLog dir        :', logdir)
    print('\tLearning rate  :', learning_rate)
    print('\tBatch size     :', batch_size)
    print('\tPatience       :', args.patience)

    if args.continue_train:
        state = load_model(model, args.checkpoint)
        epoch = state['epoch']
        step = state['step']
        print('Loaded model weights from {}, epoch {}, step {}'.format(
            args.checkpoint, epoch, step))

    # model training
    runner.train(
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        loaders=loaders,
        callbacks=[
            F1ScoreCallback(threshold=0.5),
            #F2ScoreCallback(num_classes=N_CLASSES),
            EarlyStoppingCallback(patience=args.patience, min_delta=0.01)
        ],
        logdir=logdir,
        num_epochs=num_epochs,
        verbose=True)

    # by default it only plots loss, works in IPython Notebooks
    #utils.plot_metrics(logdir=logdir, metrics=["loss", "_base/lr"])
    """
    
    INFERENCE TEST
    
    """
    loaders = OrderedDict([("infer", loaders["train"])])
    runner.infer(
        model=model,
        loaders=loaders,
        callbacks=[
            CheckpointCallback(resume=f"{logdir}/checkpoints/best.pth"),
            InferCallback()
        ],
    )
    print(runner.callbacks[1].predictions["logits"])
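set_seed is imported from elsewhere in this project; a typical implementation it presumably resembles:

import random
import numpy as np
import torch

def set_seed(seed: int) -> None:
    # Seed Python, NumPy and PyTorch (CPU and all GPUs) for reproducibility.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)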
Example #14
 probabilities_list = []
 ttatype='d4'
 for fold_idx in range(len(splits['test_idx'])):
     print('Getting predictions from fold {}'.format(fold_idx))
     logdir = 'logs/{}_fold{}/'.format(exp_name, fold_idx)    
     model = prepare_model(model_name, n_classes)
     model.cuda()
     model.load_state_dict(torch.load(os.path.join(logdir,'checkpoints/best.pth'))['model_state_dict'])
     model.eval()   
     if ttatype=='d4':
         model = tta.TTAWrapper(model, tta.d4_image2label)
     elif ttatype=='fliplr_image2label':
         model = tta.TTAWrapper(model, tta.fliplr_image2label)
     runner = SupervisedRunner(model=model)   
     #predictions = runner.predict_loader(loaders["valid"], resume=f"{logdir}/checkpoints/best.pth")
     runner.infer(model=model,loaders=loaders,callbacks=[InferCallback()])
     predictions = runner.callbacks[0].predictions['logits']
     probabilities = softmax(torch.from_numpy(predictions),dim=1).numpy()    
     for idx in range(probabilities.shape[0]):
         if all(probabilities[idx,:]<0.5):
             probabilities[idx,0] = 1.0
     probabilities_list.append(probabilities)
 probabilities_combined = np.stack(probabilities_list,axis=0).mean(axis=0) 
 predicted_labels = pd.DataFrame(probabilities_combined, columns=labels)
 predicted_labels['id'] = test_data.loc[:,'id'].values
 predicted_labels.loc[:,'ID'] = predicted_labels.id.apply(lambda x: x.split('_')[0])
 predicted_labels_groupped = predicted_labels.groupby(['ID']).aggregate(dict(zip(labels,['max']*(len(labels)))))
 predicted_labels_groupped['ID'] = predicted_labels_groupped.index.values.astype(int)
 predicted_labels_groupped.reset_index(drop=True, inplace=True)
 predicted_labels_groupped.sort_values('ID',inplace=True)
 predicted_labels_groupped = predicted_labels_groupped.loc[:,['ID']+labels] 
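The aggregate(dict(zip(labels, ['max'] * len(labels)))) idiom above takes, for each group, the maximum predicted probability of every label column over that group's rows. A tiny worked example with made-up column names:

import pandas as pd

labels = ['label_a', 'label_b']  # hypothetical label columns
df = pd.DataFrame({'ID': ['x', 'x', 'y'],
                   'label_a': [0.2, 0.9, 0.4],
                   'label_b': [0.1, 0.3, 0.7]})
# Equivalent to .aggregate(dict(zip(labels, ['max'] * len(labels))))
print(df.groupby('ID').aggregate({'label_a': 'max', 'label_b': 'max'}))
#     label_a  label_b
# ID
# x       0.9      0.3
# y       0.4      0.7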
Example #15
def main_kaggle_smp(path_dataset='/dataset/kaggle/understanding_cloud_organization',
                    ENCODER='resnet50',
                    ENCODER_WEIGHTS='imagenet',
                    num_workers=0,
                    batch_size=8,
                    epochs=19,
                    debug=False,
                    exec_catalyst=True,
                    logdir="/src/logs/segmentation",
                    pretrained=True
                    ):
    # the commented lines below are potential input args
    # (name_dataset='eurosat', lr=0.0001, wd=0, ratio=0.9, batch_size=32, workers=4, epochs=15, num_gpus=1,
    # resume=None, dir_weights='./weights'):

    torch.backends.cudnn.benchmark = True

    # Dataset
    train, sub = get_meta_info_table(path_dataset)
    train_ids, valid_ids, test_ids = prepare_dataset(train, sub)
    preprocessing_fn = smp.encoders.get_preprocessing_fn(ENCODER, ENCODER_WEIGHTS)

    train_dataset = CloudDataset(df=train, datatype='train', img_ids=train_ids, transforms=get_training_augmentation(),
                                 preprocessing=get_preprocessing(preprocessing_fn), path=path_dataset)
    valid_dataset = CloudDataset(df=train, datatype='valid', img_ids=valid_ids,
                                 transforms=get_validation_augmentation(),
                                 preprocessing=get_preprocessing(preprocessing_fn), path=path_dataset)
    # DataLoader
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)

    loaders = {
        "train": train_loader,
        "valid": valid_loader
    }

    # todo: check how the device should be used in this case
    DEVICE = 'cuda'
    if debug:
        device = 'cpu'
    else:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    ACTIVATION = None
    model = smp.Unet(
        encoder_name=ENCODER,
        encoder_weights=ENCODER_WEIGHTS,
        classes=4,
        activation=ACTIVATION,
    )
    images, labels = next(iter(train_loader))
    model.to(device)
    print(model)
    print(summary(model, input_size=tuple(images.shape[1:])))

    # use smp epoch
    # num_epochs = 19

    # model, criterion, optimizer
    optimizer = torch.optim.Adam([
        {'params': model.decoder.parameters(), 'lr': 1e-2},
        {'params': model.encoder.parameters(), 'lr': 1e-3},
    ])
    scheduler = ReduceLROnPlateau(optimizer, factor=0.15, patience=2)
    criterion = smp.utils.losses.DiceLoss(eps=1.)  # smp.utils.losses.BCEDiceLoss(eps=1.)

    if not pretrained:
        # catalyst
        if exec_catalyst:
            device = utils.get_device()
            runner = SupervisedRunner(device=device)

            # train model
            runner.train(
                model=model,
                criterion=criterion,
                optimizer=optimizer,
                scheduler=scheduler,
                loaders=loaders,
                callbacks=[DiceCallback(), EarlyStoppingCallback(patience=5, min_delta=0.001)],
                logdir=logdir,
                num_epochs=epochs,
                verbose=True
            )

            # # prediction
            # encoded_pixels = []
            # loaders = {"infer": valid_loader}
            # runner.infer(
            #     model=model,
            #     loaders=loaders,
            #     callbacks=[
            #         CheckpointCallback(
            #             resume=f"{logdir}/checkpoints/best.pth"),
            #         InferCallback()
            #     ],
            # )
            # valid_masks = []
            #
            # # todo: where .pth?
            # # todo: from here
            # valid_num = valid_dataset.__len__()
            # probabilities = np.zeros((valid_num * 4, 350, 525))
            # for i, (batch, output) in enumerate(tqdm(zip(
            #         valid_dataset, runner.callbacks[0].predictions["logits"]))):
            #     image, mask = batch
            #     for m in mask:
            #         if m.shape != (350, 525):
            #             m = cv2.resize(m, dsize=(525, 350), interpolation=cv2.INTER_LINEAR)
            #         valid_masks.append(m)
            #
            #     for j, probability in enumerate(output):
            #         if probability.shape != (350, 525):
            #             probability = cv2.resize(probability, dsize=(525, 350), interpolation=cv2.INTER_LINEAR)
            #         probabilities[valid_num * 4 + j, :, :] = probability
            #
            # # todo: from here
            # class_params = {}
            # for class_id in range(4):
            #     print(class_id)
            #     attempts = []
            #     for t in range(0, 100, 5):
            #         t /= 100
            #         for ms in [0, 100, 1200, 5000, 10000]:
            #             masks = []
            #             for i in range(class_id, len(probabilities), 4):
            #                 probability = probabilities[i]
            #                 predict, num_predict = post_process(sigmoid(probability), t, ms)
            #                 masks.append(predict)
            #
            #             d = []
            #             for i, j in zip(masks, valid_masks[class_id::4]):
            #                 if (i.sum() == 0) & (j.sum() == 0):
            #                     d.append(1)
            #                 else:
            #                     d.append(dice(i, j))
            #
            #             attempts.append((t, ms, np.mean(d)))
            #
            #     attempts_df = pd.DataFrame(attempts, columns=['threshold', 'size', 'dice'])
            #
            #     attempts_df = attempts_df.sort_values('dice', ascending=False)
            #     print(attempts_df.head())
            #     best_threshold = attempts_df['threshold'].values[0]
            #     best_size = attempts_df['size'].values[0]
            #
            #     class_params[class_id] = (best_threshold, best_size)

        else:
            for epoch in trange(epochs, desc="Epochs"):
                metrics_train = train_epoch(model, train_loader, criterion, optimizer, device)
                metrics_eval = eval_epoch(model, valid_loader, criterion, device)

                scheduler.step(metrics_eval['valid_loss'])
                print(f'epoch: {epoch} ', metrics_train, metrics_eval)
    else:
        if exec_catalyst:
            device = utils.get_device()
            checkpoint = utils.load_checkpoint(f'{logdir}/checkpoints/best_full.pth')
            utils.unpack_checkpoint(checkpoint, model=model)
            runner = SupervisedRunner(model=model)

            # prediction with infer
            encoded_pixels = []
            loaders = {"infer": valid_loader}
            runner.infer(
                model=model,
                loaders=loaders,
                callbacks=[
                    CheckpointCallback(
                        resume=f"{logdir}/checkpoints/best.pth"),
                    InferCallback()
                ],
            )

            # todo: still being verified in Jupyter
            valid_masks = []


            valid_num = valid_dataset.__len__()
            probabilities = np.zeros((valid_num * 4, 350, 525))
            for i, (batch, output) in enumerate(tqdm(zip(
                    valid_dataset, runner.callbacks[0].predictions["logits"]))):
                image, mask = batch
                for m in mask:
                    if m.shape != (350, 525):
                        m = cv2.resize(m, dsize=(525, 350), interpolation=cv2.INTER_LINEAR)
                    valid_masks.append(m)

                for j, probability in enumerate(output):
                    if probability.shape != (350, 525):
                        probability = cv2.resize(probability, dsize=(525, 350), interpolation=cv2.INTER_LINEAR)
                    probabilities[i * 4 + j, :, :] = probability

            class_params = {}
            for class_id in range(4):
                print(class_id)
                attempts = []
                for t in range(0, 100, 5):
                    t /= 100
                    for ms in [0, 100, 1200, 5000, 10000]:
                        masks = []
                        for i in range(class_id, len(probabilities), 4):
                            probability = probabilities[i]
                            predict, num_predict = post_process(sigmoid(probability), t, ms)
                            masks.append(predict)

                        d = []
                        for i, j in zip(masks, valid_masks[class_id::4]):
                            if (i.sum() == 0) & (j.sum() == 0):
                                d.append(1)
                            else:
                                d.append(dice(i, j))

                        attempts.append((t, ms, np.mean(d)))

                attempts_df = pd.DataFrame(attempts, columns=['threshold', 'size', 'dice'])

                attempts_df = attempts_df.sort_values('dice', ascending=False)
                print(attempts_df.head())
                best_threshold = attempts_df['threshold'].values[0]
                best_size = attempts_df['size'].values[0]

                class_params[class_id] = (best_threshold, best_size)

            # predictions
            torch.cuda.empty_cache()
            gc.collect()

            test_dataset = CloudDataset(df=sub, datatype='test', img_ids=test_ids, transforms=get_validation_augmentation(),
                                        preprocessing=get_preprocessing(preprocessing_fn))
            test_loader = DataLoader(test_dataset, batch_size=8, shuffle=False, num_workers=0)

            loaders = {"test": test_loader}
            encoded_pixels = []
            image_id = 0
            for i, test_batch in enumerate(tqdm(loaders['test'])):
                runner_out = runner.predict_batch({"features": test_batch[0].cuda()})['logits']
                for batch in runner_out:
                    for probability in batch:

                        probability = probability.cpu().detach().numpy()
                        if probability.shape != (350, 525):
                            probability = cv2.resize(probability, dsize=(525, 350), interpolation=cv2.INTER_LINEAR)
                        predict, num_predict = post_process(sigmoid(probability), class_params[image_id % 4][0],
                                                            class_params[image_id % 4][1])
                        if num_predict == 0:
                            encoded_pixels.append('')
                        else:
                            r = mask2rle(predict)
                            encoded_pixels.append(r)
                        image_id += 1

            sub['EncodedPixels'] = encoded_pixels
            sub.to_csv('data/kaggle_cloud_org/submission.csv', columns=['Image_Label', 'EncodedPixels'], index=False)
Example #16
def generate_class_params(i_dont_know_how_to_return_values_without_map):

    preprocessing_fn = smp.encoders.get_preprocessing_fn(
        ENCODER, ENCODER_WEIGHTS)
    valid_dataset = CloudDataset(
        df=train,
        datatype='valid',
        img_ids=valid_ids,
        transforms=get_validation_augmentation(),
        preprocessing=get_preprocessing(preprocessing_fn))
    valid_loader = DataLoader(valid_dataset,
                              batch_size=1,
                              shuffle=False,
                              num_workers=0)

    model = smp.Unet(
        encoder_name=ENCODER,
        encoder_weights=ENCODER_WEIGHTS,
        classes=4,
        activation=ACTIVATION,
    )

    runner = SupervisedRunner()
    # Generate validation predictions
    encoded_pixels = []
    loaders = {"infer": valid_loader}
    runner.infer(
        model=model,
        loaders=loaders,
        callbacks=[
            CheckpointCallback(resume=f"{logdir}/checkpoints/best.pth"),
            InferCallback()
        ],
    )

    valid_masks = []
    probabilities = np.zeros((2220, 350, 525))
    for i, (batch, output) in enumerate(
            tqdm.tqdm(
                zip(valid_dataset,
                    runner.callbacks[0].predictions["logits"]))):
        image, mask = batch
        for m in mask:
            if m.shape != (350, 525):
                m = cv2.resize(m,
                               dsize=(525, 350),
                               interpolation=cv2.INTER_LINEAR)
            valid_masks.append(m)

        for j, probability in enumerate(output):
            if probability.shape != (350, 525):
                probability = cv2.resize(probability,
                                         dsize=(525, 350),
                                         interpolation=cv2.INTER_LINEAR)
            probabilities[i * 4 + j, :, :] = probability

    class_params = {}
    for class_id in range(4):
        print(class_id)
        attempts = []
        for t in range(30, 100, 5):
            t /= 100
            for ms in [1200, 5000, 10000]:
                masks = []
                for i in range(class_id, len(probabilities), 4):
                    probability = probabilities[i]
                    predict, num_predict = post_process(
                        sigmoid(probability), t, ms)
                    masks.append(predict)

                d = []
                for i, j in zip(masks, valid_masks[class_id::4]):
                    if (i.sum() == 0) & (j.sum() == 0):
                        d.append(1)
                    else:
                        d.append(dice(i, j))

                attempts.append((t, ms, np.mean(d)))

        attempts_df = pd.DataFrame(attempts,
                                   columns=['threshold', 'size', 'dice'])

        attempts_df = attempts_df.sort_values('dice', ascending=False)
        print(attempts_df.head())
        best_threshold = attempts_df['threshold'].values[0]
        best_size = attempts_df['size'].values[0]

        class_params[class_id] = (best_threshold, best_size)

    return class_params
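# The argument name i_dont_know_how_to_return_values_without_map suggests the
# function was launched through a map call, probably so GPU memory is released
# with the worker process. A sketch of that assumed usage (the spawn context is
# a guess to keep CUDA usable in the child process).
import torch.multiprocessing as mp

if __name__ == '__main__':
    ctx = mp.get_context('spawn')
    with ctx.Pool(1) as pool:
        class_params = pool.map(generate_class_params, [None])[0]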
# encoder = 'efficientnet-b4'
# arch = 'linknet'
model, preprocessing_fn = get_model(encoder, type=arch)

valid_dataset, loaders = get_loaders(bs, num_workers, preprocessing_fn)

train_loader = loaders['train']
valid_loader = loaders['valid']

print("Loading model")
runner = SupervisedRunner()
encoded_pixels = []
loaders = {"infer": valid_loader}
runner.infer(
    model=model,
    loaders=loaders,
    callbacks=[CheckpointCallback(resume=model_path),
               InferCallback()],
)
loaders['train'] = train_loader
loaders['valid'] = valid_loader

size = (320, 480)
if load_params:
    print(">>>> Loading params")
    with open(output_name + "_params.pkl", 'rb') as handle:
        class_params = pickle.load(handle)
else:
    print("Learning threshold and min area")
    valid_masks = []
    LIMIT = 800
    probabilities = np.zeros((int(LIMIT * 4), 320, 480))  #HARDCODED FOR NOW
Example #18
def training(train_ids, valid_ids, num_split, encoder, decoder):
    """
    模型训练
    """
    train = "./data/Clouds_Classify/train.csv"

    # Data overview
    train = pd.read_csv(open(train))
    train.head()

    train['label'] = train['Image_Label'].apply(lambda x: x.split('_')[1])
    train['im_id'] = train['Image_Label'].apply(lambda x: x.split('_')[0])

    ENCODER = encoder
    ENCODER_WEIGHTS = 'imagenet'

    if decoder == 'unet':
        model = smp.Unet(
            encoder_name=ENCODER,
            encoder_weights=ENCODER_WEIGHTS,
            classes=4,
            activation=None,
        )
    else:
        model = smp.FPN(
            encoder_name=ENCODER,
            encoder_weights=ENCODER_WEIGHTS,
            classes=4,
            activation=None,
        )
    preprocessing_fn = smp.encoders.get_preprocessing_fn(
        ENCODER, ENCODER_WEIGHTS)

    num_workers = 4
    bs = 12
    train_dataset = CloudDataset(
        df=train,
        transforms=get_training_augmentation(),
        datatype='train',
        img_ids=train_ids,
        preprocessing=get_preprocessing(preprocessing_fn))
    valid_dataset = CloudDataset(
        df=train,
        transforms=get_validation_augmentation(),
        datatype='valid',
        img_ids=valid_ids,
        preprocessing=get_preprocessing(preprocessing_fn))

    train_loader = DataLoader(train_dataset,
                              batch_size=bs,
                              shuffle=True,
                              num_workers=num_workers)
    valid_loader = DataLoader(valid_dataset,
                              batch_size=bs,
                              shuffle=False,
                              num_workers=num_workers)

    loaders = {"train": train_loader, "valid": valid_loader}

    num_epochs = 50
    logdir = "./logs/log_{}_{}/log_{}".format(encoder, decoder, num_split)

    # model, criterion, optimizer
    optimizer = torch.optim.Adam([
        {
            'params': model.decoder.parameters(),
            'lr': 1e-2
        },
        {
            'params': model.encoder.parameters(),
            'lr': 1e-3
        },
    ])
    scheduler = ReduceLROnPlateau(optimizer, factor=0.35, patience=4)
    criterion = smp.utils.losses.BCEDiceLoss(eps=1.)
    runner = SupervisedRunner()

    runner.train(model=model,
                 criterion=criterion,
                 optimizer=optimizer,
                 scheduler=scheduler,
                 loaders=loaders,
                 callbacks=[DiceCallback()],
                 logdir=logdir,
                 num_epochs=num_epochs,
                 verbose=True)

    # Exploring predictions
    loaders = {"infer": valid_loader}
    runner.infer(
        model=model,
        loaders=loaders,
        callbacks=[
            CheckpointCallback(resume=f"{logdir}/checkpoints/best.pth"),
            InferCallback()
        ],
    )
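# training() expects pre-split image id lists. A sketch of a plausible fold
# driver, deriving im_id values the same way the other scripts do; the KFold
# split and its parameters are assumptions, not part of the original code.
import pandas as pd
from sklearn.model_selection import KFold

def run_folds(encoder='resnet50', decoder='unet', n_splits=5):
    # Split the unique image ids into folds and train one model per fold.
    df = pd.read_csv("./data/Clouds_Classify/train.csv")
    img_ids = df['Image_Label'].apply(lambda x: x.split('_')[0]).drop_duplicates().values
    kf = KFold(n_splits=n_splits, shuffle=True, random_state=42)
    for num_split, (tr_idx, va_idx) in enumerate(kf.split(img_ids)):
        training(img_ids[tr_idx], img_ids[va_idx], num_split, encoder, decoder)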
def main(config):
    opts = config()
    path = opts.path
    train = pd.read_csv(f'{path}/train.csv')
    sub = pd.read_csv(f'{path}/sample_submission.csv')

    n_train = len(os.listdir(f'{path}/train_images'))
    n_test = len(os.listdir(f'{path}/test_images'))

    sub['label'] = sub['Image_Label'].apply(lambda x: x.split('_')[1])
    sub['im_id'] = sub['Image_Label'].apply(lambda x: x.split('_')[0])
    train.loc[train['EncodedPixels'].isnull() == False,
              'Image_Label'].apply(lambda x: x.split('_')[1]).value_counts()
    train.loc[train['EncodedPixels'].isnull() == False, 'Image_Label'].apply(
        lambda x: x.split('_')[0]).value_counts().value_counts()

    train['label'] = train['Image_Label'].apply(lambda x: x.split('_')[1])
    train['im_id'] = train['Image_Label'].apply(lambda x: x.split('_')[0])

    valid_ids = pd.read_csv("csvs/valid_threshold.csv")["img_id"].values
    test_ids = sub['Image_Label'].apply(
        lambda x: x.split('_')[0]).drop_duplicates().values
    #     print(valid_ids)
    ENCODER = opts.backborn
    ENCODER_WEIGHTS = opts.encoder_weights
    DEVICE = 'cuda'

    ACTIVATION = None
    model = get_model(model_type=opts.model_type,
                      encoder=ENCODER,
                      encoder_weights=ENCODER_WEIGHTS,
                      activation=ACTIVATION,
                      n_classes=opts.class_num,
                      task=opts.task,
                      attention_type=opts.attention_type,
                      head='simple',
                      center=opts.center,
                      tta=opts.tta)
    if opts.refine:
        model = get_ref_model(infer_model=model,
                              encoder=opts.ref_backborn,
                              encoder_weights=ENCODER_WEIGHTS,
                              activation=ACTIVATION,
                              n_classes=opts.class_num,
                              preprocess=opts.preprocess,
                              tta=opts.tta)
    model = convert_model(model)
    preprocessing_fn = encoders.get_preprocessing_fn(ENCODER, ENCODER_WEIGHTS)

    encoded_pixels = []
    runner = SupervisedRunner()
    probabilities = np.zeros((2220, 350, 525))

    for fold in range(opts.fold_max):
        if opts.refine:
            logdir = f"{opts.logdir}_refine/fold{fold}"
        else:
            logdir = f"{opts.logdir}/fold{fold}"
        valid_dataset = CloudDataset(
            df=train,
            datatype='valid',
            img_ids=valid_ids,
            transforms=get_validation_augmentation(opts.img_size),
            preprocessing=get_preprocessing(preprocessing_fn))
        valid_loader = DataLoader(valid_dataset,
                                  batch_size=opts.batchsize,
                                  shuffle=False,
                                  num_workers=opts.num_workers)
        loaders = {"infer": valid_loader}
        runner.infer(
            model=model,
            loaders=loaders,
            callbacks=[
                CheckpointCallback(resume=f"{logdir}/checkpoints/best.pth"),
                InferCallback()
            ],
        )
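        # Resize ground-truth masks and predicted logit maps to the submission
        # resolution (350 x 525) and sum sigmoid probabilities into
        # `probabilities`, one row per (validation image, class) pair; the sum
        # is averaged over the folds after this loop.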
        valid_masks = []
        for i, (batch, output) in enumerate(
                tqdm.tqdm(
                    zip(valid_dataset,
                        runner.callbacks[0].predictions["logits"]))):
            image, mask = batch
            for m in mask:
                if m.shape != (350, 525):
                    m = cv2.resize(m,
                                   dsize=(525, 350),
                                   interpolation=cv2.INTER_LINEAR)
                valid_masks.append(m)

            for j, probability in enumerate(output):
                if probability.shape != (350, 525):
                    probability = cv2.resize(probability,
                                             dsize=(525, 350),
                                             interpolation=cv2.INTER_LINEAR)
                probabilities[i * 4 + j, :, :] += sigmoid(probability)

    probabilities /= opts.fold_max
    if opts.tta:
        np.save(
            f'probabilities/{opts.logdir.split("/")[-1]}_{opts.img_size[0]}x{opts.img_size[1]}_tta_valid.npy',
            probabilities)
    else:
        np.save(
            f'probabilities/{opts.logdir.split("/")[-1]}_{opts.img_size[0]}x{opts.img_size[1]}_valid.npy',
            probabilities)

    torch.cuda.empty_cache()
    gc.collect()
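
    # Per-class grid search over (top_threshold, min_size, bottom_threshold):
    # every combination is scored by mean Dice against the validation masks and
    # the best triple for each class is stored in class_params.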

    class_params = {}
    cv_d = []
    for class_id in tqdm.trange(opts.class_num, desc='class_id', leave=False):
        #         print(class_id)
        attempts = []
        for tt in tqdm.trange(0, 100, 10, desc='top_threshold', leave=False):
            tt /= 100
            for bt in tqdm.trange(0,
                                  100,
                                  10,
                                  desc='bot_threshold',
                                  leave=False):
                bt /= 100
                for ms in tqdm.tqdm([
                        0, 100, 1000, 5000, 10000, 11000, 14000, 15000, 16000,
                        18000, 19000, 20000, 21000, 23000, 25000, 27000, 30000,
                        50000
                ],
                                    desc='min_size',
                                    leave=False):
                    masks = []
                    for i in range(class_id, len(probabilities), 4):
                        probability = probabilities[i]
                        predict, num_predict = post_process(
                            probability, tt, ms, bt)

                        masks.append(predict)

                    d = []
                    for i, j in zip(masks, valid_masks[class_id::4]):
                        #                     print(i.shape, j.shape)
                        if (i.sum() == 0) & (j.sum() == 0):
                            d.append(1)
                        else:
                            d.append(dice(i, j))
                    attempts.append((tt, ms, bt, np.mean(d)))

        attempts_df = pd.DataFrame(
            attempts,
            columns=['top_threshold', 'size', 'bottom_threshold', 'dice'])

        attempts_df = attempts_df.sort_values('dice', ascending=False)
        print(attempts_df.head())
        cv_d.append(attempts_df['dice'].values[0])
        best_top_threshold = attempts_df['top_threshold'].values[0]
        best_size = attempts_df['size'].values[0]
        best_bottom_threshold = attempts_df['bottom_threshold'].values[0]

        class_params[class_id] = (best_top_threshold, best_size,
                                  best_bottom_threshold)
    cv_d = np.array(cv_d)
    print("CV Dice:", np.mean(cv_d))
    pathlist = [
        f"{path}/test_images/" + i.split("_")[0] for i in sub['Image_Label']
    ]

    del masks
    del valid_masks
    del probabilities
    gc.collect()

    ############# predict ###################
    probabilities = np.zeros((n_test, 4, 350, 525))
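    # Accumulate sigmoid probabilities for every test image and class over all
    # folds; the running sum is averaged by opts.fold_max after the loop.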
    for fold in tqdm.trange(opts.fold_max, desc='fold loop'):
        if opts.refine:
            logdir = f"{opts.logdir}_refine/fold{fold}"
        else:
            logdir = f"{opts.logdir}/fold{fold}"


#         loaders = {"test": test_loader}
        test_dataset = CloudDataset(
            df=sub,
            datatype='test',
            img_ids=test_ids,
            transforms=get_validation_augmentation(opts.img_size),
            preprocessing=get_preprocessing(preprocessing_fn))
        test_loader = DataLoader(test_dataset,
                                 batch_size=opts.batchsize,
                                 shuffle=False,
                                 num_workers=opts.num_workers)
        runner_out = runner.predict_loader(
            model,
            test_loader,
            resume=f"{logdir}/checkpoints/best.pth",
            verbose=True)
        for i, batch in enumerate(
                tqdm.tqdm(runner_out, desc='probability loop')):
            for j, probability in enumerate(batch):
                if probability.shape != (350, 525):
                    probability = cv2.resize(probability,
                                             dsize=(525, 350),
                                             interpolation=cv2.INTER_LINEAR)
                probabilities[i, j, :, :] += sigmoid(probability)
        gc.collect()
    probabilities /= opts.fold_max
    if opts.tta:
        np.save(
            f'probabilities/{opts.logdir.split("/")[-1]}_{opts.img_size[0]}x{opts.img_size[1]}_tta_test.npy',
            probabilities)
    else:
        np.save(
            f'probabilities/{opts.logdir.split("/")[-1]}_{opts.img_size[0]}x{opts.img_size[1]}_test.npy',
            probabilities)
    image_id = 0
    print("##################### start post_process #####################")
    for i in tqdm.trange(n_test, desc='post process loop'):
        for probability in probabilities[i]:
            predict, num_predict = post_process(probability,
                                                class_params[image_id % 4][0],
                                                class_params[image_id % 4][1],
                                                class_params[image_id % 4][2])
            if num_predict == 0:
                encoded_pixels.append('')
            else:
                black_mask = get_black_mask(pathlist[image_id])
                predict = np.multiply(predict, black_mask)
                r = mask2rle(predict)
                encoded_pixels.append(r)
            image_id += 1
        gc.collect()
    print("##################### Finish post_process #####################")
    #######################################
    sub['EncodedPixels'] = encoded_pixels
    sub.to_csv(
        f'submissions/submission_{opts.logdir.split("/")[-1]}_{opts.img_size[0]}x{opts.img_size[1]}.csv',
        columns=['Image_Label', 'EncodedPixels'],
        index=False)
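
The functions above rely on several helpers that are not defined in this listing: sigmoid, dice, mask2rle, post_process, get_black_mask, get_model, get_preprocessing and get_validation_augmentation. A minimal sketch of the first four, assuming the variants commonly used in Understanding Clouds kernels, is shown below; note that main() passes a fourth "bottom threshold" argument to post_process, so its actual definition is assumed to extend this single-threshold version and is not reproduced here.

import cv2
import numpy as np


def sigmoid(x):
    # plain logistic; the runners above emit raw logits
    return 1.0 / (1.0 + np.exp(-x))


def dice(img1, img2):
    # Dice coefficient between two binary masks
    img1 = np.asarray(img1, dtype=bool)
    img2 = np.asarray(img2, dtype=bool)
    denom = img1.sum() + img2.sum()
    if denom == 0:
        return 1.0
    return 2.0 * np.logical_and(img1, img2).sum() / denom


def mask2rle(img):
    # run-length encode a binary mask in the competition's column-major format
    pixels = img.T.flatten()
    pixels = np.concatenate([[0], pixels, [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]
    return ' '.join(str(x) for x in runs)


def post_process(probability, threshold, min_size):
    # binarise a 350x525 probability map and drop connected components smaller
    # than min_size; returns the cleaned mask and the number of kept components
    mask = (probability > threshold).astype(np.uint8)
    num_component, component = cv2.connectedComponents(mask)
    predictions = np.zeros((350, 525), np.float32)
    num = 0
    for c in range(1, num_component):
        p = (component == c)
        if p.sum() > min_size:
            predictions[p] = 1
            num += 1
    return predictions, num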
Ejemplo n.º 20
0
def optimal_valid(k, net, config, loader_fold, ENCODER, ENCODER_WEIGHTS,
                  ACTIVATION):
    runner = SupervisedRunner()
    model = load_model(net, ENCODER, ENCODER_WEIGHTS, ACTIVATION)

    logdir = "./logs/segmentation_{}_{}Fold".format(net, k)
    loaders = {"infer": loader_fold[k]['valid']}

    runner.infer(
        model=model,
        loaders=loaders,
        callbacks=[
            CheckpointCallback(resume=f"{logdir}/checkpoints/{config}.pth"),
            InferCallback()
        ],
    )
    ###################### dummy test ######################
    if 1:  # flip to 0 to skip the grid search and reuse the hard-coded class_params below
        label_list = ["Fish", "Flower", "Gravel", "Sugar"]
        valid_masks = []
        probabilities = np.zeros(
            (len(loader_fold[k]['valid'].dataset) * 4, 350, 525))
        for i, (batch, output) in tqdm.tqdm_notebook(
                enumerate(
                    zip(loaders['infer'].dataset,
                        runner.callbacks[0].predictions["logits"]))):
            image, mask = batch
            for m in mask:
                if m.shape != (350, 525):
                    m = cv2.resize(m,
                                   dsize=(525, 350),
                                   interpolation=cv2.INTER_LINEAR)
                valid_masks.append(m)

            for j, probability in enumerate(output):
                if probability.shape != (350, 525):
                    probability = cv2.resize(probability,
                                             dsize=(525, 350),
                                             interpolation=cv2.INTER_LINEAR)
                probabilities[i * 4 + j, :, :] = probability

        # Find optimal post-processing values.
        # Thanks to @samusram for catching a mistake in the original validation:
        # https://www.kaggle.com/c/understanding_cloud_organization/discussion/107711#622412
        # The optimal threshold and minimum size are searched separately for each class.

        class_params = {}
        for class_id in range(4):
            print(label_list[class_id])
            attempts = []
            for t in range(0, 100, 5):
                t /= 100
                for ms in [0, 100, 1200, 5000, 10000]:
                    masks = []
                    for i in range(class_id, len(probabilities), 4):
                        probability = probabilities[i]
                        predict, num_predict = post_process(
                            sigmoid(probability), t, ms)
                        masks.append(predict)

                    d = []
                    for i, j in zip(masks, valid_masks[class_id::4]):
                        if (i.sum() == 0) & (j.sum() == 0):
                            d.append(1)
                        else:
                            d.append(dice(i, j))

                    attempts.append((t, ms, np.mean(d)))

            attempts_df = pd.DataFrame(attempts,
                                       columns=['threshold', 'size', 'dice'])

            attempts_df = attempts_df.sort_values('dice', ascending=False)
            print(attempts_df.head())
            best_threshold = attempts_df['threshold'].values[0]
            best_size = attempts_df['size'].values[0]
            best_dice = attempts_df['dice'].values[0]

            class_params[class_id] = (best_threshold, best_size, best_dice)

        print("Best Threshold", class_params)
        print()
        print("Avg Valid Dice", (class_params[0][2] + class_params[1][2] +
                                 class_params[2][2] + class_params[3][2]) / 4)
    else:
        class_params = {
            0: (0.6, 10000, 0.614792005689229),
            1: (0.7, 10000, 0.7479094686835059),
            2: (0.55, 10000, 0.6083618093569516),
            3: (0.45, 10000, 0.5766765025111799)
        }

    ###################### dummy test ######################

    # print("Classification Report")

    del loaders
    torch.cuda.empty_cache()
    gc.collect()

    return class_params, runner
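
A hypothetical driver for optimal_valid(), shown only to illustrate the calling convention; build_fold_loaders, NET, CONFIG_NAME and N_FOLDS are placeholder names, and ENCODER, ENCODER_WEIGHTS and ACTIVATION are assumed to be the same globals used elsewhere in these examples.

if __name__ == "__main__":
    NET = "resnet34"          # placeholder backbone name
    CONFIG_NAME = "best"      # checkpoint stem, i.e. {logdir}/checkpoints/best.pth
    N_FOLDS = 5
    # placeholder: a dict {fold: {"train": DataLoader, "valid": DataLoader}}
    loader_fold = build_fold_loaders(N_FOLDS)

    per_fold_params = []
    for k in range(N_FOLDS):
        class_params, _ = optimal_valid(k, NET, CONFIG_NAME, loader_fold,
                                        ENCODER, ENCODER_WEIGHTS, ACTIVATION)
        per_fold_params.append(class_params)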