Example #1
def run(config_file):
    config = load_config(config_file)

    os.makedirs(config.work_dir, exist_ok=True)
    save_config(config, config.work_dir + '/config.yml')

    os.environ['CUDA_VISIBLE_DEVICES'] = '0'

    all_transforms = {}
    all_transforms['train'] = get_transforms(config.transforms.train)
    all_transforms['valid'] = get_transforms(config.transforms.test)

    dataloaders = {
        phase: make_loader(
            data_folder=config.data.train_dir,
            df_path=config.data.train_df_path,
            phase=phase,
            batch_size=config.train.batch_size,
            num_workers=config.num_workers,
            idx_fold=config.data.params.idx_fold,
            transforms=all_transforms[phase],
            num_classes=config.data.num_classes,
            pseudo_label_path=config.train.pseudo_label_path,
            task='cls'
        )
        for phase in ['train', 'valid']
    }

    # create model
    model = CustomNet(config.model.encoder, config.data.num_classes)

    # train setting
    criterion = get_loss(config)
    params = [
        {'params': model.base_params(), 'lr': config.optimizer.params.encoder_lr},
        {'params': model.fresh_params(), 'lr': config.optimizer.params.decoder_lr}
    ]
    optimizer = get_optimizer(params, config)
    scheduler = get_scheduler(optimizer, config)

    # model runner
    runner = SupervisedRunner(model=model)

    callbacks = [MultiClassAccuracyCallback(threshold=0.5), F1ScoreCallback()]
    if os.path.exists(config.work_dir + '/checkpoints/best.pth'):
        callbacks.append(CheckpointCallback(resume=config.work_dir + '/checkpoints/best_full.pth'))

    # model training
    runner.train(
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        loaders=dataloaders,
        logdir=config.work_dir,
        num_epochs=config.train.num_epochs,
        callbacks=callbacks,
        verbose=True,
        fp16=True,
    )
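The config helpers are not shown in these examples; a minimal sketch, assuming YAML files read into a wrapper with attribute access (the real load_config/save_config may differ):

import yaml

class Config(dict):
    # dict with recursive attribute access, e.g. config.train.batch_size
    def __getattr__(self, name):
        value = self[name]
        return Config(value) if isinstance(value, dict) else value

def load_config(config_file):
    with open(config_file) as f:
        return Config(yaml.safe_load(f))

def save_config(config, out_path):
    with open(out_path, 'w') as f:
        yaml.safe_dump(dict(config), f)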
Example #2
def run(config_file, fold=0, device_id=0, ensemble=False):

    os.environ['CUDA_VISIBLE_DEVICES'] = str(device_id)

    config = load_config(config_file)
    if '_fold' not in config.work_dir and not ensemble:
        config.work_dir = config.work_dir + '_fold{}'.format(fold)

    testloader = make_loader(
        data_dir=config.data.test_dir,
        df_path=config.data.sample_submission_path,
        features=config.data.features,
        phase='test',
        img_size=(config.data.height, config.data.width),
        batch_size=config.test.batch_size,
        num_workers=config.num_workers,
        transforms=get_transforms(config.transforms.test),
    )

    if ensemble:
        # load model
        models = []
        for c in model_config_paths:  # model_config_paths: assumed module-level list of config files
            for i in range(5):
                models.append(load_fold_model(c, i))
        model = MultiModels(models, tta=False)
    else:
        checkpoint_path = config.work_dir + '/checkpoints/best.pth'
        model = load_model(config_file, checkpoint_path, fold)

    predictions = []
    z_pos = config.data.z_pos[0]
    with torch.no_grad():
        for i, (batch_fnames, batch_images) in enumerate(tqdm(testloader)):
            batch_images = batch_images.to(config.device)
            batch_preds = model(batch_images)
            batch_preds[:, 0] = torch.sigmoid(batch_preds[:, 0])
            batch_preds[:, z_pos] = depth_transform(batch_preds[:, z_pos])
            batch_preds = batch_preds.data.cpu().numpy()

            for preds in batch_preds:
                coords = extract_coords(
                    preds,
                    features=config.data.features,
                    img_size=(config.data.height, config.data.width),
                    confidence_threshold=config.test.confidence_threshold,
                    distance_threshold=config.test.distance_threshold,
                )
                s = coords2str(coords)
                predictions.append(s)

    # ------------------------------------------------------------------------------------------------------------
    # submission
    # ------------------------------------------------------------------------------------------------------------
    test = pd.read_csv(config.data.sample_submission_path)
    test['PredictionString'] = predictions
    out_path = config.work_dir + '/submission.csv'
    test.to_csv(out_path, index=False)
    postprocess(out_path)
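MultiModels is not shown; a minimal sketch, assuming it simply averages the raw outputs of the fold models (TTA support omitted):

import torch
import torch.nn as nn

class MultiModels(nn.Module):
    def __init__(self, models, tta=False):
        super().__init__()
        self.models = nn.ModuleList(models)
        self.tta = tta  # flip-based test-time augmentation would hook in here

    def forward(self, x):
        # average the predictions of all fold models
        return torch.stack([m(x) for m in self.models]).mean(dim=0)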
Example #3
def run_cls(config_file_cls):
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'

    # ------------------------------------------------------------------------------------------------------------
    # 1. classification inference
    # ------------------------------------------------------------------------------------------------------------
    config = load_config(config_file_cls)

    validloader = make_loader(data_folder=config.data.train_dir,
                              df_path=config.data.train_df_path,
                              phase='valid',
                              batch_size=config.train.batch_size,
                              num_workers=config.num_workers,
                              idx_fold=config.data.params.idx_fold,
                              transforms=get_transforms(
                                  config.transforms.test),
                              num_classes=config.data.num_classes,
                              task='cls')

    model = CustomNet(config.model.encoder, config.data.num_classes)
    model.to(config.device)
    model.eval()
    checkpoint = load_checkpoint(f"{config.work_dir}/checkpoints/best.pth")
    model.load_state_dict(checkpoint['model_state_dict'])

    all_predictions = []
    all_targets = []
    with torch.no_grad():
        for i, (batch_images, batch_targets) in enumerate(tqdm(validloader)):
            batch_images = batch_images.to(config.device)
            batch_preds = predict_batch(model,
                                        batch_images,
                                        tta=config.test.tta,
                                        task='cls')

            all_targets.append(batch_targets)
            all_predictions.append(batch_preds)

    all_predictions = np.concatenate(all_predictions)
    all_targets = np.concatenate(all_targets)

    # evaluation
    all_accuracy_scores = []
    all_f1_scores = []
    thresholds = np.linspace(0.1, 0.9, 9)
    for th in thresholds:
        # targets are assumed to be binary, so thresholding them is a no-op;
        # only the predictions are actually being thresholded here
        accuracy = accuracy_score(all_targets > th, all_predictions > th)
        f1 = f1_score(all_targets > th,
                      all_predictions > th,
                      average='samples')
        all_accuracy_scores.append(accuracy)
        all_f1_scores.append(f1)

    for th, score in zip(thresholds, all_accuracy_scores):
        print('validation accuracy for threshold {} = {}'.format(th, score))
    for th, score in zip(thresholds, all_f1_scores):
        print('validation f1 score for threshold {} = {}'.format(th, score))

    np.save('valid_preds', all_predictions)
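predict_batch is not shown; a plausible sketch for the classification task, assuming sigmoid outputs optionally averaged with a horizontal-flip TTA pass:

import torch

def predict_batch(model, batch_images, tta=False, task='cls'):
    preds = torch.sigmoid(model(batch_images))
    if tta:
        # horizontal flip on (N, C, H, W); class scores need no un-flipping
        flipped = torch.flip(batch_images, dims=[3])
        preds = (preds + torch.sigmoid(model(flipped))) / 2
    return preds.data.cpu().numpy()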
Example #4
def run_cls(config_dir):
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'

    # ------------------------------------------------------------------------------------------------------------
    # 1. classification inference
    # ------------------------------------------------------------------------------------------------------------
    config_root = Path(config_dir) / 'cls'
    config_paths = [config_root / p for p in os.listdir(config_root)]
    base_config_paths = [
        Path(config_dir) / p for p in os.listdir(config_dir) if p.endswith('.yml')
    ]
    config = load_config(base_config_paths[0])

    models = []
    for c in config_paths:
        models.append(load_model(c))

    model = MultiClsModels(models)

    testloader = make_loader(
        data_folder=config.data.test_dir,
        df_path=config.data.sample_submission_path,
        phase='test',
        batch_size=config.test.batch_size,
        num_workers=config.num_workers,
        transforms=get_transforms(config.transforms.test),
        num_classes=config.data.num_classes,
    )

    all_fnames = []
    all_predictions = []
    with torch.no_grad():
        for i, (batch_fnames, batch_images) in enumerate(tqdm(testloader)):
            batch_images = batch_images.to(config.device)
            batch_preds = predict_batch(model,
                                        batch_images,
                                        tta=config.test.tta,
                                        task='cls')

            all_fnames.extend(batch_fnames)
            all_predictions.append(batch_preds)

    all_predictions = np.concatenate(all_predictions)

    np.save('all_preds', all_predictions)
    df = pd.DataFrame(data=all_predictions, index=all_fnames)

    df.to_csv('cls_preds.csv')
    df.to_csv(KAGGLE_WORK_DIR + '/cls_preds.csv')
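The saved cls_preds.csv feeds the 'filtered_test' phase of the segmentation loaders below; a sketch of the filtering idea (the column layout and the 0.5 threshold are assumptions):

import pandas as pd

preds = pd.read_csv('cls_preds.csv', index_col=0)
keep = preds[(preds > 0.5).any(axis=1)]  # keep images with any positive class score
print(len(keep), 'images pass the classifier filter')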
Example #5
def run_cls(config_file_cls):
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'

    # ------------------------------------------------------------------------------------------------------------
    # 1. classification inference
    # ------------------------------------------------------------------------------------------------------------
    config = load_config(config_file_cls)

    model = CustomNet(config.model.encoder, config.data.num_classes)

    testloader = make_loader(
        data_folder=config.data.test_dir,
        df_path=config.data.sample_submission_path,
        phase='test',
        batch_size=config.test.batch_size,
        num_workers=config.num_workers,
        transforms=get_transforms(config.transforms.test),
        num_classes=config.data.num_classes,
    )

    model.to(config.device)
    model.eval()

    checkpoint = load_checkpoint(f"{config.work_dir}/checkpoints/best.pth")
    model.load_state_dict(checkpoint['model_state_dict'])

    all_fnames = []
    all_predictions = []
    with torch.no_grad():
        for i, (batch_fnames, batch_images) in enumerate(tqdm(testloader)):
            batch_images = batch_images.to(config.device)
            batch_preds = predict_batch(model,
                                        batch_images,
                                        tta=config.test.tta,
                                        task='cls')

            all_fnames.extend(batch_fnames)
            all_predictions.append(batch_preds)

    all_predictions = np.concatenate(all_predictions)

    np.save('all_preds', all_predictions)
    df = pd.DataFrame(data=all_predictions, index=all_fnames)

    df.to_csv('cls_preds.csv')  # keep the filename index; downstream filtering needs it
    df.to_csv(f"{config.work_dir}/cls_preds.csv")
Example #6
    def get_model_and_loader(config_paths):
        config = load_config(config_paths[0])

        models = []
        for c in config_paths:
            models.append(load_model(c))

        model = MultiSegModels(models)

        testloader = make_loader(
            data_folder=config.data.test_dir,
            df_path=config.data.sample_submission_path,
            phase='test',
            img_size=(config.data.height, config.data.width),
            batch_size=config.test.batch_size,
            num_workers=config.num_workers,
            transforms=get_transforms(config.transforms.test))
        return model, testloader
Example #7
def run_seg(config_file_seg):
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'

    # ------------------------------------------------------------------------------------------------------------
    # 2. segmentation inference
    # ------------------------------------------------------------------------------------------------------------
    config = load_config(config_file_seg)

    model = getattr(smp, config.model.arch)(
        encoder_name=config.model.encoder,
        encoder_weights=config.model.pretrained,
        classes=config.data.num_classes,
        activation=None,
    )

    if os.path.exists('cls_preds.csv'):
        testloader = make_loader(data_folder=config.data.test_dir,
                                 df_path='cls_preds.csv',
                                 phase='filtered_test',
                                 batch_size=config.test.batch_size,
                                 num_workers=config.num_workers,
                                 transforms=get_transforms(
                                     config.transforms.test))
    else:
        testloader = make_loader(data_folder=config.data.test_dir,
                                 df_path=config.data.sample_submission_path,
                                 phase='test',
                                 batch_size=config.test.batch_size,
                                 num_workers=config.num_workers,
                                 transforms=get_transforms(
                                     config.transforms.test))

    model.to(config.device)
    model.eval()

    checkpoint = load_checkpoint(f"{config.work_dir}/checkpoints/best.pth")
    model.load_state_dict(checkpoint['model_state_dict'])

    if os.path.exists(config.work_dir + '/threshold_search.json'):
        with open(config.work_dir + '/threshold_search.json') as json_file:
            data = json.load(json_file)
        df = pd.DataFrame(data)
        min_sizes = list(df.T.idxmax().values.astype(int))
        print('load best threshold from validation:', min_sizes)
    else:
        min_sizes = config.test.min_size
        print('load default threshold:', min_sizes)

    predictions = []
    with torch.no_grad():
        for i, (batch_fnames, batch_images) in enumerate(tqdm(testloader)):
            batch_images = batch_images.to(config.device)
            batch_preds = predict_batch(model,
                                        batch_images,
                                        tta=config.test.tta)

            for fname, preds in zip(batch_fnames, batch_preds):
                if config.data.num_classes == 4:
                    for cls in range(preds.shape[0]):
                        mask = preds[cls, :, :]
                        mask, num = post_process(mask,
                                                 config.test.best_threshold,
                                                 min_sizes[cls])
                        rle = mask2rle(mask)
                        name = fname + f"_{cls + 1}"
                        predictions.append([name, rle])
                else:  # == 5
                    for cls in range(1, 5):
                        mask = preds[cls, :, :]
                        mask, num = post_process(mask,
                                                 config.test.best_threshold,
                                                 min_sizes[cls])
                        rle = mask2rle(mask)
                        name = fname + f"_{cls}"
                        predictions.append([name, rle])

    # ------------------------------------------------------------------------------------------------------------
    # submission
    # ------------------------------------------------------------------------------------------------------------
    df = pd.DataFrame(predictions,
                      columns=['ImageId_ClassId', 'EncodedPixels'])
    df.to_csv(config.work_dir + "/submission.csv", index=False)
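mask2rle is not shown, but these competitions use the standard Kaggle run-length encoding (column-major order, 1-indexed); for reference:

import numpy as np

def mask2rle(mask):
    pixels = mask.T.flatten()                  # column-major order, as the metric expects
    pixels = np.concatenate([[0], pixels, [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]                    # turn end positions into run lengths
    return ' '.join(str(x) for x in runs)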
Example #8
def run_seg(config_file_seg):
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'

    # ------------------------------------------------------------------------------------------------------------
    # 2. segmentation inference
    # ------------------------------------------------------------------------------------------------------------
    config = load_config(config_file_seg)
    if 'COLAB_GPU' in os.environ:
        config.work_dir = '/content/drive/My Drive/kaggle_cloud/' + config.work_dir
    elif 'KAGGLE_WORKING_DIR' in os.environ:
        config.work_dir = '/kaggle/working/' + config.work_dir

    if os.path.exists('cls_preds.csv'):
        testloader = make_loader(
            data_folder=config.data.test_dir,
            df_path='cls_preds.csv',
            phase='filtered_test',
            img_size=(config.data.height, config.data.width),
            batch_size=config.test.batch_size,
            num_workers=config.num_workers,
            transforms=get_transforms(config.transforms.test))
    else:
        testloader = make_loader(
            data_folder=config.data.test_dir,
            df_path=config.data.sample_submission_path,
            phase='test',
            img_size=(config.data.height, config.data.width),
            batch_size=config.test.batch_size,
            num_workers=config.num_workers,
            transforms=get_transforms(config.transforms.test))

    model = load_model(config_file_seg)

    if os.path.exists(config.work_dir + '/threshold_search.json'):
        with open(config.work_dir + '/threshold_search.json') as json_file:
            data = json.load(json_file)
        df = pd.DataFrame(data)
        min_sizes = list(df.T.idxmax().values.astype(int))
        print('load best threshold from validation:', min_sizes)
    else:
        min_sizes = config.test.min_size
        print('load default threshold:', min_sizes)

    predictions = []
    with torch.no_grad():
        for i, (batch_fnames, batch_images) in enumerate(tqdm(testloader)):
            batch_images = batch_images.to(config.device)
            batch_preds = predict_batch(model,
                                        batch_images,
                                        tta=config.test.tta)

            for fname, preds in zip(batch_fnames, batch_preds):
                for cls in range(preds.shape[0]):
                    pred, _ = post_process(
                        preds[cls, :, :],
                        config.test.best_threshold,
                        min_sizes[cls],
                        height=config.transforms.test.Resize.height,
                        width=config.transforms.test.Resize.width)
                    pred = cv2.resize(pred, (SUB_WIDTH, SUB_HEIGHT))
                    pred = (pred > 0.5).astype(int)
                    rle = mask2rle(pred)
                    cls_name = INV_CLASSES[cls]
                    name = fname + f"_{cls_name}"
                    predictions.append([name, rle])

    # ------------------------------------------------------------------------------------------------------------
    # submission
    # ------------------------------------------------------------------------------------------------------------
    df = pd.DataFrame(predictions, columns=['Image_Label', 'EncodedPixels'])
    df.to_csv(config.work_dir + "/submission.csv", index=False)
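post_process is not shown; a sketch of the usual recipe, assuming it thresholds the probability map and drops connected components smaller than min_size (the call above also passes height/width for resizing, omitted here):

import cv2
import numpy as np

def post_process(probability, threshold, min_size):
    mask = (probability > threshold).astype(np.uint8)
    num_components, components = cv2.connectedComponents(mask)
    cleaned = np.zeros(probability.shape, np.float32)
    num = 0
    for c in range(1, num_components):         # label 0 is background
        component = components == c
        if component.sum() >= min_size:
            cleaned[component] = 1
            num += 1
    return cleaned, num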
Example #9
def run(config_file, fold=0, device_id=0):

    os.environ['CUDA_VISIBLE_DEVICES'] = str(device_id)

    config = load_config(config_file)

    if '_fold' not in config.work_dir:
        config.work_dir = config.work_dir + '_fold{}'.format(fold)

    validloader = make_loader(
        data_dir=config.data.train_dir,
        df_path=config.data.train_df_path,
        features=config.data.features,
        phase='valid',
        img_size=(config.data.height, config.data.width),
        batch_size=config.test.batch_size,
        num_workers=config.num_workers,
        idx_fold=fold,
        transforms=get_transforms(config.transforms.test),
        model_scale=config.data.model_scale,
        return_fnames=True,
    )

    # load model
    checkpoint_path = config.work_dir + '/checkpoints/best.pth'
    model = load_model(config_file, checkpoint_path)

    folds = pd.read_csv('data/folds.csv')

    predictions = []
    targets = []
    image_ids = []
    z_pos = config.data.z_pos[0]
    with torch.no_grad():
        for i, (batch_images, batch_mask_regr,
                batch_image_ids) in enumerate(tqdm(validloader)):
            batch_preds = model(batch_images.to(config.device))
            batch_preds[:, 0] = torch.sigmoid(batch_preds[:, 0])
            batch_preds[:, z_pos] = depth_transform(batch_preds[:, z_pos])

            batch_preds = batch_preds.data.cpu().numpy()
            batch_mask_regr = batch_mask_regr.data.cpu().numpy()
            image_ids.extend(batch_image_ids)

            for preds, mask_regr, image_id in zip(batch_preds, batch_mask_regr,
                                                  batch_image_ids):
                coords = extract_coords(
                    preds,
                    features=config.data.features,
                    img_size=(config.data.height, config.data.width),
                    confidence_threshold=config.test.confidence_threshold,
                    distance_threshold=config.test.distance_threshold,
                )
                predictions.append(coords)

                s = folds.loc[folds.ImageId == image_id.split('.jpg')[0],
                              'PredictionString'].values[0]
                true_coords = str2coords(
                    s, names=['id', 'yaw', 'pitch', 'roll', 'x', 'y', 'z'])
                targets.append(true_coords)

    with open(config.work_dir + '/predictions.pkl', 'wb') as f:
        pickle.dump(predictions, f)
    with open(config.work_dir + '/targets.pkl', 'wb') as f:
        pickle.dump(targets, f)

    rows = []
    for p, i in zip(predictions, image_ids):
        rows.append({'ImageId': i, 'PredictionString': coords2str(p)})
    pred_df = pd.DataFrame(rows)
    pred_df.to_csv(config.work_dir + '/val_pred.csv', index=False)

    all_result, result = calc_map_score(targets, predictions)
    result['confidence_threshold'] = config.test.confidence_threshold
    result['distance_threshold'] = config.test.distance_threshold

    dict_to_json(
        all_result, config.work_dir +
        '/all_result_th{}.json'.format(config.test.distance_threshold))
    dict_to_json(
        result, config.work_dir +
        '/result_th{}.json'.format(config.test.distance_threshold))

    for k in sorted(result.keys()):
        print(k, result[k])
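coords2str and str2coords are not shown; a sketch of the round-trip, assuming each object is a dict of named values flattened into a space-separated PredictionString (the exact field order is an assumption):

def coords2str(coords, names=('yaw', 'pitch', 'roll', 'x', 'y', 'z', 'confidence')):
    s = []
    for c in coords:
        s.extend(str(c[n]) for n in names)
    return ' '.join(s)

def str2coords(s, names=('id', 'yaw', 'pitch', 'roll', 'x', 'y', 'z')):
    values = s.split()
    step = len(names)
    return [dict(zip(names, map(float, values[i:i + step])))
            for i in range(0, len(values), step)]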
Example #10
def run(config_file):
    config = load_config(config_file)

    os.makedirs(config.work_dir, exist_ok=True)
    save_config(config, config.work_dir + '/config.yml')

    os.environ['CUDA_VISIBLE_DEVICES'] = '0'

    all_transforms = {}
    all_transforms['train'] = get_transforms(config.transforms.train)
    all_transforms['valid'] = get_transforms(config.transforms.test)

    dataloaders = {
        phase: make_loader(data_folder=config.data.train_dir,
                           df_path=config.data.train_df_path,
                           phase=phase,
                           batch_size=config.train.batch_size,
                           num_workers=config.num_workers,
                           idx_fold=config.data.params.idx_fold,
                           transforms=all_transforms[phase],
                           num_classes=config.data.num_classes,
                           pseudo_label_path=config.train.pseudo_label_path,
                           debug=config.debug)
        for phase in ['train', 'valid']
    }

    # create segmentation model with pre-trained encoder
    model = getattr(smp, config.model.arch)(
        encoder_name=config.model.encoder,
        encoder_weights=config.model.pretrained,
        classes=config.data.num_classes,
        activation=None,
    )

    # train setting
    criterion = get_loss(config)
    params = [
        {
            'params': model.decoder.parameters(),
            'lr': config.optimizer.params.decoder_lr
        },
        {
            'params': model.encoder.parameters(),
            'lr': config.optimizer.params.encoder_lr
        },
    ]
    optimizer = get_optimizer(params, config)
    scheduler = get_scheduler(optimizer, config)

    # model runner
    runner = SupervisedRunner(model=model)

    callbacks = [DiceCallback(), IouCallback()]

    # resume from a checkpoint if one exists
    if os.path.exists(config.work_dir + '/checkpoints/best.pth'):
        callbacks.append(
            CheckpointCallback(resume=config.work_dir +
                               '/checkpoints/best_full.pth'))

    if config.train.mixup:
        callbacks.append(MixupCallback())

    if config.train.cutmix:
        callbacks.append(CutMixCallback())

    # model training
    runner.train(
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        loaders=dataloaders,
        logdir=config.work_dir,
        num_epochs=config.train.num_epochs,
        callbacks=callbacks,
        verbose=True,
        fp16=True,
    )
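get_optimizer is not shown; per-group learning rates like the encoder/decoder split above plug directly into any torch optimizer. A sketch, with the optimizer choice assumed:

import torch

def get_optimizer(params, config):
    # each dict in params carries its own 'lr', overriding the default
    return torch.optim.Adam(params, lr=config.optimizer.params.decoder_lr)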
Example #11
def run_seg(config_file_seg):
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'

    # ------------------------------------------------------------------------------------------------------------
    # 2. segmentation inference
    # ------------------------------------------------------------------------------------------------------------
    config = load_config(config_file_seg)

    validloader = make_loader(
        data_folder=config.data.train_dir,
        df_path=config.data.train_df_path,
        phase='valid',
        batch_size=config.train.batch_size,
        num_workers=config.num_workers,
        idx_fold=config.data.params.idx_fold,
        transforms=get_transforms(config.transforms.test),
        num_classes=config.data.num_classes,
    )

    # create segmentation model with pre-trained encoder
    model = getattr(smp, config.model.arch)(
        encoder_name=config.model.encoder,
        encoder_weights=config.model.pretrained,
        classes=config.data.num_classes,
        activation=None,
    )
    model.to(config.device)
    model.eval()
    checkpoint = load_checkpoint(f"{config.work_dir}/checkpoints/best.pth")
    model.load_state_dict(checkpoint['model_state_dict'])

    all_dice = {}
    min_sizes = [100, 300, 500, 750, 1000, 1500, 2000, 3000]
    for min_size in min_sizes:
        all_dice[min_size] = {}
        for cls in range(config.data.num_classes):
            all_dice[min_size][cls] = []

    with torch.no_grad():
        for i, (batch_images, batch_masks) in enumerate(tqdm(validloader)):
            batch_images = batch_images.to(config.device)
            batch_preds = predict_batch(model,
                                        batch_images,
                                        tta=config.test.tta)

            batch_masks = batch_masks.cpu().numpy()

            for masks, preds in zip(batch_masks, batch_preds):
                for cls in range(config.data.num_classes):
                    for min_size in min_sizes:
                        pred, _ = post_process(preds[cls, :, :],
                                               config.test.best_threshold,
                                               min_size)
                        mask = masks[cls, :, :]
                        all_dice[min_size][cls].append(dice_score(pred, mask))

    for cls in range(config.data.num_classes):
        for min_size in min_sizes:
            all_dice[min_size][cls] = sum(all_dice[min_size][cls]) / len(
                all_dice[min_size][cls])
            if config.data.num_classes == 4:
                defect_class = cls + 1
            else:
                defect_class = cls
            print('average dice score for class{} for min_size {}: {}'.format(
                defect_class, min_size, all_dice[min_size][cls]))

    # write the full search result once, after every score has been averaged
    dict_to_json(all_dice, config.work_dir + '/threshold_search.json')
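The JSON written here maps min_size to per-class dice, which is exactly what the inference scripts read back with df.T.idxmax(); a small worked example (after a JSON round-trip the min_size keys come back as strings, hence the astype(int)):

import pandas as pd

data = {100: {0: 0.61, 1: 0.55}, 300: {0: 0.64, 1: 0.58}, 500: {0: 0.63, 1: 0.60}}
df = pd.DataFrame(data)                        # rows: classes, columns: min_sizes
min_sizes = list(df.T.idxmax().values.astype(int))
print(min_sizes)                               # -> [300, 500]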
Example #12
def run(config_file, device_id, idx_fold):
    os.environ['CUDA_VISIBLE_DEVICES'] = str(device_id)
    print('info: use gpu No.{}'.format(device_id))

    config = load_config(config_file)

    # for n-folds loop
    if config.data.params.idx_fold == -1:
        config.data.params.idx_fold = idx_fold
        config.work_dir = config.work_dir + '_fold{}'.format(idx_fold)
    elif config.data.params.idx_fold == 0:
        original_fold = int(config.work_dir.split('_fold')[1])
        if original_fold == idx_fold:
            raise Exception(
                'if you specify fold 0, you should use train.py or resume from fold 1.'
            )
        config.data.params.idx_fold = idx_fold
        config.work_dir = config.work_dir.split('_fold')[0] + '_fold{}'.format(
            idx_fold)
    else:
        raise Exception('you should use train.py if idx_fold is specified.')
    print('info: training for fold {}'.format(idx_fold))

    os.makedirs(config.work_dir, exist_ok=True)

    all_transforms = {}
    all_transforms['train'] = get_transforms(config.transforms.train)
    all_transforms['valid'] = get_transforms(config.transforms.test)

    dataloaders = {
        phase: make_loader(
            df_path=config.data.train_df_path,
            data_dir=config.data.train_dir,
            features=config.data.features,
            phase=phase,
            img_size=(config.data.height, config.data.width),
            batch_size=config.train.batch_size,
            num_workers=config.num_workers,
            idx_fold=config.data.params.idx_fold,
            transforms=all_transforms[phase],
            horizontal_flip=config.train.horizontal_flip,
            model_scale=config.data.model_scale,
            debug=config.debug,
            pseudo_path=config.data.pseudo_path,
        )
        for phase in ['train', 'valid']
    }

    # create CenterNet model with pre-trained encoder
    num_features = len(config.data.features)
    print('info: num_features =', num_features)
    model = CenterNetFPN(
        slug=config.model.encoder,
        num_classes=num_features,
    )

    optimizer = get_optimizer(model, config)
    scheduler = get_scheduler(optimizer, config)

    # model runner
    runner = SupervisedRunner(model=model, device=get_device())

    # train setting
    criterion, callbacks = get_criterion_and_callback(config)

    if config.train.early_stop_patience > 0:
        callbacks.append(
            EarlyStoppingCallback(patience=config.train.early_stop_patience))

    if config.train.accumulation_size > 0:
        accumulation_steps = config.train.accumulation_size // config.train.batch_size
        callbacks.extend(
            [OptimizerCallback(accumulation_steps=accumulation_steps)])

    # resume from a checkpoint if one exists
    if os.path.exists(config.work_dir + '/checkpoints/last_full.pth'):
        callbacks.append(
            CheckpointCallback(resume=config.work_dir +
                               '/checkpoints/last_full.pth'))

    # model training
    runner.train(
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        loaders=dataloaders,
        logdir=config.work_dir,
        num_epochs=config.train.num_epochs,
        main_metric=config.train.main_metric,
        minimize_metric=config.train.minimize_metric,
        callbacks=callbacks,
        verbose=True,
        fp16=config.train.fp16,
    )
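The accumulation arithmetic above is worth spelling out: OptimizerCallback steps the optimizer once every accumulation_steps batches, so the effective batch size is accumulation_steps * batch_size. For example, with assumed config values:

accumulation_size, batch_size = 64, 16
accumulation_steps = accumulation_size // batch_size   # -> 4
# the optimizer steps every 4 batches: 4 * 16 = 64 samples per update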
Example #13
    # assumed: same dataset class as the test_dataset call below
    valid_dataset = datasets.NSvalidationimageData(
        image_path_list=valid_data[0],
        label_list=valid_data[1],
        transform=valid_transform(x_size=x_size,
                                  y_size=y_size,
                                  mean=mean,
                                  std=std))

    test_dataset = datasets.NSvalidationimageData(image_path_list=test_data[0],
                                                  label_list=test_data[1],
                                                  transform=valid_transform(
                                                      x_size=x_size,
                                                      y_size=y_size,
                                                      mean=mean,
                                                      std=std))

    train_loader = make_loader(dataset=train_dataset, batch_size=train_batch)

    valid_loader = make_loader(dataset=valid_dataset, batch_size=valid_batch)

    test_loader = make_loader(dataset=test_dataset, batch_size=valid_batch)

    # model
    net = torch_models.Network(block=block,
                               input_shape=(3, y_size, x_size),
                               fc_shapes=fc_shapes,
                               n_classes=n_classes)

    net.train()

    update_params = [] if not fine_tuning else net.parameters()
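Here make_loader takes an already-built dataset, so it is presumably a thin wrapper over torch's DataLoader; a sketch (shuffle and worker policy are assumptions):

from torch.utils.data import DataLoader

def make_loader(dataset, batch_size, shuffle=False, num_workers=2):
    return DataLoader(dataset,
                      batch_size=batch_size,
                      shuffle=shuffle,
                      num_workers=num_workers,
                      pin_memory=True)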
Example #14
def validation(config_file_seg):

    os.environ['CUDA_VISIBLE_DEVICES'] = '0'

    config = load_config(config_file_seg)
    if 'COLAB_GPU' in os.environ:
        config.work_dir = '/content/drive/My Drive/kaggle_cloud/' + config.work_dir
    elif 'KAGGLE_WORKING_DIR' in os.environ:
        config.work_dir = '/kaggle/working/' + config.work_dir

    validloader = make_loader(
        data_folder=config.data.train_dir,
        df_path=config.data.train_df_path,
        phase='valid',
        img_size=(config.data.height, config.data.width),
        batch_size=config.test.batch_size,
        num_workers=config.num_workers,
        idx_fold=config.data.params.idx_fold,
        transforms=get_transforms(config.transforms.test),
        num_classes=config.data.num_classes,
    )

    model = load_model(config_file_seg)

    min_sizes = np.arange(0, 20000, 5000)
    label_thresholds = [0.6, 0.7, 0.8]
    mask_thresholds = [0.2, 0.3, 0.4]
    all_dice = np.zeros(
        (4, len(label_thresholds), len(mask_thresholds), len(min_sizes)))
    count = 0

    with torch.no_grad():
        for batch_images, batch_masks in tqdm(validloader):
            batch_images = batch_images.to(config.device)
            batch_preds = predict_batch(model,
                                        batch_images,
                                        tta=config.test.tta)

            batch_labels = torch.nn.functional.adaptive_max_pool2d(
                torch.sigmoid(torch.Tensor(batch_preds)),
                1).view(batch_preds.shape[0], -1)

            batch_masks = batch_masks.cpu().numpy()
            batch_labels = batch_labels.cpu().numpy()

            batch_masks = resize_batch_images(batch_masks, SUB_HEIGHT,
                                              SUB_WIDTH)
            batch_preds = resize_batch_images(batch_preds, SUB_HEIGHT,
                                              SUB_WIDTH)

            for labels, masks, preds in zip(batch_labels, batch_masks,
                                            batch_preds):
                for cls in range(config.data.num_classes):
                    for i, label_th in enumerate(label_thresholds):
                        for j, mask_th in enumerate(mask_thresholds):
                            for k, min_size in enumerate(min_sizes):
                                if labels[cls] <= label_th:
                                    pred = np.zeros(preds[cls, :, :].shape)
                                else:
                                    pred, _ = post_process(preds[cls, :, :],
                                                           mask_th,
                                                           min_size,
                                                           height=SUB_HEIGHT,
                                                           width=SUB_WIDTH)
                                mask = masks[cls, :, :]

                                dice = dice_score(pred, mask)
                                all_dice[cls, i, j, k] += dice
                count += 1

    all_dice = all_dice / count
    np.save('all_dice', all_dice)

    parameters = {}
    parameters['label_thresholds'] = []
    parameters['mask_thresholds'] = []
    parameters['min_sizes'] = []
    parameters['dice'] = []
    cv_score = 0

    for cls in range(4):
        i, j, k = np.where((all_dice[cls] == all_dice[cls].max()))
        parameters['label_thresholds'].append(float(label_thresholds[i[0]]))
        parameters['mask_thresholds'].append(float(mask_thresholds[j[0]]))
        parameters['min_sizes'].append(int(min_sizes[k[0]]))
        parameters['dice'].append(float(all_dice[cls].max()))
        cv_score += all_dice[cls].max() / 4

    print('cv_score:', cv_score)
    dict_to_json(parameters, config.work_dir + '/parameters.json')
    print(pd.DataFrame(parameters))
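dice_score is not shown; the grid search above assumes the usual binary dice with the empty-vs-empty convention scored as 1.0. A sketch:

import numpy as np

def dice_score(pred, targs):
    pred = (pred > 0).astype(float)
    targs = (targs > 0).astype(float)
    if pred.sum() == 0 and targs.sum() == 0:
        return 1.0                             # both empty counts as a perfect match
    return 2.0 * (pred * targs).sum() / (pred.sum() + targs.sum())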
Example #15
def run_seg(config_dir):
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'

    # ------------------------------------------------------------------------------------------------------------
    # 2. segmentation inference
    # ------------------------------------------------------------------------------------------------------------
    config_root = Path(config_dir) / 'seg'
    config_paths = [config_root / p for p in os.listdir(config_root)]
    base_config_paths = [
        Path(config_dir) / p for p in os.listdir(config_dir) if p.endswith('.yml')
    ]
    config = load_config(base_config_paths[0])

    models = []
    for c in config_paths:
        models.append(load_model(c))

    model = MultiSegModels(models)

    if os.path.exists('cls_preds.csv'):
        testloader = make_loader(data_folder=config.data.test_dir,
                                 df_path='cls_preds.csv',
                                 phase='filtered_test',
                                 batch_size=config.test.batch_size,
                                 num_workers=config.num_workers,
                                 transforms=get_transforms(
                                     config.transforms.test))
    else:
        testloader = make_loader(data_folder=config.data.test_dir,
                                 df_path=config.data.sample_submission_path,
                                 phase='test',
                                 batch_size=config.test.batch_size,
                                 num_workers=config.num_workers,
                                 transforms=get_transforms(
                                     config.transforms.test))

    if os.path.exists(config.work_dir + '/threshold_search.json'):
        with open(config.work_dir + '/threshold_search.json') as json_file:
            data = json.load(json_file)
        df = pd.DataFrame(data)
        min_sizes = list(df.T.idxmax().values.astype(int))
        print('load best threshold from validation:', min_sizes)
    else:
        min_sizes = config.test.min_size
        print('load default threshold:', min_sizes)

    predictions = []
    with torch.no_grad():
        for i, (batch_fnames, batch_images) in enumerate(tqdm(testloader)):
            batch_images = batch_images.to(config.device)
            batch_preds = predict_batch(model,
                                        batch_images,
                                        tta=config.test.tta)

            for fname, preds in zip(batch_fnames, batch_preds):
                for cls in range(preds.shape[0]):
                    mask = preds[cls, :, :]
                    mask, num = post_process(mask, config.test.best_threshold,
                                             min_sizes[cls])
                    rle = mask2rle(mask)
                    name = fname + f"_{cls + 1}"
                    predictions.append([name, rle])

    # ------------------------------------------------------------------------------------------------------------
    # submission
    # ------------------------------------------------------------------------------------------------------------
    sub_df = pd.DataFrame(predictions,
                          columns=['ImageId_ClassId', 'EncodedPixels'])

    sample_submission = pd.read_csv(config.data.sample_submission_path)
    df_merged = pd.merge(sample_submission,
                         sub_df,
                         on='ImageId_ClassId',
                         how='left')
    df_merged.fillna('', inplace=True)
    df_merged['EncodedPixels'] = df_merged['EncodedPixels_y']
    df_merged = df_merged[['ImageId_ClassId', 'EncodedPixels']]

    df_merged.to_csv("submission.csv", index=False)
    df_merged.to_csv(KAGGLE_WORK_DIR + "/submission.csv", index=False)
Example #16
def run(config_file):
    config = load_config(config_file)
    if 'COLAB_GPU' in os.environ:
        config.work_dir = '/content/drive/My Drive/kaggle_cloud/' + config.work_dir
    elif 'KAGGLE_WORKING_DIR' in os.environ:
        config.work_dir = '/kaggle/working/' + config.work_dir
    print('working directory:', config.work_dir)

    os.makedirs(config.work_dir, exist_ok=True)
    save_config(config, config.work_dir + '/config.yml')

    os.environ['CUDA_VISIBLE_DEVICES'] = '0'

    all_transforms = {}
    all_transforms['train'] = get_transforms(config.transforms.train)
    all_transforms['valid'] = get_transforms(config.transforms.test)

    dataloaders = {
        phase: make_loader(
            data_folder=config.data.train_dir,
            df_path=config.data.train_df_path,
            phase=phase,
            img_size=(config.data.height, config.data.width),
            batch_size=config.train.batch_size,
            num_workers=config.num_workers,
            idx_fold=config.data.params.idx_fold,
            transforms=all_transforms[phase],
            num_classes=config.data.num_classes,
            pseudo_label_path=config.train.pseudo_label_path,
            debug=config.debug
        )
        for phase in ['train', 'valid']
    }

    # create segmentation model with pre-trained encoder
    model = getattr(smp, config.model.arch)(
        encoder_name=config.model.encoder,
        encoder_weights=config.model.pretrained,
        classes=config.data.num_classes,
        activation=None,
    )

    # train setting
    criterion = get_loss(config)
    params = [
        {'params': model.decoder.parameters(), 'lr': config.optimizer.params.decoder_lr},
        {'params': model.encoder.parameters(), 'lr': config.optimizer.params.encoder_lr},
    ]
    optimizer = get_optimizer(params, config)
    scheduler = get_scheduler(optimizer, config)

    # model runner
    runner = SupervisedRunner(model=model, device=get_device())

    callbacks = [DiceCallback(), IouCallback()]

    if config.train.early_stop_patience > 0:
        callbacks.append(EarlyStoppingCallback(
            patience=config.train.early_stop_patience))

    if config.train.accumulation_size > 0:
        accumulation_steps = config.train.accumulation_size // config.train.batch_size
        callbacks.extend(
            [CriterionCallback(),
             OptimizerCallback(accumulation_steps=accumulation_steps)]
        )

    # resume from a checkpoint if one exists
    if os.path.exists(config.work_dir + '/checkpoints/last_full.pth'):
        callbacks.append(CheckpointCallback(
            resume=config.work_dir + '/checkpoints/last_full.pth'))

    if config.train.mixup:
        callbacks.append(MixupCallback())

    if config.train.cutmix:
        callbacks.append(CutMixCallback())

    # model training
    runner.train(
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        loaders=dataloaders,
        logdir=config.work_dir,
        num_epochs=config.train.num_epochs,
        main_metric=config.train.main_metric,
        minimize_metric=config.train.minimize_metric,
        callbacks=callbacks,
        verbose=True,
        fp16=True,
    )
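These run() functions are typically wired up as command-line entry points; a sketch using argparse (the project may use python-fire or click instead):

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_file', type=str, required=True)
    args = parser.parse_args()
    run(args.config_file)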