Example #1
def single_model_predict_tta():
    assert len(model_name_list) == 1
    model_name = model_name_list[0]
    model = Net(model_name).to(device)
    model_save_path = os.path.join(
        config.dir_weight, '{}.bin'.format(model_name))
    model.load_state_dict(torch.load(model_save_path))
    model.eval()

    transforms = tta.Compose([
        tta.HorizontalFlip(),
        # tta.Rotate90(angles=[0, 180]),
        # tta.Scale(scales=[1, 2, 4]),
        # tta.Multiply(factors=[0.9, 1, 1.1]),
        tta.FiveCrops(224, 224)
    ])

    tta_model = tta.ClassificationTTAWrapper(model, transforms)

    pred_list = []
    with torch.no_grad():
        for batch_x, _ in tqdm(test_loader):
            batch_x = batch_x.to(device)
            probs = tta_model(batch_x)
            # take the argmax class index from the softmax probabilities
            preds = torch.softmax(probs, dim=1).argmax(dim=1)
            pred_list += preds.cpu().numpy().tolist()

    submission = pd.DataFrame({
        "id": range(len(pred_list)),
        "label": [int2label(x) for x in pred_list]
    })
    submission.to_csv(config.dir_csv_test, index=False, header=False)
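For readers who want to try the FiveCrops wrapper without the project-specific globals used above (model_name_list, config, test_loader), here is a minimal, self-contained sketch; the toy model, input sizes, and merge_mode value are illustrative assumptions, not part of the original example:

import torch
import torch.nn as nn
import ttach as tta

# Toy classifier standing in for Net(model_name); any nn.Module that maps
# image batches to class logits would do.
model = nn.Sequential(nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(3, 10)).eval()

tta_model = tta.ClassificationTTAWrapper(
    model,
    tta.Compose([tta.HorizontalFlip(), tta.FiveCrops(224, 224)]),
    merge_mode="mean",  # average the logits over 2 flips x 5 crops = 10 views
)

with torch.no_grad():
    batch = torch.randn(4, 3, 256, 256)             # dummy batch
    probs = torch.softmax(tta_model(batch), dim=1)  # (4, 10)
    preds = probs.argmax(dim=1)                     # predicted class indices, shape (4,)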
Example #2
def multi_model_predict_tta():
    preds_dict = dict()
    for model_name in model_name_list:
        for fold_idx in range(5):
            model = Net(model_name).to(device)
            model_save_path = os.path.join(
                config.dir_weight, '{}_fold{}.bin'.format(model_name, fold_idx))
            model.load_state_dict(torch.load(model_save_path))
            model.eval()
            # e.g. '/home/muyun99/data/dataset/AIyanxishe/Image_Classification/weight/resnet18_train_size_256_fold0.bin'
            transforms = tta.Compose([
                tta.Resize([int(config.size_test_image), int(config.size_test_image)]),
                tta.HorizontalFlip(),
                # tta.Rotate90(angles=[0, 180]),
                # tta.Scale(scales=[1, 2, 4]),
                # tta.Multiply(factors=[0.9, 1, 1.1]),
                tta.FiveCrops(config.size_test_image, config.size_test_image)
            ])
            tta_model = tta.ClassificationTTAWrapper(model, transforms)

            pred_list = predict(tta_model)
            submission = pd.DataFrame(pred_list)
            submission.to_csv(
                '{}/{}_fold{}_submission.csv'.format(config.dir_submission, config.save_model_name, fold_idx),
                index=False,
                header=False
            )
            preds_dict['{}_{}'.format(model_name, fold_idx)] = pred_list

    pred_list = get_pred_list(preds_dict)
    submission = pd.DataFrame(
        {"id": range(len(pred_list)), "label": [int2label(x) for x in pred_list]})
    submission.to_csv(config.dir_csv_test, index=False, header=False)
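predict and get_pred_list are helpers from the surrounding project and are not shown in this example. A minimal sketch of what a fold-averaging get_pred_list could look like, assuming predict returns one probability vector per test sample (the function body and shapes are assumptions inferred from how the results are used above):

import numpy as np

def get_pred_list(preds_dict):
    # Average the per-sample probability vectors collected under every
    # '<model_name>_<fold_idx>' key, then take the argmax as the final label.
    stacked = np.stack([np.asarray(p) for p in preds_dict.values()])  # (n_runs, n_samples, n_classes)
    mean_probs = stacked.mean(axis=0)
    return mean_probs.argmax(axis=1).tolist()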
Example #3
def main():
    load_dotenv('cassava.env')
    seed_everything(SEED)

    root_path = os.getenv('ROOT_PATH')
    train_csv_path = root_path + 'train.csv'
    train_root_path = root_path + 'train_images'

    num_classes = int(os.getenv('NUM_CLASSES', 5))
    num_epoch = int(os.getenv('NUM_EPOCH', 10))
    num_folds = int(os.getenv('NUM_FOLDS', 5))
    batch_size = int(os.getenv('BATCH_SIZE', 16))
    grad_acc = int(os.getenv('GRAD_ACC', 8))

    resize = int(os.getenv('RESIZE', 224))

    normalize = A.Normalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225])
    train_transform = A.Compose([
        A.HorizontalFlip(),
        A.ShiftScaleRotate(p=1.0),
        A.ColorJitter(brightness=0.1, contrast=0.2, saturation=0.2, hue=0.0, p=1.0, always_apply=False),
        A.RandomResizedCrop(resize, resize, p=1.0, always_apply=True),
        normalize,
        ToTensorV2(p=1.0),
    ], p=1.0)
    test_transform = A.Compose([
        A.Resize(int(resize * 1.5), int(resize * 1.5)),
        normalize,
        ToTensorV2(p=1.0),
    ], p=1.0)
    tta_transform = tta.Compose([
        tta.FiveCrops(resize, resize),
    ])

    criterion = MixedLabelLoss(nn.CrossEntropyLoss(reduction='none'))
    augmentations = [snapmix, ]

    df = pd.read_csv(train_csv_path)
    folds = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=SEED).split(df['image_id'], df['label'])
    for _fold, (train, test) in enumerate(folds):
        train = df.iloc[train]
        test = df.iloc[test]
        scheduler = optim.lr_scheduler.CosineAnnealingLR

        model = TimmNet('efficientnet_b3a', num_classes, criterion, learning_rate=1e-3, scheduler=scheduler,
                        n_epoch=num_epoch, eta_min=1e-6, augmentations=augmentations, tta_transform=tta_transform)
        dm = DataFrameDataModule(train, train_root_path, test, batch_size=batch_size,
                                 train_transform=train_transform, test_transform=test_transform)

        mlf_logger = MLFlowLogger(
            experiment_name='cassava',
            tracking_uri='file:./cassava'
        )
        trainer = Trainer(gpus=-1, precision=32, deterministic=True, accumulate_grad_batches=grad_acc,
                          profiler='simple', val_check_interval=1.0, logger=mlf_logger, max_epochs=num_epoch)
        trainer.fit(model, datamodule=dm)
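A side note on how the test-time pieces above fit together: the Albumentations test_transform resizes images to 1.5x the crop size, and the ttach FiveCrops then extracts five resize x resize views from that larger image. A minimal shape-only sketch (the random image and the resize value are illustrative; the project's TimmNet and DataFrameDataModule are not involved):

import numpy as np
import torch
import albumentations as A
from albumentations.pytorch import ToTensorV2
import ttach as tta

resize = 224
test_transform = A.Compose([A.Resize(int(resize * 1.5), int(resize * 1.5)), ToTensorV2()])

img = (np.random.rand(512, 512, 3) * 255).astype(np.uint8)    # dummy RGB image
x = test_transform(image=img)['image'].unsqueeze(0).float()   # (1, 3, 336, 336)

crops = tta.FiveCrops(resize, resize)
views = [crops.apply_aug_image(x, **{crops.pname: p}) for p in crops.params]
print([tuple(v.shape) for v in views])                         # five (1, 3, 224, 224) crops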
Example #4
def test_fivecrop_transform():
    transform = tta.FiveCrops(crop_height=1, crop_width=1)
    a = torch.arange(25).reshape(1, 1, 5, 5).float()
    output = [0, 20, 24, 4, 12]
    for i, p in enumerate(transform.params):
        aug = transform.apply_aug_image(a, **{transform.pname: p})
        assert aug.item() == output[i]
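The expected values above follow directly from the 5x5 arange tensor, where the entry at row r, column c is 5*r + c; the crop order implied by the test is top-left, bottom-left, bottom-right, top-right, center. A quick check with plain indexing (no ttach involved):

import torch

a = torch.arange(25).reshape(1, 1, 5, 5).float()
corners = [(0, 0), (4, 0), (4, 4), (0, 4), (2, 2)]   # lt, lb, rb, rt, center
assert [int(a[0, 0, r, c]) for r, c in corners] == [0, 20, 24, 4, 12]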

#
# def test_resize_transform():
#     transform = tta.Resize(sizes=[(10, 10), (5, 5)], original_size=(5, 5))
#     a = torch.arange(25).reshape(1, 1, 5, 5).float()
#     for i, p in enumerate(transform.params):
#         aug = transform.apply_aug_image(a, **{transform.pname: p})
#         assert aug.item() == output[i]
Example #5
    for p in transform.params:
        aug = transform.apply_aug_image(a, **{transform.pname: p})
        deaug = transform.apply_deaug_mask(aug, **{transform.pname: p})
        assert torch.allclose(a, deaug)


@pytest.mark.parametrize(
    "transform",
    [
        tta.HorizontalFlip(),
        tta.VerticalFlip(),
        tta.Rotate90(angles=[0, 90, 180, 270]),
        tta.Scale(scales=[1, 2, 4], interpolation="nearest"),
        tta.Add(values=[-1, 0, 1, 2]),
        tta.Multiply(factors=[-1, 0, 1, 2]),
        tta.FiveCrops(crop_height=3, crop_width=5),
        tta.Resize(sizes=[(4, 5), (8, 10), (2, 2)], interpolation="nearest")
    ],
)
def test_label_is_same(transform):
    a = torch.arange(20).reshape(1, 1, 4, 5).float()
    for p in transform.params:
        aug = transform.apply_aug_image(a, **{transform.pname: p})
        deaug = transform.apply_deaug_label(aug, **{transform.pname: p})
        assert torch.allclose(aug, deaug)


def test_add_transform():
    transform = tta.Add(values=[-1, 0, 1])
    a = torch.arange(20).reshape(1, 1, 4, 5).float()
    for p in transform.params:
        aug = transform.apply_aug_image(a, **{transform.pname: p})
        assert torch.allclose(aug, a + p)
Example #6
import os
import time
import argparse
import numpy as np
import torch
from progress.bar import Bar
import torchvision.transforms as transforms
from dataloader.EyeQ_loader import DatasetGenerator
from utils.trainer_unc_origin import train_step, validation_step, save_output
from utils.metric_origin import compute_metric
from refuge.cropDiscRegion import cropDiscRegion
import pandas as pd
from networks.efficient_origin import EfficientNet_b0, SENet154, Xception, ResNext50, DenseNet121_v0, ResNest, ResNest_unc
import ttach as tta
import natsort
from PIL import Image
import cv2
from eval import eval

transforms_tta = tta.Compose([tta.FiveCrops(224, 224)])

os.environ["CUDA_VISIBLE_DEVICES"] = "2"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

np.random.seed(0)

# Setting parameters
parser = argparse.ArgumentParser(description='EyeQ_dense121')
parser.add_argument('--model_dir',
                    type=str,
                    default='./result_origin_unc_refuge/')
parser.add_argument('--pre_model', type=str, default=True)
parser.add_argument('--save_model', type=str, default='classification_results')

parser.add_argument('--crop_size', type=int, default=224)