Example #1
def configure_optimizers(self, epoch=0, lr=LR):
    """Initialize parameters for the optimizer."""
    # parameters = [
    #     {'params': self.classifier.parameters(), 'lr': 2e-4, 'weight_decay': 1e-5}
    # ]
    # if epoch:
    #     parameters.append({'params': self.bert.parameters(), 'lr': lr})
    # else:
    #     self.freeze_encoder()
    #     print(f'First epoch, freezing encoder\nTrainable parameters: {self.n_trainable()}')
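    # a single parameter group over all weights; RAdam(**parameters) expands
    # to RAdam(params=self.parameters(), lr=lr, weight_decay=0)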
    parameters = {'params': self.parameters(), 'lr': lr, 'weight_decay': 0}
    optimizer = RAdam(**parameters)
    return optimizer
Example #2
def main():
    # setup config
    cfg = config()
    cfg['device'] = torch.device(
        "cuda" if torch.cuda.is_available() else "cpu")
    timestr = time.strftime("%Y%m%d-%H%M%S")
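    # compose a run-specific logdir name from the key hyperparameters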
    cfg['logdir'] += f"{cfg['arch']}_"
    cfg['logdir'] += f"{cfg['exp_idx']}_"
    cfg['logdir'] += f"{cfg['input_size']}_"
    cfg['logdir'] += f"{cfg['criterion']}_"
    cfg['logdir'] += f"{cfg['optimizer']}_"
    cfg['logdir'] += f"split{cfg['data_split']}_"
    cfg['logdir'] += timestr
    set_global_seed(cfg['random_state'])
    pprint(cfg)

    # load data
    train_df = pd.read_csv(cfg['train_csv_path'])
    test_df = pd.read_csv(cfg['test_csv_path'])
    print(f"train rows: {len(train_df)}, test rows: {len(test_df)}")

    train_transforms, test_transforms = get_transforms(cfg['input_size'])
    train_dataset = LeafDataset(
        img_root=cfg['img_root'],
        df=train_df,
        img_transforms=train_transforms,
        is_train=True,
    )

    test_dataset = LeafDataset(
        img_root=cfg['img_root'],
        df=test_df,
        img_transforms=test_transforms,
        is_train=False,
    )
    print(
        f"Training set size: {len(train_dataset)}, Test set size: {len(test_dataset)}")

    # prepare train and test loader
    if cfg['sampling'] == 'weighted':
        # per-image weights computed from dataset statistics
        train_img_weights = compute_dataset_weights(train_df)
        # weighted sampler
        weighted_sampler = WeightedRandomSampler(
            weights=train_img_weights, num_samples=len(train_img_weights), replacement=False)
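        # with replacement=False and num_samples=len(weights), every image is
        # drawn exactly once per epoch, in a weight-biased order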
        # batch sampler from weighted sampler
        batch_sampler = BatchSampler(
            weighted_sampler, batch_size=cfg['batch_size'], drop_last=True)
        # train loader
        train_loader = DataLoader(
            train_dataset, batch_sampler=batch_sampler, num_workers=4)
    elif cfg['sampling'] == 'normal':
        train_loader = DataLoader(
            train_dataset, cfg['batch_size'], shuffle=True, num_workers=2)
    else:
        raise ValueError(f"unknown sampling mode: {cfg['sampling']}")

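    # drop_last=True means a final partial batch of test data is discarded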
    test_loader = DataLoader(
        test_dataset, cfg['test_batch_size'], shuffle=False, num_workers=1, drop_last=True)

    loaders = {
        'train': train_loader,
        'valid': test_loader
    }

    # model setup
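    # timm builds the pretrained backbone; num_classes resizes the classifier
    # head and drop_rate adds dropout in front of it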
    model = timm.create_model(
        model_name=cfg['arch'],
        num_classes=len(cfg['class_names']),
        drop_rate=0.5,
        pretrained=True,
    )
    model.train()

    # loss
    if cfg['criterion'] == 'label_smooth':
        criterion = LabelSmoothingCrossEntropy()
    elif cfg['criterion'] == 'cross_entropy':
        criterion = nn.CrossEntropyLoss()
    else:
        raise ValueError(f"unknown criterion: {cfg['criterion']}")

    # optimizer
    if cfg['optimizer'] == 'adam':
        optimizer = torch.optim.Adam(
            model.parameters(), lr=cfg['lr'], weight_decay=cfg['wd'])
    elif cfg['optimizer'] == 'adamw':
        optimizer = AdamW(
            model.parameters(), lr=cfg['lr'], weight_decay=cfg['wd'])
    elif cfg['optimizer'] == 'radam':
        optimizer = RAdam(
            model.parameters(), lr=cfg['lr'], weight_decay=cfg['wd'])
    else:
        raise ValueError(f"unknown optimizer: {cfg['optimizer']}")

    # learning schedule
    if cfg['lr_schedule'] == 'reduce_plateau':
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, factor=0.5, patience=4)
    else:
        scheduler = None  # runner.train accepts scheduler=None

    # trainer
    runner = SupervisedRunner(device=cfg['device'])
    runner.train(
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        loaders=loaders,
        callbacks=[
            AccuracyCallback(
                num_classes=len(cfg['class_names']),
                threshold=0.5,
                activation="Softmax"
            ),
        ],
        logdir=cfg['logdir'],
        num_epochs=cfg['num_epochs'],
        verbose=cfg['verbose'],
        # set this to True to run for 3 epochs only
        check=cfg['check'],
    )
Example #3
# NOTE: the source truncates this example; the assignment below is
# reconstructed from the surviving keyword arguments and the val_loader
# name used later (the val_dataset name is an assumption)
val_loader = DataLoader(val_dataset,
                        num_workers=args.num_workers,
                        pin_memory=False,
                        shuffle=False)

model = AlaskaModel(backbone=args.model, classes=4)
model.cuda()

loaders = collections.OrderedDict()
loaders["train"] = train_loader
loaders["valid"] = val_loader

runner = SupervisedRunner(input_key="image",
                          output_key=None,
                          input_target_key=None)

optimizer = RAdam(model.parameters(), lr=args.lr, weight_decay=0.001)

scheduler = ReduceLROnPlateau(optimizer=optimizer,
                              factor=0.75,
                              patience=3,
                              mode="max")
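# mode="max": the plateau detector treats larger metric values as improvement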

criterion = {'label_loss': nn.CrossEntropyLoss()}

callbacks = [
    CriterionCallback(
        input_key="label",
        output_key="logit_label",
        prefix="label_loss",
        criterion_key="label_loss",
        multiplier=1.0,
    ),
]  # the source truncates this example; brackets closed to keep it parseable
Example #4
dataset_test = TaskDataset(test)
dataloader_test = DataLoader(dataset_test, batch_size=128, shuffle=False)

runner = SupervisedRunner(
    device='cuda',
    input_key=['stop', 'delta'],
)

loaders = {
    'train': dataloader_train,
    'valid': dataloader_val
}

model = ReverseModel()

optimizer = Lookahead(RAdam(params=model.parameters(), lr=1e-3))
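# Lookahead wrapped around RAdam, a pairing commonly known as "Ranger"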

criterion = {"bce": nn.BCEWithLogitsLoss()}

scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                       factor=0.25,
                                                       patience=2)

callbacks = [
    CriterionCallback(input_key='start', prefix="loss", criterion_key="bce"),
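    # halt training when the tracked metric fails to improve for 5 epochs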
    EarlyStoppingCallback(patience=5),
]

runner.train(
    model=model,
    criterion=criterion,
    # the source truncates here; the remaining arguments are wired to the
    # objects defined above
    optimizer=optimizer,
    scheduler=scheduler,
    loaders=loaders,
    callbacks=callbacks,
)
Example #5
def main():
    args = get_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
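    # fix seeds and force deterministic cuDNN kernels for reproducible runs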
    SEED = 42
    utils.set_global_seed(SEED)
    utils.prepare_cudnn(deterministic=True)
    num_classes = 14

    # define datasets
    train_dataset = ChestXrayDataSet(
        data_dir=args.path_to_images,
        image_list_file=args.train_list,
        transform=transforms_train,
    )

    val_dataset = ChestXrayDataSet(
        data_dir=args.path_to_images,
        image_list_file=args.val_list,
        transform=transforms_val,
    )

    loaders = {
        'train':
        DataLoader(train_dataset,
                   batch_size=args.batch_size,
                   shuffle=True,
                   num_workers=args.num_workers),
        'valid':
        DataLoader(val_dataset,
                   batch_size=2,
                   shuffle=False,
                   num_workers=args.num_workers)
    }

    logdir = args.log_dir  # where model weights and logs are stored

    # define model
    model = DenseNet121(num_classes)
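    # args.gpus is a comma-separated device id string; more than one character
    # implies multiple GPUs, so wrap the model in DataParallel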
    if len(args.gpus) > 1:
        model = nn.DataParallel(model)
    device = utils.get_device()
    runner = SupervisedRunner(device=device)

    optimizer = RAdam(model.parameters(), lr=args.lr, weight_decay=0.0003)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     factor=0.25,
                                                     patience=2)

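    # per-class positive weights offset label imbalance in the
    # multi-label BCE loss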
    weights = torch.Tensor(
        [10, 100, 30, 8, 40, 40, 330, 140, 35, 155, 110, 250, 155,
         200]).to(device)
    criterion = BCEWithLogitsLoss(pos_weight=weights)

    class_names = [
        'Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltration', 'Mass',
        'Nodule', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema',
        'Emphysema', 'Fibrosis', 'Pleural_Thickening', 'Hernia'
    ]

    runner.train(
        model=model,
        logdir=logdir,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        loaders=loaders,
        num_epochs=args.epochs,

        # We can specify the callbacks list for the experiment;
        # For this task, we will check AUC and accuracy
        callbacks=[
            AUCCallback(
                input_key="targets",
                output_key='logits',
                prefix='auc',
                class_names=class_names,
                num_classes=num_classes,
                activation='Sigmoid',
            ),
            AccuracyCallback(
                input_key="targets",
                output_key="logits",
                prefix="accuracy",
                accuracy_args=[1],
                num_classes=num_classes,
                threshold=0.5,
                activation='Sigmoid',
            ),
        ],
        main_metric='auc/_mean',
        minimize_metric=False,
        verbose=True,
    )
Example #6
import torch
import matplotlib.pyplot as plt

from torch.optim.lr_scheduler import ExponentialLR
from .cosine import CosineAnnealingWarmUpRestarts
from catalyst.contrib.nn.optimizers import Lookahead, RAdam

model = torch.nn.Linear(2, 1)
optimizer = RAdam(model.parameters(), lr=0.005)
lr_scheduler = CosineAnnealingWarmUpRestarts(optimizer,
                                             T_0=200,
                                             T_mult=2,
                                             eta_max=0.005,
                                             eta_min=0.0000001,
                                             T_up=10,
                                             gamma=0.1,
                                             end_at_zero=True)
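# NOTE: the next line replaces the cosine scheduler above; only the
# ExponentialLR schedule is actually stepped and plotted below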
lr_scheduler = ExponentialLR(optimizer, gamma=0.93)
lrs = []

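# step(i) passes an explicit epoch index; newer PyTorch versions warn about
# this and prefer a bare step() per iteration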
for i in range(200):
    lr_scheduler.step(i)
    lrs.append(optimizer.param_groups[0]["lr"])

print(f"50: {lrs[50]}, 100: {lrs[100]}")
print(min(lrs), max(lrs))

plt.plot(lrs)
plt.savefig("/home/smirnvla/PycharmProjects/catalyst-classification/plt.png")