Example #1
def main():
    model = PSMNet(args.maxdisp, args.mindisp).cuda()
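    # Optionally warm-start the network from a standalone checkpoint (args.load_model);
    # a full learner checkpoint given via args.load (loaded further below) takes precedence.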
    if args.load_model is not None:
        if args.load is not None:
            warn('args.load is not None; the weights from load_model will be overridden by load.')
        ckpt = torch.load(args.load_model, 'cpu')
        if 'model' in ckpt.keys():
            pretrained = ckpt['model']
        elif 'state_dict' in ckpt.keys():
            pretrained = ckpt['state_dict']
        else:
            raise RuntimeError('Checkpoint contains neither a "model" nor a "state_dict" entry.')
        pretrained = {
            k.replace('module.', ''): v
            for k, v in pretrained.items()
        }
        model.load_state_dict(pretrained)
    train_dl = DataLoader(KITTIRoiDataset(args.data_dir, 'train',
                                          args.resolution, args.maxdisp,
                                          args.mindisp),
                          batch_size=args.batch_size,
                          shuffle=True,
                          num_workers=args.workers)
    val_dl = DataLoader(KITTIRoiDataset(args.data_dir, 'val', args.resolution,
                                        args.maxdisp, args.mindisp),
                        batch_size=args.batch_size,
                        num_workers=args.workers)

    loss_fn = PSMLoss()

    databunch = DataBunch(train_dl, val_dl, device='cuda')
    learner = Learner(databunch,
                      model,
                      loss_func=loss_fn,
                      model_dir=args.model_dir)
    learner.callbacks = [
        DistributedSaveModelCallback(learner),
        TensorBoardCallback(learner)
    ]
    if num_gpus > 1:
        learner.to_distributed(get_rank())
    if args.load is not None:
        learner.load(args.load)
    if args.mode == 'train':
        learner.fit(args.epochs, args.maxlr)
    elif args.mode == 'train_oc':
        fit_one_cycle(learner, args.epochs, args.maxlr)
    else:
        raise ValueError('args.mode not supported.')
Example #2
def build_learner(
    databunch,
    model,
    loss_func=nn.MSELoss(),
    pretrained_learner_fname=pretrained_learner_fname,
):
    learn = Learner(data=databunch, model=model, loss_func=loss_func)
    if pretrained_learner_fname:
        learn = learn.load(pretrained_learner_fname)
    return learn
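
A minimal usage sketch for the helper above, assuming an existing fastai DataBunch (data) and a PyTorch model (net); the names, the L1 loss, and the checkpoint name 'stage1' are illustrative only:

# Hypothetical call: build a Learner around an existing DataBunch and model,
# optionally warm-starting from a previously saved learner checkpoint.
learn = build_learner(
    databunch=data,
    model=net,
    loss_func=nn.L1Loss(),
    pretrained_learner_fname='stage1',  # pass None to skip loading
)
learn.fit_one_cycle(5, 1e-3)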
Example #3
siamese.to(device)

# In[11]:

learn = Learner(
    data,
    siamese,
    #enable_validate=True,
    path=learn_path,
    #loss_func=BCEWithLogitsFlat(),
    loss_func=ContrastiveLoss(margin=contrastive_neg_margin),
    metrics=[avg_pos_dist, avg_neg_dist]
    #metrics=[lambda preds, targs: accuracy_thresh(preds.squeeze(), targs, sigmoid=False)]
)

learn.load(f'{name}_80')

dist_mat, val_target, _ = cal_mat(learn.model,
                                  data.valid_dl,
                                  data.fix_dl,
                                  ds_with_target1=True,
                                  ds_with_target2=True)

for threshold in np.linspace(1, 9, 30):
    top5_matrix, map5 = cal_mapk(dist_mat,
                                 val_target,
                                 k=5,
                                 threshold=threshold,
                                 ref_idx2class=learn.data.fix_dl.y,
                                 target_idx2class=learn.data.valid_dl.y)
    print(threshold, map5)
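
A small follow-up sketch, assuming the same variables and the cal_mapk helper shown above, that keeps the best-scoring threshold from the sweep instead of only printing each value (illustrative only):

# Hypothetical extension: remember the threshold with the highest MAP@5.
best_threshold, best_map5 = None, -1.0
for threshold in np.linspace(1, 9, 30):
    _, map5 = cal_mapk(dist_mat,
                       val_target,
                       k=5,
                       threshold=threshold,
                       ref_idx2class=learn.data.fix_dl.y,
                       target_idx2class=learn.data.valid_dl.y)
    if map5 > best_map5:
        best_threshold, best_map5 = threshold, map5
print(f'best threshold: {best_threshold:.3f}, MAP@5: {best_map5:.4f}')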
Example #4
def main(args):

    if args.deterministic:
        set_seed(42)

    # Set device
    if args.device is None:
        if torch.cuda.is_available():
            args.device = 'cuda:0'
        else:
            args.device = 'cpu'

    defaults.device = torch.device(args.device)

    # Aggregate path and labels into list for fastai ImageDataBunch
    fnames, labels, is_valid = [], [], []
    dataset = OpenFire(root=args.data_path,
                       train=True,
                       download=True,
                       img_folder=args.img_folder)
    for sample in dataset.data:
        fnames.append(
            dataset._images.joinpath(sample['name']).relative_to(dataset.root))
        labels.append(sample['target'])
        is_valid.append(False)
    dataset = OpenFire(root=args.data_path, train=False, download=True)
    for sample in dataset.data:
        fnames.append(
            dataset._images.joinpath(sample['name']).relative_to(dataset.root))
        labels.append(sample['target'])
        is_valid.append(True)

    df = pd.DataFrame.from_dict(
        dict(name=fnames, label=labels, is_valid=is_valid))

    # Split train and valid sets
    il = vision.ImageList.from_df(
        df, path=args.data_path).split_from_df('is_valid')
    # Encode labels
    il = il.label_from_df(cols='label',
                          label_cls=FloatList if args.binary else CategoryList)
    # Set transformations
    il = il.transform(vision.get_transforms(), size=args.resize)
    # Create the Databunch
    data = il.databunch(bs=args.batch_size,
                        num_workers=args.workers).normalize(
                            vision.imagenet_stats)
    # Metric
    metric = partial(vision.accuracy_thresh,
                     thresh=0.5) if args.binary else vision.error_rate
    # Create model
    model = models.__dict__[args.model](imagenet_pretrained=args.pretrained,
                                        num_classes=data.c,
                                        lin_features=args.lin_feats,
                                        concat_pool=args.concat_pool,
                                        bn_final=args.bn_final,
                                        dropout_prob=args.dropout_prob)
    # Create learner
    learner = Learner(data,
                      model,
                      wd=args.weight_decay,
                      loss_func=CustomBCELogitsLoss()
                      if args.binary else nn.CrossEntropyLoss(),
                      metrics=metric)

    # Form layer group for optimization
    meta = model_meta.get(args.model, _default_meta)
    learner.split(meta['split'])
    # Freeze model's head
    if args.pretrained:
        learner.freeze()

    if args.resume:
        learner.load(args.resume)
    if args.unfreeze:
        learner.unfreeze()

    learner.fit_one_cycle(args.epochs,
                          max_lr=slice(None, args.lr, None),
                          div_factor=args.div_factor,
                          final_div=args.final_div_factor)

    learner.save(args.checkpoint)
Example #5
    def train(self, tmp_dir):
        """Train a model."""
        self.print_options()

        # Sync output of previous training run from cloud.
        train_uri = self.backend_opts.train_uri
        train_dir = get_local_path(train_uri, tmp_dir)
        make_dir(train_dir)
        sync_from_dir(train_uri, train_dir)

        # Get zip file for each group, and unzip them into chip_dir.
        chip_dir = join(tmp_dir, 'chips')
        make_dir(chip_dir)
        for zip_uri in list_paths(self.backend_opts.chip_uri, 'zip'):
            zip_path = download_if_needed(zip_uri, tmp_dir)
            with zipfile.ZipFile(zip_path, 'r') as zipf:
                zipf.extractall(chip_dir)

        # Setup data loader.
        train_images = []
        train_lbl_bbox = []
        for annotation_path in glob.glob(join(chip_dir, 'train/*.json')):
            images, lbl_bbox = get_annotations(annotation_path)
            train_images += images
            train_lbl_bbox += lbl_bbox

        val_images = []
        val_lbl_bbox = []
        for annotation_path in glob.glob(join(chip_dir, 'valid/*.json')):
            images, lbl_bbox = get_annotations(annotation_path)
            val_images += images
            val_lbl_bbox += lbl_bbox

        images = train_images + val_images
        lbl_bbox = train_lbl_bbox + val_lbl_bbox

        img2bbox = dict(zip(images, lbl_bbox))
        get_y_func = lambda o: img2bbox[o.name]
        num_workers = 0 if self.train_opts.debug else 4
        data = ObjectItemList.from_folder(chip_dir)
        data = data.split_by_folder()
        data = data.label_from_func(get_y_func)
        data = data.transform(
            get_transforms(), size=self.task_config.chip_size, tfm_y=True)
        data = data.databunch(
            bs=self.train_opts.batch_sz, collate_fn=bb_pad_collate,
            num_workers=num_workers)
        print(data)

        if self.train_opts.debug:
            make_debug_chips(
                data, self.task_config.class_map, tmp_dir, train_uri)

        # Setup callbacks and train model.
        ratios = [1/2, 1, 2]
        scales = [1, 2**(-1/3), 2**(-2/3)]
        model_arch = getattr(models, self.train_opts.model_arch)
        encoder = create_body(model_arch, cut=-2)
        model = RetinaNet(encoder, data.c, final_bias=-4)
        crit = RetinaNetFocalLoss(scales=scales, ratios=ratios)
        learn = Learner(data, model, loss_func=crit, path=train_dir)
        learn = learn.split(retina_net_split)

        model_path = get_local_path(self.backend_opts.model_uri, tmp_dir)

        pretrained_uri = self.backend_opts.pretrained_uri
        if pretrained_uri:
            print('Loading weights from pretrained_uri: {}'.format(
                pretrained_uri))
            pretrained_path = download_if_needed(pretrained_uri, tmp_dir)
            learn.load(pretrained_path[:-4])

        callbacks = [
            TrackEpochCallback(learn),
            SaveModelCallback(learn, every='epoch'),
            MyCSVLogger(learn, filename='log'),
            ExportCallback(learn, model_path),
            SyncCallback(train_dir, self.backend_opts.train_uri,
                         self.train_opts.sync_interval)
        ]
        learn.unfreeze()
        learn.fit(self.train_opts.num_epochs, self.train_opts.lr,
                  callbacks=callbacks)

        # Since model is exported every epoch, we need some other way to
        # show that training is finished.
        str_to_file('done!', self.backend_opts.train_done_uri)

        # Sync output to cloud.
        sync_to_dir(train_dir, self.backend_opts.train_uri)
Example #6
def run_ner(
        lang: str = 'eng',
        log_dir: str = 'logs',
        task: str = NER,
        batch_size: int = 1,
        lr: float = 5e-5,
        epochs: int = 1,
        dataset: str = 'data/conll-2003/',
        loss: str = 'cross',
        max_seq_len: int = 128,
        do_lower_case: bool = False,
        warmup_proportion: float = 0.1,
        grad_acc_steps: int = 1,
        rand_seed: int = None,
        fp16: bool = False,
        loss_scale: float = None,
        ds_size: int = None,
        data_bunch_path: str = 'data/conll-2003/db',
        bertAdam: bool = False,
        freez: bool = False,
        one_cycle: bool = False,
        discr: bool = False,
        lrm: float = 2.6,
        div: int = None,
        tuned_learner: str = None,
        do_train: bool = False,
        do_eval: bool = False,
        save: bool = False,
        name: str = 'ner',
        mask: tuple = ('s', 's'),
):
    name = "_".join(
        map(str, [
            name, task, lang, mask[0], mask[1], loss, batch_size, lr,
            max_seq_len, do_train, do_eval
        ]))

    log_dir = Path(log_dir)
    log_dir.mkdir(parents=True, exist_ok=True)
    init_logger(log_dir, name)

    if rand_seed:
        random.seed(rand_seed)
        np.random.seed(rand_seed)
        torch.manual_seed(rand_seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(rand_seed)

    trainset = dataset + lang + '/train.txt'
    devset = dataset + lang + '/dev.txt'
    testset = dataset + lang + '/test.txt'

    bert_model = 'bert-base-cased' if lang == 'eng' else 'bert-base-multilingual-cased'
    print(f'Lang: {lang}\nModel: {bert_model}\nRun: {name}')
    model = BertForTokenClassification.from_pretrained(bert_model,
                                                       num_labels=len(VOCAB),
                                                       cache_dir='bertm')

    model = torch.nn.DataParallel(model)
    model_lr_group = bert_layer_list(model)
    layers = len(model_lr_group)
    kwargs = {'max_seq_len': max_seq_len, 'ds_size': ds_size, 'mask': mask}

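    # Build CoNLL-style train/dev/test loaders; only training batches are shuffled,
    # and batches are padded dynamically via the pad collate function.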
    train_dl = DataLoader(dataset=NerDataset(trainset,
                                             bert_model,
                                             train=True,
                                             **kwargs),
                          batch_size=batch_size,
                          shuffle=True,
                          collate_fn=partial(pad, train=True))

    dev_dl = DataLoader(dataset=NerDataset(devset, bert_model, **kwargs),
                        batch_size=batch_size,
                        shuffle=False,
                        collate_fn=pad)

    test_dl = DataLoader(dataset=NerDataset(testset, bert_model, **kwargs),
                         batch_size=batch_size,
                         shuffle=False,
                         collate_fn=pad)

    data = DataBunch(train_dl=train_dl,
                     valid_dl=dev_dl,
                     test_dl=test_dl,
                     collate_fn=pad,
                     path=Path(data_bunch_path))

    loss_fun = ner_loss_func if loss == 'cross' else partial(ner_loss_func,
                                                             zero=True)
    metrics = [Conll_F1()]

    learn = Learner(
        data,
        model,
        BertAdam,
        loss_func=loss_fun,
        metrics=metrics,
        true_wd=False,
        layer_groups=None if not freez else model_lr_group,
        path='learn',
    )

    # initialise bert adam optimiser
    train_opt_steps = int(len(train_dl.dataset) / batch_size) * epochs
    optim = BertAdam(model.parameters(),
                     lr=lr,
                     warmup=warmup_proportion,
                     t_total=train_opt_steps)

    if bertAdam: learn.opt = OptimWrapper(optim)
    else: print("No Bert Adam")

    # load fine-tuned learner
    if tuned_learner:
        print('Loading pretrained learner: ', tuned_learner)
        learn.load(tuned_learner)

    # Uncomment to graph learning rate plot
    # learn.lr_find()
    # learn.recorder.plot(skip_end=15)

    # set lr (discriminative learning rates)
    if div: layers = div
    lrs = lr if not discr else learn.lr_range(slice(lr / lrm**(layers), lr))

    results = [['epoch', 'lr', 'f1', 'val_loss', 'train_loss', 'train_losses']]

    if do_train:
        for epoch in range(epochs):
            if freez:
                lay = (layers // (epochs - 1)) * epoch * -1
                if lay == 0:
                    print('Freeze')
                    learn.freeze()
                elif lay == layers:
                    print('unfreeze')
                    learn.unfreeze()
                else:
                    print('freeze2')
                    learn.freeze_to(lay)
                print('Freezing layers ', lay, ' off ', layers)

            # Fit Learner - eg train model
            if one_cycle: learn.fit_one_cycle(1, lrs, moms=(0.8, 0.7))
            else: learn.fit(1, lrs)

            results.append([
                epoch,
                lrs,
                learn.recorder.metrics[0][0],
                learn.recorder.val_losses[0],
                np.array(learn.recorder.losses).mean(),
                learn.recorder.losses,
            ])

            if save:
                m_path = learn.save(f"{lang}_{epoch}_model", return_path=True)
                print(f'Saved model to {m_path}')
    if save: learn.export(f'{lang}.pkl')

    if do_eval:
        res = learn.validate(test_dl, metrics=metrics)
        met_res = [f'{m.__name__}: {r}' for m, r in zip(metrics, res[1:])]
        print(f'Validation on TEST SET:\nloss {res[0]}, {met_res}')
        results.append(['val', '-', res[1], res[0], '-', '-'])

    with open(log_dir / (name + '.csv'), 'a') as resultFile:
        wr = csv.writer(resultFile)
        wr.writerows(results)
Example #7
            name=model_se_str),
    partial(WarmRestartsLRScheduler,
            n_cycles=args.epochs,
            lr=(args.lr, args.lr / args.lr_div),
            mom=(args.mom, 0.95),
            cycle_len=args.cycle_len,
            cycle_mult=args.cycle_mult,
            start_epoch=args.start_epoch)
]
learn = Learner(db,
                model,
                metrics=[rmse, mae],
                callback_fns=callback_fns,
                wd=args.wd,
                loss_func=contribs_rmse_loss)
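# Resume from the checkpoint of the previous warm-restart cycle if continuing,
# otherwise start from the base model weights.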
if args.start_epoch > 0: learn.load(model_se_str + f'_{args.start_epoch-1}')
else: learn.load(model_str)
torch.cuda.empty_cache()
if distributed_train: learn = learn.to_distributed(args.local_rank)

learn.fit(args.epochs)

# make predictions
n_val = len(train_df[train_df['molecule_id'].isin(val_mol_ids)])
val_preds = np.zeros((n_val, args.epochs))
test_preds = np.zeros((len(test_df), args.epochs))
for m in range(args.epochs):
    print(f'Predicting for model {m}')
    learn.load(model_se_str + f'_{m}')
    val_contrib_preds = learn.get_preds(DatasetType.Valid)
    test_contrib_preds = learn.get_preds(DatasetType.Test)
Example #8
callback_fns = [
    partial(GradientClipping, clip=10), GroupMeanLogMAE,
    partial(SaveModelCallback,
            every='improvement',
            mode='min',
            monitor='group_mean_log_mae',
            name=model_str)
]
learn = Learner(db,
                model,
                metrics=[rmse, mae],
                callback_fns=callback_fns,
                wd=args.wd,
                loss_func=contribs_rmse_loss)
if args.start_epoch > 0:
    learn.load(model_str)
    torch.cuda.empty_cache()
if distributed_train: learn = learn.to_distributed(args.local_rank)

learn.fit_one_cycle(args.epochs, max_lr=args.lr, start_epoch=args.start_epoch)

# make predictions
val_contrib_preds = learn.get_preds(DatasetType.Valid)
test_contrib_preds = learn.get_preds(DatasetType.Test)
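# Un-standardize the predicted scalar coupling constants (last contribution column)
# back to the original scale.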
val_preds = val_contrib_preds[0][:, -1].detach().numpy() * C.SC_STD + C.SC_MEAN
test_preds = test_contrib_preds[0][:, -1].detach().numpy() * C.SC_STD + C.SC_MEAN

# store results
store_submit(test_preds, model_str, print_head=True)
store_oof(val_preds, model_str, print_head=True)