Example #1
def main(test, s3_data, batch, debug):
    """Train a semantic segmentation FPN model on the CamVid-Tiramisu dataset."""
    if batch:
        run_on_batch(test, debug)

    # Set up options
    batch_sz = 8
    num_workers = 4
    num_epochs = 20
    lr = 1e-4
    backbone_arch = 'resnet18'
    sample_pct = 1.0

    if test:
        batch_sz = 1
        num_workers = 0
        num_epochs = 2
        sample_pct = 0.01

    # Set up data
    tmp_dir_obj = tempfile.TemporaryDirectory()
    tmp_dir = tmp_dir_obj.name
    output_dir = local_output_uri
    make_dir(output_dir)

    data_dir = download_data(s3_data, tmp_dir)
    data = get_databunch(data_dir,
                         sample_pct=sample_pct,
                         batch_sz=batch_sz,
                         num_workers=num_workers)
    print(data)
    plot_data(data, output_dir)

    # Set up and train the model
    num_classes = data.c
    model = SegmentationFPN(backbone_arch, num_classes)
    metrics = [acc_camvid]
    learn = Learner(data,
                    model,
                    metrics=metrics,
                    loss_func=SegmentationFPN.loss,
                    path=output_dir)
    learn.unfreeze()

    callbacks = [
        SaveModelCallback(learn, monitor='valid_loss'),
        CSVLogger(learn, filename='log'),
    ]

    learn.fit_one_cycle(num_epochs, lr, callbacks=callbacks)

    # Plot predictions and sync
    plot_preds(data, learn, output_dir)

    if s3_data:
        sync_to_dir(output_dir, remote_output_uri)
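
The `acc_camvid` metric used above is defined elsewhere in the original module. A minimal sketch of the usual CamVid pixel accuracy that ignores the 'Void' class, assuming `void_code` is the index of that class in the label codes (the default below is only a placeholder):

def acc_camvid(input, target, void_code=0):
    # Per-pixel accuracy, ignoring pixels labelled with the void class.
    target = target.squeeze(1)
    mask = target != void_code
    return (input.argmax(dim=1)[mask] == target[mask]).float().mean()
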
Example #2
def main(args):

    if args.deterministic:
        set_seed(42)

    # Set device
    if args.device is None:
        if torch.cuda.is_available():
            args.device = 'cuda:0'
        else:
            args.device = 'cpu'

    defaults.device = torch.device(args.device)

    # Aggregate paths and labels into lists for the fastai ImageDataBunch
    fnames, labels, is_valid = [], [], []
    dataset = OpenFire(root=args.data_path,
                       train=True,
                       download=True,
                       img_folder=args.img_folder)
    for sample in dataset.data:
        fnames.append(
            dataset._images.joinpath(sample['name']).relative_to(dataset.root))
        labels.append(sample['target'])
        is_valid.append(False)
    dataset = OpenFire(root=args.data_path, train=False, download=True)
    for sample in dataset.data:
        fnames.append(
            dataset._images.joinpath(sample['name']).relative_to(dataset.root))
        labels.append(sample['target'])
        is_valid.append(True)

    df = pd.DataFrame.from_dict(
        dict(name=fnames, label=labels, is_valid=is_valid))

    # Split train and valid sets
    il = vision.ImageList.from_df(
        df, path=args.data_path).split_from_df('is_valid')
    # Encode labels
    il = il.label_from_df(cols='label',
                          label_cls=FloatList if args.binary else CategoryList)
    # Set transformations
    il = il.transform(vision.get_transforms(), size=args.resize)
    # Create the Databunch
    data = il.databunch(bs=args.batch_size,
                        num_workers=args.workers).normalize(
                            vision.imagenet_stats)
    # Metric
    metric = partial(vision.accuracy_thresh,
                     thresh=0.5) if args.binary else vision.error_rate
    # Create model
    model = models.__dict__[args.model](imagenet_pretrained=args.pretrained,
                                        num_classes=data.c,
                                        lin_features=args.lin_feats,
                                        concat_pool=args.concat_pool,
                                        bn_final=args.bn_final,
                                        dropout_prob=args.dropout_prob)
    # Create learner
    learner = Learner(data,
                      model,
                      wd=args.weight_decay,
                      loss_func=CustomBCELogitsLoss()
                      if args.binary else nn.CrossEntropyLoss(),
                      metrics=metric)

    # Form layer groups for optimization
    meta = model_meta.get(args.model, _default_meta)
    learner.split(meta['split'])
    # Freeze the pretrained body so only the head trains
    if args.pretrained:
        learner.freeze()

    if args.resume:
        learner.load(args.resume)
    if args.unfreeze:
        learner.unfreeze()

    learner.fit_one_cycle(args.epochs,
                          max_lr=slice(None, args.lr, None),
                          div_factor=args.div_factor,
                          final_div=args.final_div_factor)

    learner.save(args.checkpoint)
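
`fit_one_cycle` above receives `max_lr=slice(None, args.lr, None)`, which fastai v1 expands into one learning rate per layer group created by `learner.split(meta['split'])`. A rough sketch of that expansion (an illustration, not the library code): with no slice start, every group except the last gets `lr / 10`; with both ends given, the rates are spaced geometrically.

import numpy as np

def expand_lr(lr_slice, n_groups):
    # Roughly mirrors fastai v1's Learner.lr_range behaviour.
    if lr_slice.start:
        return np.geomspace(lr_slice.start, lr_slice.stop, n_groups)
    return np.array([lr_slice.stop / 10] * (n_groups - 1) + [lr_slice.stop])

expand_lr(slice(None, 1e-3, None), 3)   # array([0.0001, 0.0001, 0.001])
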
Example #3
class MakeRandomizerConsistentOnEpochEnd(LearnerCallback):
    # Base class assumed: fastai v1's LearnerCallback, whose __init__ takes the learner.
    def __init__(self, learn):
        super().__init__(learn)

    def on_epoch_end(self, **kwargs):
        for _ in train_eval_loader:
            pass


# ---
databunch = DataBunch(train_loader, val_loader)

opt_func = partial(SGD, lr=0.1, momentum=0.9, weight_decay=5e-4)

learner = Learner(data=databunch,
                  model=model,
                  opt_func=opt_func,
                  loss_func=criterion,
                  metrics=[accuracy],
                  true_wd=False)

learner.unfreeze()

# ---
callback_on_train_begin = MakeRandomizerConsistentOnTrainBegin(learner)
callback_on_epoch_end = MakeRandomizerConsistentOnEpochEnd(learner)
learner.fit(epochs=150,
            lr=0.1,
            wd=5e-4,
            callbacks=[callback_on_train_begin, callback_on_epoch_end])
learner.fit(epochs=100, lr=0.01, wd=5e-4, callbacks=[callback_on_epoch_end])
learner.fit(epochs=100, lr=0.001, wd=5e-4, callbacks=[callback_on_epoch_end])
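
`MakeRandomizerConsistentOnTrainBegin` is not shown in this snippet; it presumably mirrors the epoch-end callback above. A hypothetical sketch, assuming the fastai v1 `LearnerCallback` base class and the same module-level `train_eval_loader`:

from fastai.basic_train import LearnerCallback

class MakeRandomizerConsistentOnTrainBegin(LearnerCallback):
    # Drain the loader once before training so its random state matches the reference run.
    def on_train_begin(self, **kwargs):
        for _ in train_eval_loader:
            pass
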
Example #4
def run_ner(
        lang: str = 'eng',
        log_dir: str = 'logs',
        task: str = NER,
        batch_size: int = 1,
        epochs: int = 1,
        dataset: str = 'data/conll-2003/',
        loss: str = 'cross',
        max_seq_len: int = 128,
        do_lower_case: bool = False,
        warmup_proportion: float = 0.1,
        rand_seed: int = None,
        ds_size: int = None,
        data_bunch_path: str = 'data/conll-2003/db',
        tuned_learner: str = None,
        do_train: bool = False,
        do_eval: bool = False,
        save: bool = False,
        nameX: str = 'ner',
        mask: tuple = ('s', 's'),
):
    name = "_".join(
        map(str, [
            nameX, task, lang, mask[0], mask[1], loss, batch_size, max_seq_len,
            do_train, do_eval
        ]))
    log_dir = Path(log_dir)
    log_dir.mkdir(parents=True, exist_ok=True)
    init_logger(log_dir, name)

    if rand_seed:
        random.seed(rand_seed)
        np.random.seed(rand_seed)
        torch.manual_seed(rand_seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(rand_seed)

    trainset = dataset + lang + '/train.txt'
    devset = dataset + lang + '/dev.txt'
    testset = dataset + lang + '/test.txt'

    bert_model = 'bert-base-cased' if lang == 'eng' else 'bert-base-multilingual-cased'
    print(f'Lang: {lang}\nModel: {bert_model}\nRun: {name}')
    model = BertForTokenClassification.from_pretrained(bert_model,
                                                       num_labels=len(VOCAB),
                                                       cache_dir='bertm')
    if tuned_learner:
        print('Loading pretrained learner: ', tuned_learner)
        model.bert.load_state_dict(torch.load(tuned_learner))

    model = torch.nn.DataParallel(model)
    model_lr_group = bert_layer_list(model)
    layers = len(model_lr_group)
    kwargs = {'max_seq_len': max_seq_len, 'ds_size': ds_size, 'mask': mask}

    train_dl = DataLoader(dataset=NerDataset(trainset,
                                             bert_model,
                                             train=True,
                                             **kwargs),
                          batch_size=batch_size,
                          shuffle=True,
                          collate_fn=partial(pad, train=True))

    dev_dl = DataLoader(dataset=NerDataset(devset, bert_model, **kwargs),
                        batch_size=batch_size,
                        shuffle=False,
                        collate_fn=pad)

    test_dl = DataLoader(dataset=NerDataset(testset, bert_model, **kwargs),
                         batch_size=batch_size,
                         shuffle=False,
                         collate_fn=pad)

    data = DataBunch(train_dl=train_dl,
                     valid_dl=dev_dl,
                     test_dl=test_dl,
                     collate_fn=pad,
                     path=Path(data_bunch_path))

    train_opt_steps = int(len(train_dl.dataset) / batch_size) * epochs
    optim = BertAdam(model.parameters(),
                     lr=0.01,
                     warmup=warmup_proportion,
                     t_total=train_opt_steps)

    loss_fun = ner_loss_func if loss == 'cross' else partial(ner_loss_func,
                                                             zero=True)
    metrics = [Conll_F1()]

    learn = Learner(
        data,
        model,
        BertAdam,
        loss_func=loss_fun,
        metrics=metrics,
        true_wd=False,
        layer_groups=model_lr_group,
        path='learn' + nameX,
    )

    learn.opt = OptimWrapper(optim)

    lrm = 1.6

    # select set of starting lrs
    lrs_eng = [0.01, 5e-4, 3e-4, 3e-4, 1e-5]
    lrs_deu = [0.01, 5e-4, 5e-4, 3e-4, 2e-5]

    startlr = lrs_eng if lang == 'eng' else lrs_deu
    results = [['epoch', 'lr', 'f1', 'val_loss', 'train_loss', 'train_losses']]
    if do_train:
        learn.freeze()
        learn.fit_one_cycle(1, startlr[0], moms=(0.8, 0.7))
        learn.freeze_to(-3)
        lrs = learn.lr_range(slice(startlr[1] / (1.6**15), startlr[1]))
        learn.fit_one_cycle(1, lrs, moms=(0.8, 0.7))
        learn.freeze_to(-6)
        lrs = learn.lr_range(slice(startlr[2] / (1.6**15), startlr[2]))
        learn.fit_one_cycle(1, lrs, moms=(0.8, 0.7))
        learn.freeze_to(-12)
        lrs = learn.lr_range(slice(startlr[3] / (1.6**15), startlr[3]))
        learn.fit_one_cycle(1, lrs, moms=(0.8, 0.7))
        learn.unfreeze()
        lrs = learn.lr_range(slice(startlr[4] / (1.6**15), startlr[4]))
        learn.fit_one_cycle(1, lrs, moms=(0.8, 0.7))

    if do_eval:
        res = learn.validate(test_dl, metrics=metrics)
        met_res = [f'{m.__name__}: {r}' for m, r in zip(metrics, res[1:])]
        print(f'Validation on TEST SET:\nloss {res[0]}, {met_res}')
        results.append(['val', '-', res[1], res[0], '-', '-'])

    with open(log_dir / (name + '.csv'), 'a') as resultFile:
        wr = csv.writer(resultFile)
        wr.writerows(results)
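
A hypothetical invocation of `run_ner` above; the dataset path is a placeholder and is expected to contain `<lang>/train.txt`, `<lang>/dev.txt`, and `<lang>/test.txt`:

run_ner(lang='eng',
        dataset='data/conll-2003/',
        batch_size=32,
        epochs=1,
        do_train=True,
        do_eval=True)
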
    def train(self, tmp_dir):
        """Train a model."""
        self.print_options()

        # Sync output of previous training run from cloud.
        train_uri = self.backend_opts.train_uri
        train_dir = get_local_path(train_uri, tmp_dir)
        make_dir(train_dir)
        sync_from_dir(train_uri, train_dir)

        # Get the zip files for each group and unzip them into chip_dir.
        chip_dir = join(tmp_dir, 'chips')
        make_dir(chip_dir)
        for zip_uri in list_paths(self.backend_opts.chip_uri, 'zip'):
            zip_path = download_if_needed(zip_uri, tmp_dir)
            with zipfile.ZipFile(zip_path, 'r') as zipf:
                zipf.extractall(chip_dir)

        # Set up the data loader.
        train_images = []
        train_lbl_bbox = []
        for annotation_path in glob.glob(join(chip_dir, 'train/*.json')):
            images, lbl_bbox = get_annotations(annotation_path)
            train_images += images
            train_lbl_bbox += lbl_bbox

        val_images = []
        val_lbl_bbox = []
        for annotation_path in glob.glob(join(chip_dir, 'valid/*.json')):
            images, lbl_bbox = get_annotations(annotation_path)
            val_images += images
            val_lbl_bbox += lbl_bbox

        images = train_images + val_images
        lbl_bbox = train_lbl_bbox + val_lbl_bbox

        img2bbox = dict(zip(images, lbl_bbox))
        get_y_func = lambda o: img2bbox[o.name]
        num_workers = 0 if self.train_opts.debug else 4
        data = ObjectItemList.from_folder(chip_dir)
        data = data.split_by_folder()
        data = data.label_from_func(get_y_func)
        data = data.transform(
            get_transforms(), size=self.task_config.chip_size, tfm_y=True)
        data = data.databunch(
            bs=self.train_opts.batch_sz, collate_fn=bb_pad_collate,
            num_workers=num_workers)
        print(data)

        if self.train_opts.debug:
            make_debug_chips(
                data, self.task_config.class_map, tmp_dir, train_uri)

        # Set up callbacks and train the model.
        ratios = [1/2, 1, 2]
        scales = [1, 2**(-1/3), 2**(-2/3)]
        model_arch = getattr(models, self.train_opts.model_arch)
        encoder = create_body(model_arch, cut=-2)
        model = RetinaNet(encoder, data.c, final_bias=-4)
        crit = RetinaNetFocalLoss(scales=scales, ratios=ratios)
        learn = Learner(data, model, loss_func=crit, path=train_dir)
        learn = learn.split(retina_net_split)

        model_path = get_local_path(self.backend_opts.model_uri, tmp_dir)

        pretrained_uri = self.backend_opts.pretrained_uri
        if pretrained_uri:
            print('Loading weights from pretrained_uri: {}'.format(
                pretrained_uri))
            pretrained_path = download_if_needed(pretrained_uri, tmp_dir)
            learn.load(pretrained_path[:-4])

        callbacks = [
            TrackEpochCallback(learn),
            SaveModelCallback(learn, every='epoch'),
            MyCSVLogger(learn, filename='log'),
            ExportCallback(learn, model_path),
            SyncCallback(train_dir, self.backend_opts.train_uri,
                         self.train_opts.sync_interval)
        ]
        learn.unfreeze()
        learn.fit(self.train_opts.num_epochs, self.train_opts.lr,
                  callbacks=callbacks)

        # Since the model is exported every epoch, we need some other way to
        # signal that training is finished.
        str_to_file('done!', self.backend_opts.train_done_uri)

        # Sync output to cloud.
        sync_to_dir(train_dir, self.backend_opts.train_uri)
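
In the `train` method above, `get_y_func` looks up each chip's target by filename. A toy illustration of the shapes involved, assuming `get_annotations` returns parallel lists of filenames and `(bboxes, classes)` pairs as in fastai v1:

images = ['chip_0.png', 'chip_1.png']
lbl_bbox = [([[10, 20, 60, 80]], ['car']),
            ([[0, 0, 30, 30], [5, 5, 25, 25]], ['building', 'car'])]
img2bbox = dict(zip(images, lbl_bbox))
get_y_func = lambda o: img2bbox[o.name]   # o is a Path-like item; o.name is its filename
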
def run_ner(
        lang: str = 'eng',
        log_dir: str = 'logs',
        task: str = NER,
        batch_size: int = 1,
        lr: float = 5e-5,
        epochs: int = 1,
        dataset: str = 'data/conll-2003/',
        loss: str = 'cross',
        max_seq_len: int = 128,
        do_lower_case: bool = False,
        warmup_proportion: float = 0.1,
        grad_acc_steps: int = 1,
        rand_seed: int = None,
        fp16: bool = False,
        loss_scale: float = None,
        ds_size: int = None,
        data_bunch_path: str = 'data/conll-2003/db',
        bertAdam: bool = False,
        freez: bool = False,
        one_cycle: bool = False,
        discr: bool = False,
        lrm: float = 2.6,
        div: int = None,
        tuned_learner: str = None,
        do_train: bool = False,
        do_eval: bool = False,
        save: bool = False,
        name: str = 'ner',
        mask: tuple = ('s', 's'),
):
    name = "_".join(
        map(str, [
            name, task, lang, mask[0], mask[1], loss, batch_size, lr,
            max_seq_len, do_train, do_eval
        ]))

    log_dir = Path(log_dir)
    log_dir.mkdir(parents=True, exist_ok=True)
    init_logger(log_dir, name)

    if rand_seed:
        random.seed(rand_seed)
        np.random.seed(rand_seed)
        torch.manual_seed(rand_seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(rand_seed)

    trainset = dataset + lang + '/train.txt'
    devset = dataset + lang + '/dev.txt'
    testset = dataset + lang + '/test.txt'

    bert_model = 'bert-base-cased' if lang == 'eng' else 'bert-base-multilingual-cased'
    print(f'Lang: {lang}\nModel: {bert_model}\nRun: {name}')
    model = BertForTokenClassification.from_pretrained(bert_model,
                                                       num_labels=len(VOCAB),
                                                       cache_dir='bertm')

    model = torch.nn.DataParallel(model)
    model_lr_group = bert_layer_list(model)
    layers = len(model_lr_group)
    kwargs = {'max_seq_len': max_seq_len, 'ds_size': ds_size, 'mask': mask}

    train_dl = DataLoader(dataset=NerDataset(trainset,
                                             bert_model,
                                             train=True,
                                             **kwargs),
                          batch_size=batch_size,
                          shuffle=True,
                          collate_fn=partial(pad, train=True))

    dev_dl = DataLoader(dataset=NerDataset(devset, bert_model, **kwargs),
                        batch_size=batch_size,
                        shuffle=False,
                        collate_fn=pad)

    test_dl = DataLoader(dataset=NerDataset(testset, bert_model, **kwargs),
                         batch_size=batch_size,
                         shuffle=False,
                         collate_fn=pad)

    data = DataBunch(train_dl=train_dl,
                     valid_dl=dev_dl,
                     test_dl=test_dl,
                     collate_fn=pad,
                     path=Path(data_bunch_path))

    loss_fun = ner_loss_func if loss == 'cross' else partial(ner_loss_func,
                                                             zero=True)
    metrics = [Conll_F1()]

    learn = Learner(
        data,
        model,
        BertAdam,
        loss_func=loss_fun,
        metrics=metrics,
        true_wd=False,
        layer_groups=None if not freez else model_lr_group,
        path='learn',
    )

    # initialise bert adam optimiser
    train_opt_steps = int(len(train_dl.dataset) / batch_size) * epochs
    optim = BertAdam(model.parameters(),
                     lr=lr,
                     warmup=warmup_proportion,
                     t_total=train_opt_steps)

    if bertAdam: learn.opt = OptimWrapper(optim)
    else: print("No Bert Adam")

    # load fine-tuned learner
    if tuned_learner:
        print('Loading pretrained learner: ', tuned_learner)
        learn.load(tuned_learner)

    # Uncomment to run the learning rate finder and plot it
    # learn.lr_find()
    # learn.recorder.plot(skip_end=15)

    # set lr (discriminative learning rates)
    if div: layers = div
    lrs = lr if not discr else learn.lr_range(slice(lr / lrm**(layers), lr))

    results = [['epoch', 'lr', 'f1', 'val_loss', 'train_loss', 'train_losses']]

    if do_train:
        for epoch in range(epochs):
            if freez:
                lay = (layers // (epochs - 1)) * epoch * -1
                if lay == 0:
                    print('Freeze')
                    learn.freeze()
                elif -lay >= layers:
                    print('unfreeze')
                    learn.unfreeze()
                else:
                    print('freeze2')
                    learn.freeze_to(lay)
                print('Freezing layers ', lay, ' of ', layers)

            # Fit the learner, i.e. train the model
            if one_cycle: learn.fit_one_cycle(1, lrs, moms=(0.8, 0.7))
            else: learn.fit(1, lrs)

            results.append([
                epoch,
                lrs,
                learn.recorder.metrics[0][0],
                learn.recorder.val_losses[0],
                np.array(learn.recorder.losses).mean(),
                learn.recorder.losses,
            ])

            if save:
                m_path = learn.save(f"{lang}_{epoch}_model", return_path=True)
                print(f'Saved model to {m_path}')
    if save: learn.export(f'{lang}.pkl')

    if do_eval:
        res = learn.validate(test_dl, metrics=metrics)
        met_res = [f'{m.__name__}: {r}' for m, r in zip(metrics, res[1:])]
        print(f'Validation on TEST SET:\nloss {res[0]}, {met_res}')
        results.append(['val', '-', res[1], res[0], '-', '-'])

    with open(log_dir / (name + '.csv'), 'a') as resultFile:
        wr = csv.writer(resultFile)
        wr.writerows(results)
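
The gradual-unfreezing schedule in `do_train` above moves `freeze_to` deeper each epoch. A worked illustration, assuming 12 layer groups and 4 epochs (both values are placeholders):

layers, epochs = 12, 4
for epoch in range(epochs):
    lay = (layers // (epochs - 1)) * epoch * -1
    print(epoch, lay)
# epoch 0: lay =   0 -> learn.freeze()        (only the head trains)
# epoch 1: lay =  -4 -> learn.freeze_to(-4)
# epoch 2: lay =  -8 -> learn.freeze_to(-8)
# epoch 3: lay = -12 -> learn.unfreeze()      (all layer groups train)
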
# 'data' is assumed to be the DataBunch built in an earlier cell.
learn = Learner(data,
                model,
                loss_func=Loss_combine(),
                opt_func=Over9000,
                metrics=[
                    Metric_grapheme(),
                    Metric_vowel(),
                    Metric_consonant(),
                    Metric_tot()
                ])

learn.model = nn.DataParallel(learn.model)

logger = CSVLogger(learn, f'log{fold}')
learn.clip_grad = 1.0
learn.split([model.head1])
learn.unfreeze()
# -

learn.summary()

# +
# learn.fit_one_cycle(32, max_lr=slice(0.2e-2,1e-2), wd=[1e-3,0.1e-1], pct_start=0.0,
#     div_factor=100, callbacks = [logger, SaveModelCallback(learn,monitor='metric_tot',
#     mode='max',name=f'model_{fold}'),MixUpCallback(learn)])

# changed config

learn.fit_one_cycle(100,
                    max_lr=slice(0.2e-2, 1.5e-2),
                    wd=[1e-4, 1e-3],
                    pct_start=0.0,