Example 1
import torch
from torch.utils import data
from fastai.basic_data import DataBunch
from fastai.basic_train import Learner


def train(
    sentences_train,
    labels_train,
    sentences_valid,
    labels_valid,
    batch_size=128,
    n_epochs=10,
):
    train_dataset = data.TensorDataset(sentences_train, labels_train)
    valid_dataset = data.TensorDataset(sentences_valid, labels_valid)

    # Network and loss are assumed to be defined elsewhere in the project
    # (a hypothetical sketch of both follows the example)
    model = Network()

    train_loader = data.DataLoader(
        train_dataset, batch_size=batch_size, shuffle=True, pin_memory=False
    )
    valid_loader = data.DataLoader(
        valid_dataset, batch_size=batch_size, shuffle=False, pin_memory=False
    )

    databunch = DataBunch(train_dl=train_loader, valid_dl=valid_loader)
    learn = Learner(databunch, model, loss_func=loss)

    if torch.cuda.is_available():
        learn = learn.to_fp16()

    learn.fit_one_cycle(n_epochs)

    return learn.model
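
Example 1 assumes that Network (the model class) and loss (the loss function) exist elsewhere in the project. A minimal, hypothetical stand-in for both, together with a dummy invocation, might look like this; the embedding architecture, vocabulary size and tensor shapes are illustrative assumptions, not part of the original:

# hypothetical stand-ins for the names Example 1 leaves undefined
import torch
import torch.nn as nn
import torch.nn.functional as F

class Network(nn.Module):
    def __init__(self, vocab_size=10000, emb_dim=64, n_classes=2):
        super().__init__()
        self.emb = nn.Embedding(vocab_size, emb_dim)
        self.fc = nn.Linear(emb_dim, n_classes)

    def forward(self, x):
        # mean-pool the token embeddings, then classify
        return self.fc(self.emb(x).mean(dim=1))

def loss(preds, targets):
    return F.cross_entropy(preds, targets)

# dummy invocation: 1000 sentences of 20 token ids each
sentences = torch.randint(0, 10000, (1000, 20))
labels = torch.randint(0, 2, (1000,))
trained = train(sentences[:800], labels[:800], sentences[800:], labels[800:])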
Example 2

# data (a DataBunch) and model are assumed to be defined earlier in the script
learner = Learner(data, model, loss_func=F.cross_entropy, metrics=[accuracy])
learner.clip = 0.1  # gradients are clipped to the range [-0.1, 0.1]

# Find best learning rate
learner.lr_find()
learner.recorder.plot()  # pick the lr where the loss curve has the steepest downward slope (about 5e-3 here)

# Training
epochs = 1
lr = 5e-3
wd = 1e-4

import time

t0 = time.time()
learner.fit_one_cycle(epochs, lr, wd=wd)  # wd is the weight-decay coefficient (the lambda in L2 regularization)
t1 = time.time()

print('training time:', t1 - t0)

# training process diagnostics
learner.recorder.plot_lr()
learner.recorder.plot_losses()
learner.recorder.plot_metrics()

torch.save(model.state_dict(), 'cifar10-wrn22.pth')

hyper_params = {
    'arch': str(model),
    'num_epochs': epochs,
    'opt_func': 'Adam',
}
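
To reuse the checkpoint saved above, the weights must be loaded back into a freshly built model of the same architecture; a short sketch (the WideResNet22 constructor call is an assumption taken from Example 6):

# rebuild the architecture, then restore the saved weights
model = WideResNet22(3, 10)  # constructor signature assumed from Example 6
model.load_state_dict(torch.load('cifar10-wrn22.pth'))
model.eval()  # inference mode: fixes dropout and batchnorm behaviour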
Example 3

# train_ds, valid_ds, model and batch_size are assumed to be defined earlier
data = DataBunch.create(train_ds, valid_ds, bs=batch_size, path='./data/cifar10')
learner = Learner(data, model, loss_func=F.cross_entropy, metrics=[accuracy])
learner.clip = 0.1  # clip gradients to [-0.1, 0.1]


# lr_find starts from a very low lr, increases it every batch, and records the loss
learner.lr_find()


# plot loss against lr and pick the lr where the loss falls fastest
learner.recorder.plot()


learner.fit_one_cycle(9, 5e-3, wd=1e-4) # epochs, lr, weight decay


# plot the lr schedule, the losses and the metrics recorded during training
learner.recorder.plot_lr()

learner.recorder.plot_losses()

learner.recorder.plot_metrics()


# save the model weights for future use
torch.save(model.state_dict(), 'cifar10-wrn22.pth')


Example 4

        # this fragment starts inside a K-fold loop; the beginning of the
        # databunch construction above these keyword arguments is not part
        # of the snippet
            vocab=fastai_bert_vocab,
            include_bos=False,
            include_eos=False,
            text_cols='comment_text',
            label_cols=label_cols,
            bs=BATCH_SIZE,
            collate_fn=partial(pad_collate, pad_first=False, pad_idx=0),
        )

        learner = Learner(databunch, bert_model, loss_func=bert_custom_loss)
        if CUR_STEP != 1:
            learner.load('/kaggle/input/freeze-bert-1-s-uc-260ml-3e-8f-s-' +
                         str(CUR_STEP - 1) + '-f-' + str(MAKE_FOLD) +
                         '/models/' + FILE_NAME)

        learner.fit_one_cycle(N_EPOCH, max_lr=MAX_LR)

        oof[val_idx] = get_preds_as_nparray(DatasetType.Valid).astype(
            np.float32)
        predictions += get_preds_as_nparray(DatasetType.Test).astype(
            np.float32) / NFOLDS

        validate_df(train.iloc[val_idx], oof[val_idx, 0], verbose=True)

        learner.save(FILE_NAME)

print('CV BIASED AUC:')
validate_df(train, oof[:, 0], verbose=True)

# assuming oof holds one column per label, as label_cols suggests
train_results = pd.DataFrame(np.column_stack((train['id'], oof)),
                             columns=['id'] + label_cols)
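
The fragment above calls a get_preds_as_nparray helper that is not shown. A common definition in fastai v1 kernels, given here as an assumption rather than the original code, restores dataset order because fastai serves text batches in sampler order:

import numpy as np
from fastai.basic_data import DatasetType

def get_preds_as_nparray(ds_type: DatasetType) -> np.ndarray:
    # predictions come back in sampler order, so invert the sampler
    # permutation to line them up with the original dataframe rows
    preds = learner.get_preds(ds_type)[0].detach().cpu().numpy()
    sampler = [i for i in databunch.dl(ds_type).sampler]
    reverse_sampler = np.argsort(sampler)
    return preds[reverse_sampler, :]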
Example 5
def train(train_dataset: torch.utils.data.Dataset,
          test_dataset: torch.utils.data.Dataset,
          training_config: dict = train_config,
          global_config: dict = global_config):
    """
    Template training routine. Takes a training and a test dataset wrapped
    as torch.utils.data.Dataset type and two corresponding generic
    configs for both gobal path settings and training settings.
    Returns the fitted fastai.train.Learner object which can be
    used to assess the resulting metrics and error curves etc.
    """

    for path in global_config.values():
        create_dirs(path)

    # wrap datasets with DataLoader classes
    train_loader = torch.utils.data.DataLoader(
        train_dataset, **training_config["DATA_LOADER_CONFIG"])
    test_loader = torch.utils.data.DataLoader(
        test_dataset, **training_config["DATA_LOADER_CONFIG"])
    databunch = DataBunch(train_loader, test_loader)

    # instantiate model and learner
    if training_config["WEIGHTS"] is None:
        model = training_config["MODEL"](**training_config["MODEL_CONFIG"])
    else:
        model = load_model(training_config["MODEL"],
                           training_config["MODEL_CONFIG"],
                           training_config["WEIGHTS"],
                           training_config["DEVICE"])

    learner = Learner(databunch,
                      model,
                      metrics=training_config["METRICS"],
                      path=global_config["ROOT_PATH"],
                      model_dir=global_config["WEIGHT_DIR"],
                      loss_func=training_config["LOSS"])

    # model name & paths
    name = "_".join([training_config["DATE"], training_config["SESSION_NAME"]])
    modelpath = os.path.join(global_config["WEIGHT_DIR"], name)

    if training_config["MIXED_PRECISION"]:
        learner.to_fp16()

    learner.save(modelpath)

    torch.backends.cudnn.benchmark = True

    cbs = [
        SaveModelCallback(learner),
        LearnerTensorboardWriter(
            learner,
            Path(global_config["LOG_DIR"], "tensorboardx"),
            name),
        TerminateOnNaNCallback()
    ]

    # perform training iteration
    try:
        if train_config["ONE_CYCLE"]:
            learner.fit_one_cycle(train_config["EPOCHS"],
                                  max_lr=train_config["LR"],
                                  callbacks=cbs)
        else:
            learner.fit(train_config["EPOCHS"],
                        lr=train_config["LR"],
                        callbacks=cbs)
    # save model files
    except KeyboardInterrupt:
        learner.save(modelpath)
        raise KeyboardInterrupt

    learner.save(modelpath)
    val_loss = min(learner.recorder.val_losses)
    val_metrics = learner.recorder.metrics

    # log using the logging tool
    logger = log.Log(training_config, run_name=training_config['SESSION_NAME'])
    logger.log_metric('Validation Loss', val_loss)
    logger.log_metrics(val_metrics)
    logger.end_run()

    # write csv log file
    log_content = training_config.copy()
    log_content["VAL_LOSS"] = val_loss
    log_content["VAL_METRICS"] = val_metrics
    log_path = os.path.join(global_config["LOG_DIR"], training_config["LOGFILE"])
    write_log(log_path, log_content)

    return learner, log_content, name
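
The routine reads a fixed set of keys from the two config dicts. A hypothetical minimal pair, inferred from the lookups in the function body (the model stub and all values are placeholders, not part of the original):

import torch.nn as nn
from torch.nn import functional as F
from fastai.metrics import accuracy

class MyModel(nn.Module):  # placeholder model class
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(10, 2)

    def forward(self, x):
        return self.fc(x)

global_config = {
    "ROOT_PATH": "./runs",
    "WEIGHT_DIR": "weights",
    "LOG_DIR": "./logs",
}

train_config = {
    "DATA_LOADER_CONFIG": {"batch_size": 64, "shuffle": True, "num_workers": 4},
    "MODEL": MyModel,       # class, instantiated with MODEL_CONFIG
    "MODEL_CONFIG": {},     # kwargs for the model constructor
    "WEIGHTS": None,        # or a path to pretrained weights
    "DEVICE": "cuda",
    "METRICS": [accuracy],
    "LOSS": F.cross_entropy,
    "DATE": "2020-01-01",
    "SESSION_NAME": "baseline",
    "MIXED_PRECISION": False,
    "ONE_CYCLE": True,
    "EPOCHS": 10,
    "LR": 1e-3,
    "LOGFILE": "runs.csv",
}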
Example 6
import config
from dataset import Dataset
from model import WideResNet22
from fastai.train import Learner
from fastai.metrics import accuracy
from torch.nn import functional as F
from fastai.basic_data import DataBunch

cifar10 = Dataset()
# cifar10.download_dataset()
train_dataloader, valid_dataloader = cifar10.get_dataloader()
model = WideResNet22(3, 10)

data = DataBunch(train_dataloader, valid_dataloader)
learner = Learner(data, model, loss_func=F.cross_entropy, metrics=[accuracy])
learner.clip = 0.1
learner.fit_one_cycle(config.EPOCHS, config.LEARNING_RATE, wd=1e-4)
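
The config module is project-local and not shown; judging from the two attributes read above, a minimal version could be as small as:

# hypothetical config.py, covering only what Example 6 uses
EPOCHS = 9
LEARNING_RATE = 5e-3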