예제 #1
0
def get_log_normalized_dls(train, test, bs=1024):
    """Build train/validation DataLoaders from log-normalized DataFrames.

    Parameters
    ----------
    train : DataFrame
        Training data.
    test : DataFrame
        Test data.
    bs : int
        Batch size.

    Returns
    -------
    (DataLoader, DataLoader)
        Train and test DataLoaders.

    """
    train, test = log_normalize(train, test)

    def _autoencoder_dataset(frame):
        # Targets are identical to inputs because this feeds an autoencoder,
        # so the same values serve as both x and y.
        x = torch.tensor(frame.values, dtype=torch.float)
        y = torch.tensor(frame.values, dtype=torch.float)
        return TensorDataset(x, y)

    train_ds = _autoencoder_dataset(train)
    valid_ds = _autoencoder_dataset(test)
    return get_data(train_ds, valid_ds, bs)
예제 #2
0
def db_from_df(train, test, bs=1024):
    """Build a fastai DataBunch of autoencoder DataLoaders from DataFrames.

    Parameters
    ----------
    train : DataFrame
        Training data.
    test : DataFrame
        Test data (used as the validation set).
    bs : int
        Batch size.

    Returns
    -------
    DataBunch
        Train and validation DataLoaders wrapped in a fastai DataBunch.

    """
    # Cast to float32 explicitly: `df.values` is typically float64, and the
    # sibling loaders (get_log_normalized_dls) already use dtype=torch.float;
    # float64 inputs would not match default float32 model weights.
    # Targets equal inputs since this feeds an autoencoder.
    train_ds = TensorDataset(torch.tensor(train.values, dtype=torch.float),
                             torch.tensor(train.values, dtype=torch.float))
    valid_ds = TensorDataset(torch.tensor(test.values, dtype=torch.float),
                             torch.tensor(test.values, dtype=torch.float))
    # Create DataLoaders
    train_dl, valid_dl = get_data(train_ds, valid_ds, bs=bs)
    # Return DataBunch
    return basic_data.DataBunch(train_dl, valid_dl)
예제 #3
0
# Standardize the selected columns and build autoencoder datasets.
# NOTE(review): each split is fit with its OWN StandardScaler (the test set
# is not transformed with the train-set statistics) — preserved as-is.
datasets = []
for frame in (train, test):
    x = frame[variables].values
    x_scaled = StandardScaler().fit_transform(x)
    frame[variables] = x_scaled
    # Targets equal inputs: this feeds an autoencoder.
    features = torch.tensor(frame.values, dtype=torch.float)
    targets = torch.tensor(frame.values, dtype=torch.float)
    datasets.append(TensorDataset(features, targets))
train_ds, valid_ds = datasets

# Create DataLoaders
train_dl, valid_dl = get_data(train_ds, valid_ds, bs=bs)

# Wrap in a fastai DataBunch
db = basic_data.DataBunch(train_dl, valid_dl)

# Reconstruction loss for the autoencoder.
loss_func = nn.MSELoss()

bn_wd = False  # Don't use weight decay for batchnorm layers
true_wd = True  # wd will be used for all optimizers


def train_model(model, epochs, lr, wd, module_string, ct, path):
    plt.close('all')
    learn = basic_train.Learner(data=db,
                                model=model,
                                loss_func=loss_func,