Example #1
def test_early_stop():
    wide = Wide(100, 1)
    deepdense = DeepDense(
        hidden_layers=[32, 16],
        dropout=[0.5, 0.5],
        deep_column_idx=deep_column_idx,
        embed_input=embed_input,
        continuous_cols=colnames[-5:],
    )
    model = WideDeep(wide=wide, deepdense=deepdense)
    model.compile(
        method="binary",
        callbacks=[
            EarlyStopping(min_delta=0.1,
                          patience=3,
                          restore_best_weights=True,
                          verbose=1)
        ],
        verbose=1,
    )
    model.fit(X_wide=X_wide,
              X_deep=X_deep,
              target=target,
              val_split=0.2,
              n_epochs=5)
    # length of history = patience+1
    assert len(model.history._history["train_loss"]) == 3 + 1
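Each of these snippets relies on module-level fixtures (colnames, embed_input, deep_column_idx, X_wide, X_deep, target) defined elsewhere in the test file. A minimal sketch of plausible definitions, patterned on the setup code in Examples #10 and #15 below; the exact names, shapes and values are assumptions:

# Hypothetical module-level fixtures, modeled on the setup snippets below;
# shapes and values are assumptions, not the suite's actual data.
import string

import numpy as np

colnames = list(string.ascii_lowercase)[:10]
embed_cols = [np.random.choice(np.arange(5), 100) for _ in range(5)]
cont_cols = [np.random.rand(100) for _ in range(5)]

# one (name, n_unique, embed_dim) tuple per categorical column
embed_input = [(u, i, j) for u, i, j in zip(colnames[:5], [5] * 5, [16] * 5)]
deep_column_idx = {k: v for v, k in enumerate(colnames)}

X_wide = np.random.choice(60, (100, 4))
X_deep = np.vstack(embed_cols + cont_cols).transpose()
target = np.random.choice(2, 100)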
Example #2
def test_fit_methods(
    X_wide,
    X_deep,
    target,
    method,
    X_wide_test,
    X_deep_test,
    X_test,
    pred_dim,
    probs_dim,
):
    wide = Wide(np.unique(X_wide).shape[0], pred_dim)
    deepdense = DeepDense(
        hidden_layers=[32, 16],
        dropout=[0.5, 0.5],
        deep_column_idx=deep_column_idx,
        embed_input=embed_input,
        continuous_cols=colnames[-5:],
    )
    model = WideDeep(wide=wide, deepdense=deepdense, pred_dim=pred_dim)
    model.compile(method=method, verbose=0)
    model.fit(X_wide=X_wide, X_deep=X_deep, target=target)
    preds = model.predict(X_wide=X_wide, X_deep=X_deep, X_test=X_test)
    if method == "binary":
        assert preds.shape[0] == 100
    else:
        probs = model.predict_proba(X_wide=X_wide,
                                    X_deep=X_deep,
                                    X_test=X_test)
        # probs is only defined for the non-binary methods
        assert preds.shape[0] == 100 and probs.shape[1] == probs_dim
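The method, pred_dim and probs_dim arguments are injected by a pytest parametrization that is not shown. A sketch of what it presumably looks like, with hypothetical values (a binary head emits 1 logit but 2 class probabilities; a 3-class head emits 3 of each):

import pytest

# Hypothetical parametrization for test_fit_methods; the actual fixtures
# and values in the source suite may differ.
@pytest.mark.parametrize(
    "method, pred_dim, probs_dim",
    [("binary", 1, 2), ("multiclass", 3, 3)],
)
def test_fit_methods_sketch(method, pred_dim, probs_dim):
    ...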
Example #3
def test_model_checkpoint(save_best_only, max_save, n_files):
    wide = Wide(100, 1)
    deepdense = DeepDense(
        hidden_layers=[32, 16],
        dropout=[0.5, 0.5],
        deep_column_idx=deep_column_idx,
        embed_input=embed_input,
        continuous_cols=colnames[-5:],
    )
    model = WideDeep(wide=wide, deepdense=deepdense)
    model.compile(
        method="binary",
        callbacks=[
            ModelCheckpoint("weights/test_weights",
                            save_best_only=save_best_only,
                            max_save=max_save)
        ],
        verbose=0,
    )
    model.fit(X_wide=X_wide,
              X_deep=X_deep,
              target=target,
              n_epochs=5,
              val_split=0.2)
    n_saved = len(os.listdir("weights/"))
    for f in os.listdir("weights/"):
        os.remove("weights/" + f)
    assert n_saved <= n_files
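Similarly, save_best_only, max_save and n_files come from a parametrization defined elsewhere. Hypothetical values consistent with the assertion that at most n_files checkpoint files survive a 5-epoch run:

import pytest

# Hypothetical parametrization for test_model_checkpoint.
@pytest.mark.parametrize(
    "save_best_only, max_save, n_files",
    [(True, 2, 2), (False, 2, 2), (False, 3, 3)],
)
def test_model_checkpoint_sketch(save_best_only, max_save, n_files):
    ...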
Example #4
def test_initializers_with_pattern():

    wide = Wide(100, 1)
    deepdense = DeepDense(
        hidden_layers=[32, 16],
        dropout=[0.5, 0.5],
        deep_column_idx=deep_column_idx,
        embed_input=embed_input,
        continuous_cols=colnames[-5:],
    )
    deeptext = DeepText(vocab_size=vocab_size, embed_dim=32, padding_idx=0)
    model = WideDeep(wide=wide,
                     deepdense=deepdense,
                     deeptext=deeptext,
                     pred_dim=1)
    cmodel = c(model)  # "c" is presumably copy.deepcopy, imported at module level
    org_word_embed = []
    for n, p in cmodel.named_parameters():
        if "word_embed" in n:
            org_word_embed.append(p)
    model.compile(method="binary", verbose=0, initializers=initializers_2)
    init_word_embed = []
    for n, p in model.named_parameters():
        if "word_embed" in n:
            init_word_embed.append(p)

    assert torch.all(org_word_embed[0] == init_word_embed[0].cpu())
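initializers_2 is another module-level fixture. Since the assertion checks that the word embeddings are identical before and after compile, it is presumably built from initializers whose pattern excludes word_embed. A sketch, assuming the pattern keyword of the library's initializer classes; the regex and component keys are guesses:

from pytorch_widedeep.initializers import KaimingNormal

# Hypothetical definition: re-initialize every parameter except those whose
# name matches "word_embed", so the pretrained word embeddings survive.
initializers_2 = {
    "wide": KaimingNormal,
    "deepdense": KaimingNormal,
    "deeptext": KaimingNormal(pattern=r"^(?!.*word_embed)"),
}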
Example #5
def test_focal_loss(X_wide, X_deep, target, method, output_dim, probs_dim):
    wide = Wide(100, output_dim)
    deepdense = DeepDense(
        hidden_layers=[32, 16],
        dropout=[0.5, 0.5],
        deep_column_idx=deep_column_idx,
        embed_input=embed_input,
        continuous_cols=colnames[-5:],
    )
    model = WideDeep(wide=wide, deepdense=deepdense, output_dim=output_dim)
    model.compile(method=method, verbose=0, with_focal_loss=True)
    model.fit(X_wide=X_wide, X_deep=X_deep, target=target)
    probs = model.predict_proba(X_wide=X_wide, X_deep=X_deep)
    assert probs.shape[1] == probs_dim
Example #6
def test_filepath_error():
    wide = Wide(np.unique(X_wide).shape[0], 1)
    deepdense = DeepDense(
        hidden_layers=[16, 4],
        deep_column_idx=deep_column_idx,
        embed_input=embed_input,
        continuous_cols=colnames[-5:],
    )
    model = WideDeep(wide=wide, deepdense=deepdense)
    with pytest.raises(ValueError):
        model.compile(
            method="binary",
            callbacks=[ModelCheckpoint(filepath="wrong_file_path")],
            verbose=0,
        )
Example #7
def test_fit_with_deephead():
    wide = Wide(np.unique(X_wide).shape[0], 1)
    deepdense = DeepDense(
        hidden_layers=[32, 16],
        deep_column_idx=deep_column_idx,
        embed_input=embed_input,
        continuous_cols=colnames[-5:],
    )
    deephead = nn.Sequential(nn.Linear(16, 8), nn.Linear(8, 4))
    model = WideDeep(wide=wide,
                     deepdense=deepdense,
                     pred_dim=1,
                     deephead=deephead)
    model.compile(method="binary", verbose=0)
    model.fit(X_wide=X_wide,
              X_deep=X_deep,
              target=target_binary,
              batch_size=16)
    preds = model.predict(X_wide=X_wide, X_deep=X_deep, X_test=X_test)
    probs = model.predict_proba(X_wide=X_wide, X_deep=X_deep, X_test=X_test)
    assert preds.shape[0] == 32 and probs.shape[1] == 2
Example #8
def test_initializers_1():
    wide = Wide(100, 1)
    deepdense = DeepDense(
        hidden_layers=[32, 16],
        dropout=[0.5, 0.5],
        deep_column_idx=deep_column_idx,
        embed_input=embed_input,
        continuous_cols=colnames[-5:],
    )
    deeptext = DeepText(vocab_size=vocab_size, embed_dim=32, padding_idx=0)
    deepimage = DeepImage(pretrained=True)
    model = WideDeep(
        wide=wide,
        deepdense=deepdense,
        deeptext=deeptext,
        deepimage=deepimage,
        output_dim=1,
    )
    cmodel = c(model)  # "c" is presumably copy.deepcopy

    org_weights = []
    for n, p in cmodel.named_parameters():
        if n in test_layers_1:
            org_weights.append(p)

    model.compile(method="binary", verbose=0, initializers=initializers_1)
    init_weights = []
    for n, p in model.named_parameters():
        if n in test_layers_1:
            init_weights.append(p)

    # every tracked layer should have been re-initialized: no weight tensor
    # may be element-wise identical before and after compile
    res = all(
        [torch.all((1 - (a == b).int()).bool())
         for a, b in zip(org_weights, init_weights)]
    )
    assert res
Example #9
def test_basic_run_with_metrics_multiclass():
    wide = Wide(np.unique(X_wide).shape[0], 3)
    deepdense = DeepDense(
        hidden_layers=[32, 16],
        dropout=[0.5, 0.5],
        deep_column_idx={k: v for v, k in enumerate(colnames)},
        embed_input=embed_input,
        continuous_cols=colnames[-5:],
    )
    model = WideDeep(wide=wide, deepdense=deepdense, pred_dim=3)
    model.compile(method="multiclass", metrics=[Accuracy], verbose=False)
    model.fit(
        X_wide=X_wide,
        X_deep=X_deep,
        target=target_multi,
        n_epochs=1,
        batch_size=16,
        val_split=0.2,
    )
    assert ("train_loss" in model.history._history.keys()
            and "train_acc" in model.history._history.keys())
Example #10
deep_column_idx = {k: v for v, k in enumerate(colnames)}
X_deep = np.vstack(embed_cols + cont_cols).transpose()

# Text Array
padded_sequences = np.random.choice(np.arange(1, 100), (100, 48))
vocab_size = 1000
X_text = np.hstack((np.repeat(np.array([[0, 0]]), 100, axis=0), padded_sequences))

# target
target = np.random.choice(2, 100)

###############################################################################
# Test that history saves the information adequately
###############################################################################
wide = Wide(100, 1)
deepdense = DeepDense(
    hidden_layers=[32, 16],
    dropout=[0.5, 0.5],
    deep_column_idx=deep_column_idx,
    embed_input=embed_input,
    continuous_cols=colnames[-5:],
)
model = WideDeep(wide=wide, deepdense=deepdense)

wide_opt_1 = torch.optim.Adam(model.wide.parameters())
deep_opt_1 = torch.optim.Adam(model.deepdense.parameters())
wide_sch_1 = StepLR(wide_opt_1, step_size=4)
deep_sch_1 = CyclicLR(deep_opt_1, base_lr=0.001, max_lr=0.01, step_size_up=10, cycle_momentum=False)
optimizers_1 = {"wide": wide_opt_1, "deepdense": deep_opt_1}
lr_schedulers_1 = {"wide": wide_sch_1, "deepdense": deep_sch_1}

wide_opt_2 = torch.optim.Adam(model.wide.parameters())
deep_opt_2 = RAdam(model.deepdense.parameters())
wide_sch_2 = StepLR(wide_opt_2, step_size=4)
deep_sch_2 = StepLR(deep_opt_2, step_size=4)
optimizers_2 = {"wide": wide_opt_2, "deepdense": deep_opt_2}
lr_schedulers_2 = {"wide": wide_sch_2, "deepdense": deep_sch_2}
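These dictionaries are keyed by component name (wide, deepdense), which is how compile would route one optimizer and one scheduler to each sub-model. A usage sketch, assuming compile accepts optimizers and lr_schedulers keyword arguments in the version of pytorch-widedeep these tests target:

# Hypothetical usage of the dictionaries defined above; the kwarg names are
# assumptions about this version's compile signature.
model.compile(
    method="binary",
    optimizers=optimizers_1,
    lr_schedulers=lr_schedulers_1,
    verbose=0,
)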
Example #11
    df.drop("yield", axis=1, inplace=True)
    target_col = "yield_cat"
    target = np.array(df[target_col].values)
    prepare_wide = WidePreprocessor(wide_cols=wide_cols,
                                    crossed_cols=crossed_cols)
    X_wide = prepare_wide.fit_transform(df)

    prepare_deep = DensePreprocessor(embed_cols=cat_embed_cols,
                                     continuous_cols=continuous_cols)
    X_deep = prepare_deep.fit_transform(df)
    wide = Wide(wide_dim=X_wide.shape[1], pred_dim=3)
    deepdense = DeepDense(
        hidden_layers=[64, 32],
        dropout=[0.2, 0.2],
        deep_column_idx=prepare_deep.deep_column_idx,
        embed_input=prepare_deep.embeddings_input,
        continuous_cols=continuous_cols,
    )
    model = WideDeep(wide=wide, deepdense=deepdense, pred_dim=3)
    model.compile(method="multiclass", metrics=[Accuracy, F1Score])

    model.fit(
        X_wide=X_wide,
        X_deep=X_deep,
        target=target,
        n_epochs=1,
        batch_size=32,
        val_split=0.2,
    )
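With the model fitted, predictions would follow the same pattern as the earlier examples; a minimal continuation sketch inside the same test function:

    # continuing the snippet: one predicted class label per input row
    preds = model.predict(X_wide=X_wide, X_deep=X_deep)
    assert preds.shape[0] == X_wide.shape[0]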
Example #12
# image
X_image = torch.rand(100, 3, 28, 28)

# Define the model components

# wide
wide = Wide(10, 1)
if use_cuda:
    wide.cuda()

# deep
deepdense = DeepDense(
    hidden_layers=[16, 8],
    dropout=[0.5, 0.2],
    deep_column_idx=deep_column_idx,
    embed_input=embed_input,
    continuous_cols=continuous_cols,
)
deepdense = nn.Sequential(deepdense, nn.Linear(8, 1))
if use_cuda:
    deepdense.cuda()

# text
deeptext = TestDeepText()
if use_cuda:
    deeptext.cuda()

# image
deepimage = TestDeepImage()
if use_cuda:
    deepimage.cuda()
Example #13
(
    X_wide_tr,
    X_wide_val,
    X_deep_tr,
    X_deep_val,
    X_text_tr,
    X_text_val,
    X_img_tr,
    X_img_val,
    y_train,
    y_val,
) = train_test_split(X_wide, X_deep, X_text, X_img, target)

# build model components
wide = Wide(np.unique(X_wide).shape[0], 1)
deepdense = DeepDense(
    hidden_layers=[32, 16],
    dropout=[0.5, 0.5],
    deep_column_idx={k: v for v, k in enumerate(colnames)},
    embed_input=embed_input,
    continuous_cols=colnames[-5:],
)
deeptext = DeepText(vocab_size=vocab_size, embed_dim=32, padding_idx=0)
deepimage = DeepImage(pretrained=True)

# transforms
mean = [0.406, 0.456, 0.485]  # BGR
std = [0.225, 0.224, 0.229]  # BGR
transforms1 = [ToTensor, Normalize(mean=mean, std=std)]
transforms2 = [Normalize(mean=mean, std=std)]
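Note that ToTensor is passed as a class while Normalize is an instance; both lists would presumably be handed to the model at compile time. A sketch, assuming compile takes a transforms keyword argument in this version of the library:

# Hypothetical: attach the image transforms when compiling the model.
model = WideDeep(wide=wide, deepdense=deepdense, deeptext=deeptext,
                 deepimage=deepimage)
model.compile(method="binary", transforms=transforms1, verbose=0)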


##############################################################################
# Test many possible scenarios of data inputs I can think of. Surely users
# will input something unexpected.
##############################################################################
Example #14
from torch import nn

from pytorch_widedeep.models import (
    Wide,
    DeepText,
    WideDeep,
    DeepDense,
    DeepImage,
)

embed_input = [(u, i, j) for u, i, j in zip(["a", "b", "c"], [4] * 3, [8] * 3)]
deep_column_idx = {k: v for v, k in enumerate(["a", "b", "c"])}
wide = Wide(10, 1)
deepdense = DeepDense(hidden_layers=[16, 8],
                      deep_column_idx=deep_column_idx,
                      embed_input=embed_input)
deeptext = DeepText(vocab_size=100, embed_dim=8)
deepimage = DeepImage(pretrained=False)

###############################################################################
#  test raising 'output dim errors'
###############################################################################


@pytest.mark.parametrize(
    "deepcomponent, component_name",
    [
        (None, "dense"),
        (deeptext, "text"),
        (deepimage, "image"),
Example #15
colnames = list(string.ascii_lowercase)[:10]
embed_cols = [np.random.choice(np.arange(5), 10) for _ in range(5)]
cont_cols = [np.random.rand(10) for _ in range(5)]

X_deep = torch.from_numpy(np.vstack(embed_cols + cont_cols).transpose())
X_deep_emb = X_deep[:, :5]
X_deep_cont = X_deep[:, 5:]

###############################################################################
# Embeddings and NO continuous_cols
###############################################################################
embed_input = [(u, i, j) for u, i, j in zip(colnames[:5], [5] * 5, [16] * 5)]
model1 = DeepDense(
    hidden_layers=[32, 16],
    dropout=[0.5, 0.2],
    deep_column_idx={k: v for v, k in enumerate(colnames[:5])},
    embed_input=embed_input,
)


def test_deep_dense_embed():
    out = model1(X_deep_emb)
    assert out.size(0) == 10 and out.size(1) == 16


###############################################################################
# Continuous cols but NO embeddings
###############################################################################
continuous_cols = colnames[-5:]
model2 = DeepDense(hidden_layers=[32, 16],
                   dropout=[0.5, 0.2],