import os

import torch
import torch.nn as nn
from torch.utils.data import TensorDataset

# xDeepFM, DeepFM, GBDTLR, WideAndDeep, train_model and pwd_path are project-level
# definitions; their imports are omitted in these snippets.


def train(x_idx, x_value, label, features):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Feature indices, feature values and labels, moved to the training device.
    X_idx_tensor = torch.LongTensor(x_idx).to(device)
    X_value_tensor = torch.Tensor(x_value).to(device)
    y_tensor = torch.Tensor(label).to(device)
    y_tensor = y_tensor.reshape(-1, 1)  # BCELoss expects an (N, 1) float target
    X = TensorDataset(X_idx_tensor, X_value_tensor, y_tensor)
    # xDeepFM: a [400, 400] DNN plus CIN layers of sizes [100, 100, 50].
    model = xDeepFM(features.feature_size(), features.field_size(),
                    dropout_deep=[0, 0, 0], deep_layer_sizes=[400, 400],
                    cin_layer_sizes=[100, 100, 50], embedding_size=6).to(device)
    optimizer = torch.optim.Adam(model.parameters())
    model_path = os.path.join(pwd_path, 'xdeepfm_model.pt')
    model, loss_history = train_model(model=model, model_path=model_path, dataset=X,
                                      loss_func=nn.BCELoss(), optimizer=optimizer,
                                      device=device, val_size=0.2, batch_size=32,
                                      epochs=40, patience=10)
    print(loss_history)
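# `train_model` above is a project-level helper whose implementation is not shown
# here. As a rough, hypothetical sketch of what such a helper usually does
# (train/validation split, mini-batch loop, early stopping driven by `patience`,
# checkpointing the best weights to `model_path`), assuming only PyTorch:
import copy

import torch
from torch.utils.data import DataLoader, random_split


def train_model_sketch(model, model_path, dataset, loss_func, optimizer, device,
                       val_size=0.2, batch_size=32, epochs=10, patience=10, shuffle=True):
    n_val = int(len(dataset) * val_size)
    train_set, val_set = random_split(dataset, [len(dataset) - n_val, n_val])
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=shuffle)
    val_loader = DataLoader(val_set, batch_size=batch_size)

    best_val, bad_epochs, history = float('inf'), 0, []
    best_state = copy.deepcopy(model.state_dict())
    for epoch in range(epochs):
        model.train()
        for *inputs, y in train_loader:  # dataset tensors are already on `device`
            optimizer.zero_grad()
            loss = loss_func(model(*inputs), y)
            loss.backward()
            optimizer.step()

        # Validation pass; stop early once the loss has not improved for `patience` epochs.
        model.eval()
        with torch.no_grad():
            val_loss = sum(loss_func(model(*inputs), y).item()
                           for *inputs, y in val_loader) / max(len(val_loader), 1)
        history.append(val_loss)
        if val_loss < best_val:
            best_val, bad_epochs = val_loss, 0
            best_state = copy.deepcopy(model.state_dict())
            torch.save(best_state, model_path)
        else:
            bad_epochs += 1
            if bad_epochs >= patience:
                break

    model.load_state_dict(best_state)
    return model, history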
def train(x_idx, x_value, label, features, out_type='binary'):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    X_idx_tensor = torch.LongTensor(x_idx).to(device)
    X_value_tensor = torch.Tensor(x_value).to(device)
    y_tensor = torch.Tensor(label).to(device)
    y_tensor = y_tensor.reshape(-1, 1)
    X = TensorDataset(X_idx_tensor, X_value_tensor, y_tensor)
    # DeepFM: the same feature embeddings feed both an FM component and a DNN component.
    model = DeepFM(feature_size=features.feature_size(),
                   field_size=features.field_size(),
                   out_type=out_type).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    model_path = os.path.join(pwd_path, 'deepfm_model.pt')
    model, loss_history = train_model(model=model, model_path=model_path, dataset=X,
                                      loss_func=nn.BCELoss(), optimizer=optimizer,
                                      device=device, val_size=0.2, batch_size=32,
                                      epochs=10, shuffle=True)
    print(loss_history)
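# The FM half of DeepFM models pairwise feature interactions. A minimal,
# self-contained illustration of the standard second-order FM term (the textbook
# formula, not this project's DeepFM class):
import torch


def fm_second_order(emb):
    """Second-order FM interactions for embeddings of shape (batch, field, k).

    Uses the identity sum_{i<j} <v_i, v_j> = 0.5 * ((sum_i v_i)^2 - sum_i v_i^2),
    reduced over the embedding dimension k.
    """
    square_of_sum = emb.sum(dim=1) ** 2      # (batch, k)
    sum_of_square = (emb ** 2).sum(dim=1)    # (batch, k)
    return 0.5 * (square_of_sum - sum_of_square).sum(dim=1, keepdim=True)  # (batch, 1)


# Example: 4 samples, 5 fields, embedding size 8.
print(fm_second_order(torch.randn(4, 5, 8)).shape)  # torch.Size([4, 1])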
def train(x_idx, x_value, label):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    X_idx_tensor = torch.LongTensor(x_idx).to(device)
    X_value_tensor = torch.Tensor(x_value).to(device)
    y_tensor = torch.Tensor(label).to(device)
    y_tensor = y_tensor.reshape(-1, 1)
    X = TensorDataset(X_idx_tensor, y_tensor)
    model = GBDTLR().to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    model_path = os.path.join(pwd_path, 'gbdtlr_model.pt')
    # Stage 1: fit the GBDT on the raw features (x_idx and label are pandas objects here).
    model.train_gbdt(x_idx.values.tolist(), label.values.tolist())
    # Stage 2: train the LR part with the shared train_model helper.
    model, loss_history = train_model(model=model, model_path=model_path, dataset=X,
                                      loss_func=nn.BCELoss(), optimizer=optimizer,
                                      device=device, val_size=0.2, batch_size=32,
                                      epochs=10)
    print(loss_history)
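# GBDTLR's internals are not shown in this snippet. The underlying GBDT+LR recipe
# (Facebook, 2014) fits a GBDT first, one-hot encodes the leaf each sample lands in
# per tree, and trains a logistic regression on those leaf features. A rough
# standalone sketch, assuming lightgbm and scikit-learn (not necessarily what
# GBDTLR uses internally):
import numpy as np
import lightgbm as lgb
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import OneHotEncoder

X_demo = np.random.rand(1000, 13)
y_demo = np.random.randint(0, 2, 1000)

# Stage 1: fit the GBDT.
booster = lgb.train({'objective': 'binary', 'num_leaves': 31, 'verbose': -1},
                    lgb.Dataset(X_demo, y_demo), num_boost_round=30)

# Each sample -> the index of the leaf it falls into, one column per tree.
leaves = booster.predict(X_demo, pred_leaf=True)        # shape (n_samples, n_trees)

# Stage 2: one-hot encode the leaf indices and fit the LR on them.
leaf_features = OneHotEncoder(handle_unknown='ignore').fit_transform(leaves)
lr = LogisticRegression(max_iter=1000).fit(leaf_features, y_demo)
print(lr.predict_proba(leaf_features)[:5, 1])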
def train(x_idx, x_value, label, features, categorical_index, continuous_value, out_type='binary'):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    X_idx_tensor = torch.LongTensor(x_idx).to(device)
    X_value_tensor = torch.Tensor(x_value).to(device)
    y_tensor = torch.Tensor(label).to(device)
    y_tensor = y_tensor.reshape(-1, 1)
    continuous_value = torch.FloatTensor(continuous_value).to(device)
    categorical_index = torch.FloatTensor(categorical_index).to(device)
    X = TensorDataset(continuous_value, categorical_index, y_tensor)
    # Criteo-style layout: 13 continuous fields and 26 categorical fields.
    model = WideAndDeep(feature_size=features.feature_size(),
                        categorical_field_size=26,
                        continuous_field_size=13,
                        out_type=out_type).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    model_path = os.path.join(pwd_path, 'wad_model.pt')
    model, loss_history = train_model(model=model, model_path=model_path, dataset=X,
                                      loss_func=nn.BCELoss(), optimizer=optimizer,
                                      device=device, val_size=0.2, batch_size=32,
                                      epochs=10, shuffle=True)
    print(loss_history)
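# The WideAndDeep class is defined elsewhere in the project. As a minimal,
# hypothetical sketch of the general Wide & Deep idea with the same input shapes
# (a simple linear "wide" part over the continuous features and an embedding + MLP
# "deep" part over the categorical indices); this is a simplification, not this
# project's implementation nor the cross-product wide part of the original paper:
import torch
import torch.nn as nn


class WideAndDeepSketch(nn.Module):
    """Toy Wide & Deep with a sigmoid output so it can be trained with nn.BCELoss."""

    def __init__(self, feature_size, continuous_field_size=13,
                 categorical_field_size=26, embedding_size=8):
        super().__init__()
        self.wide = nn.Linear(continuous_field_size, 1)
        self.embedding = nn.Embedding(feature_size, embedding_size)
        deep_in = continuous_field_size + categorical_field_size * embedding_size
        self.deep = nn.Sequential(nn.Linear(deep_in, 64), nn.ReLU(), nn.Linear(64, 1))

    def forward(self, continuous_value, categorical_index):
        emb = self.embedding(categorical_index.long()).flatten(start_dim=1)
        deep_out = self.deep(torch.cat([continuous_value, emb], dim=1))
        return torch.sigmoid(self.wide(continuous_value) + deep_out)


# Smoke test with the Criteo-style shapes used above: 13 continuous + 26 categorical fields.
m = WideAndDeepSketch(feature_size=1000)
cont = torch.rand(4, 13)
cat = torch.randint(0, 1000, (4, 26)).float()   # the train() above also passes these as floats
print(m(cont, cat).shape)                       # torch.Size([4, 1])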