Code example #1
import numpy as np
import torch
import pytorch_widedeep as wd
from pytorch_widedeep.models import Wide, TabMlp, WideDeep
from pytorch_widedeep.preprocessing import TabPreprocessor
from pytorch_widedeep.metrics import Accuracy, F1Score

# df (a DataFrame), cat_embed_cols, continuous_cols, X_wide and target are
# assumed to be defined upstream of this snippet.

# Encode the categorical columns for embeddings and carry the continuous
# columns through for the deep (tabular) component
tab_preprocessor = TabPreprocessor(
    embed_cols=cat_embed_cols,
    continuous_cols=continuous_cols,
)
X_deep = tab_preprocessor.fit_transform(df)

# Wide (linear) and deep (MLP) components for a 3-class target
wide = Wide(wide_dim=np.unique(X_wide).shape[0], pred_dim=3)
deepdense = TabMlp(
    mlp_hidden_dims=[64, 32],
    mlp_dropout=[0.2, 0.2],
    column_idx=tab_preprocessor.column_idx,
    embed_input=tab_preprocessor.embeddings_input,
    continuous_cols=continuous_cols,
)
model = WideDeep(wide=wide, deeptabular=deepdense, pred_dim=3)
optimizer = torch.optim.Adam(model.parameters(), lr=0.03)

trainer = wd.Trainer(
    model,
    objective="multiclass",
    metrics=[Accuracy, F1Score],
    optimizers=optimizer,
)

trainer.fit(
    X_wide=X_wide,
    X_tab=X_deep,
    target=target,
    n_epochs=1,
    batch_size=32,
    val_split=0.2,
)
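
Example #1 assumes X_wide and target already exist. A minimal sketch, assuming
a pandas DataFrame df, a wide_cols list and a label column named "target_col"
(the last two names are placeholders, not from the original), of how they are
typically produced:

from pytorch_widedeep.preprocessing import WidePreprocessor

# Label-encode the categorical columns that feed the wide (linear) component
wide_preprocessor = WidePreprocessor(wide_cols=wide_cols)
X_wide = wide_preprocessor.fit_transform(df)

# Integer class labels (0, 1, 2) for the 3-class objective
target = df["target_col"].values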
Code example #2
import numpy as np
import torch
from torch.optim import RAdam  # available in torch.optim from PyTorch 1.10 onwards
from torch.optim.lr_scheduler import StepLR, CyclicLR
from pytorch_widedeep.models import Wide, DeepDense, WideDeep

###############################################################################
# Test that history saves the information adequately
###############################################################################
# Legacy (pre-v1) pytorch-widedeep API: DeepDense and the `deepdense` argument
# were later replaced by TabMlp and `deeptabular` (see Code example #4).
# X_wide, deep_column_idx, embed_input and colnames are assumed to be defined
# upstream of this snippet.
wide = Wide(np.unique(X_wide).shape[0], 1)
deepdense = DeepDense(
    hidden_layers=[32, 16],
    dropout=[0.5, 0.5],
    deep_column_idx=deep_column_idx,
    embed_input=embed_input,
    continuous_cols=colnames[-5:],
)
model = WideDeep(wide=wide, deepdense=deepdense)

# 1. Single optimizer, single scheduler, not cyclic; both passed directly
optimizers_1 = RAdam(model.parameters())
lr_schedulers_1 = StepLR(optimizers_1, step_size=4)

# 2. Multiple optimizers, single cyclic scheduler; the scheduler is passed via
# a one-item dictionary
wide_opt_2 = torch.optim.Adam(model.wide.parameters())
deep_opt_2 = RAdam(model.deepdense.parameters())
deep_sch_2 = CyclicLR(deep_opt_2,
                      base_lr=0.001,
                      max_lr=0.01,
                      step_size_up=5,
                      cycle_momentum=False)
optimizers_2 = {"wide": wide_opt_2, "deepdense": deep_opt_2}
lr_schedulers_2 = {"deepdense": deep_sch_2}

# 3. Multiple schedulers, none of them cyclic
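
The snippet ends at case 3. A hedged guess at how that case might continue,
mirroring the pattern of cases 1 and 2 (the concrete scheduler choice is an
assumption, not the original code):

wide_opt_3 = torch.optim.Adam(model.wide.parameters())
deep_opt_3 = RAdam(model.deepdense.parameters())
wide_sch_3 = StepLR(wide_opt_3, step_size=4)
deep_sch_3 = StepLR(deep_opt_3, step_size=4)
optimizers_3 = {"wide": wide_opt_3, "deepdense": deep_opt_3}
lr_schedulers_3 = {"wide": wide_sch_3, "deepdense": deep_sch_3}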
Code example #3
import torch
from pytorch_widedeep import Trainer
from pytorch_widedeep.models import TabMlp, WideDeep
from pytorch_widedeep.metrics import Accuracy, F1Score
from pytorch_widedeep.callbacks import EarlyStopping, LRHistory

# args (parsed command-line arguments), prepare_tab (a fitted TabPreprocessor),
# mlp_hidden_dims, X_train, y_train, X_valid and y_valid are assumed to be
# defined upstream of this snippet.


def objective(params):
    """Objective for a hyperparameter search over the focal-loss alpha/gamma."""

    deeptabular = TabMlp(
        column_idx=prepare_tab.column_idx,
        mlp_hidden_dims=mlp_hidden_dims,
        mlp_activation=args.mlp_activation,
        mlp_dropout=args.mlp_dropout,
        mlp_batchnorm=args.mlp_batchnorm,
        mlp_batchnorm_last=args.mlp_batchnorm_last,
        mlp_linear_first=args.mlp_linear_first,
        embed_input=prepare_tab.embeddings_input,
        embed_dropout=args.embed_dropout,
    )
    model = WideDeep(deeptabular=deeptabular)

    optimizer = torch.optim.Adam(
        model.parameters(), lr=args.lr, weight_decay=args.weight_decay
    )

    lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        mode=args.rop_mode,
        factor=args.rop_factor,
        patience=args.rop_patience,
        threshold=args.rop_threshold,
        threshold_mode=args.rop_threshold_mode,
    )

    early_stopping = EarlyStopping(
        monitor=args.monitor,
        min_delta=args.early_stop_delta,
        patience=args.early_stop_patience,
    )

    # alpha and gamma are the focal-loss parameters being searched over
    trainer = Trainer(
        model,
        objective="binary_focal_loss",
        optimizers=optimizer,
        lr_schedulers=lr_scheduler,
        reducelronplateau_criterion=args.monitor.split("_")[-1],
        callbacks=[early_stopping, LRHistory(n_epochs=args.n_epochs)],
        metrics=[Accuracy, F1Score],
        alpha=params["alpha"],
        gamma=params["gamma"],
        verbose=0,
    )

    trainer.fit(
        X_train={"X_tab": X_train, "target": y_train},
        X_val={"X_tab": X_valid, "target": y_valid},
        n_epochs=args.n_epochs,
        batch_size=args.batch_size,
        validation_freq=args.eval_every,
    )

    # Best monitored value seen before early stopping kicked in
    score = early_stopping.best

    return score
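
The objective above returns a single score per parameter setting, so it can be
driven by any black-box optimiser. A hedged sketch using hyperopt (the search
space, bounds and max_evals are assumptions, not part of the original):

from hyperopt import fmin, hp, tpe

space = {
    "alpha": hp.uniform("alpha", 0.1, 0.9),
    "gamma": hp.uniform("gamma", 0.5, 5.0),
}
best = fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=20)
print(best)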
Code example #4
import numpy as np
import torch
from torch.optim import RAdam  # available in torch.optim from PyTorch 1.10 onwards
from torch.optim.lr_scheduler import StepLR, CyclicLR
from pytorch_widedeep.models import Wide, TabMlp, WideDeep

###############################################################################
# Test that history saves the information adequately
###############################################################################
# Same test as Code example #2, written against the current API: TabMlp and
# the `deeptabular` argument replace DeepDense and `deepdense`.
# X_wide, column_idx, embed_input and colnames are assumed to be defined
# upstream of this snippet.
wide = Wide(np.unique(X_wide).shape[0], 1)
deeptabular = TabMlp(
    mlp_hidden_dims=[32, 16],
    mlp_dropout=[0.5, 0.5],
    column_idx=column_idx,
    embed_input=embed_input,
    continuous_cols=colnames[-5:],
)
model = WideDeep(wide=wide, deeptabular=deeptabular)

# 1. Single optimizer, single scheduler, not cyclic; both passed directly
optimizers_1 = RAdam(model.parameters())
lr_schedulers_1 = StepLR(optimizers_1, step_size=4)

# 2. Multiple optimizers, single cyclic scheduler; the scheduler is passed via
# a one-item dictionary
wide_opt_2 = torch.optim.Adam(model.wide.parameters())
deep_opt_2 = RAdam(model.deeptabular.parameters())
deep_sch_2 = CyclicLR(
    deep_opt_2, base_lr=0.001, max_lr=0.01, step_size_up=5, cycle_momentum=False
)
optimizers_2 = {"wide": wide_opt_2, "deeptabular": deep_opt_2}
lr_schedulers_2 = {"deeptabular": deep_sch_2}

# 3. Multiple schedulers, none of them cyclic
wide_opt_3 = torch.optim.Adam(model.wide.parameters())
deep_opt_3 = RAdam(model.deeptabular.parameters())
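
Any of the optimizer/scheduler combinations above is handed to the Trainer
through its optimizers, lr_schedulers and callbacks arguments. A hedged sketch
using the case-2 objects, with an assumed binary objective and epoch count
(neither appears in the original snippet):

from pytorch_widedeep import Trainer
from pytorch_widedeep.callbacks import LRHistory

trainer = Trainer(
    model,
    objective="binary",
    optimizers=optimizers_2,
    lr_schedulers=lr_schedulers_2,
    callbacks=[LRHistory(n_epochs=10)],
)
# A subsequent trainer.fit(...) would then record losses/metrics (History) and
# the per-epoch learning rates (LRHistory).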