Example #1
    # The first line of this call is cut off in the source; the argument list
    # matches tt.practical.MixedInputMLP (an assumption), which embeds the
    # categorical inputs before the dense layers.
    net = tt.practical.MixedInputMLP(in_features,
                                     num_embeddings,
                                     embedding_dims,
                                     num_nodes,
                                     out_features,
                                     batch_norm=batch_norm,
                                     dropout=dropout,
                                     output_bias=output_bias)
    net = net.to(device)

    if args.optimizer == 'AdamWR':
        model = CoxPH(net,
                      optimizer=tt.optim.AdamWR(
                          lr=args.lr,
                          decoupled_weight_decay=args.weight_decay,
                          cycle_eta_multiplier=0.8))
    else:
        # Fallback so `model` is defined for every optimizer choice
        # (plain Adam here is an assumption).
        model = CoxPH(net, optimizer=tt.optim.Adam(lr=args.lr))

    # Let the learning-rate finder pick a starting learning rate.
    lrfinder = model.lr_finder(x_train, y_train, batch_size, tolerance=10)
    lr = lrfinder.get_best_lr()
    model.optimizer.set_lr(lr)

    wandb.init(
        project=args.dataset + '_baseline',
        group='deepsurv_' + args.optimizer,
        name=f'L{args.num_layers}N{args.num_nodes}D{args.dropout}'
             f'W{args.weight_decay}B{args.batch_size}',
        config=args)

    wandb.watch(net)

    # Loss configuration ============================================================

    # Training ======================================================================
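    # The fit itself is not shown in this example; a minimal sketch of what
    # typically follows, assuming a validation split (x_val, y_val),
    # args.epochs, and an early-stopping patience of 10 (all assumptions):
    callbacks = [tt.callbacks.EarlyStopping(patience=10)]
    log = model.fit(x_train, y_train, batch_size, args.epochs,
                    callbacks, verbose=True,
                    val_data=(x_val, y_val))
    wandb.log({'best_val_loss': log.to_pandas().val_loss.min()})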
Example #2
import matplotlib.pyplot as plt
import pandas as pd
import torchtuples as tt
from pycox.models import CoxPH
from sklearn_pandas import DataFrameMapper

# Standardize the numeric columns and pass the rest through unchanged.
x_mapper = DataFrameMapper(standardize + leave)

X_train = pd.DataFrame(data=x_mapper.fit_transform(X_train),
                       columns=numerical_columns + categorical_columns,
                       index=X_train.index)
# The fitted mapper should also transform the test set:
# x_test = x_mapper.transform(X_test)
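The `standardize` and `leave` lists come from earlier in the source; in the
pycox tutorials they are built like this (a sketch assuming the
numerical_columns / categorical_columns lists above):

from sklearn.preprocessing import StandardScaler

standardize = [([col], StandardScaler()) for col in numerical_columns]
leave = [(col, None) for col in categorical_columns]  # pass through as-is
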
in_features = X_train.shape[1]
num_nodes = [32, 16, 8]   # three hidden layers
out_features = 1          # single risk score for the Cox model
batch_norm = True
dropout = 0.1
output_bias = False

net = tt.practical.MLPVanilla(in_features, num_nodes, out_features, batch_norm,
                              dropout, output_bias=output_bias)

model = CoxPH(net, tt.optim.Adam)
batch_size = 256

# Run the learning-rate finder and inspect the loss-vs-lr curve.
lrfinder = model.lr_finder(X_train.values.astype('float32'), y_train, batch_size,
                           tolerance=10)
_ = lrfinder.plot()
plt.show()

# EarlyStopping only takes effect when passed to model.fit() as a callback;
# the original created it without keeping a reference.
callbacks = [tt.callbacks.EarlyStopping(patience=20)]

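To complete the example, training and evaluation typically follow. A sketch,
assuming a validation split (X_val, y_val) and test arrays durations_test /
events_test (hypothetical names), using pycox's standard evaluation API:

model.optimizer.set_lr(lrfinder.get_best_lr())
epochs = 512
log = model.fit(X_train.values.astype('float32'), y_train, batch_size, epochs,
                callbacks, verbose=True,
                val_data=(X_val.values.astype('float32'), y_val))

# Cox models need baseline hazards before survival curves can be predicted.
x_test = x_mapper.transform(X_test).astype('float32')
_ = model.compute_baseline_hazards()
surv = model.predict_surv_df(x_test)

from pycox.evaluation import EvalSurv
ev = EvalSurv(surv, durations_test, events_test, censor_surv='km')
print('time-dependent concordance:', ev.concordance_td())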