import os
import uuid

import deepsurv
# TensorboardLogger ships with DeepSurv; the import path below assumes the
# repo layout (deepsurv/deepsurv_logger.py)
from deepsurv.deepsurv_logger import TensorboardLogger


def train_deepsurv(x_train, y_train, x_test, y_test, **kwargs):
        # Standardize the datasets; the test set is scaled with the
        # *training* mean/std to avoid information leakage
        train_mean = x_train.mean(axis=0)
        train_std = x_train.std(axis=0)

        x_train = (x_train - train_mean) / train_std
        x_test = (x_test - train_mean) / train_std

        train_data = format_to_deepsurv(x_train, y_train)
        valid_data = format_to_deepsurv(x_test, y_test)

        hyperparams = get_hyperparams(kwargs)

        # Set up Tensorboard loggers
        # TODO improve the model_id for Tensorboard to better partition runs
        model_id = str(hash(str(hyperparams)))
        run_id = model_id + '_' + str(uuid.uuid4())
        logger = TensorboardLogger(
            'hyperparam_search',
            os.path.join(logdir, "tensor_logs", model_id, run_id))

        network = deepsurv.DeepSurv(n_in=x_train.shape[1], **hyperparams)
        metrics = network.train(train_data,
                                n_epochs=num_epochs,
                                logger=logger,
                                update_fn=update_fn,
                                verbose=False)

        result = network.get_concordance_index(**valid_data)
        main_logger.info(
            'Run id: %s | %s | C-Index: %f | Train Loss: %f',
            run_id, str(hyperparams), result, metrics['loss'][-1][1])
        return result
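
The function above relies on two project-local helpers, format_to_deepsurv and get_hyperparams, that the excerpt does not show. A minimal sketch of what they might look like (an assumption about the surrounding script, not the original code; the y layout below is hypothetical):

import numpy as np

def format_to_deepsurv(x, y):
    # Hypothetical layout: y[:, 0] holds event indicators, y[:, 1] holds
    # event/censoring times. DeepSurv expects float32/int32 numpy arrays.
    return {
        'x': x.astype(np.float32),
        'e': y[:, 0].astype(np.int32),
        't': y[:, 1].astype(np.float32),
    }

def get_hyperparams(kwargs):
    # Hypothetical: forward search parameters to the DeepSurv constructor,
    # filling in defaults for anything the caller leaves unspecified.
    defaults = {'learning_rate': 1e-4, 'hidden_layers_sizes': [25, 25]}
    defaults.update(kwargs)
    return defaults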
Example #2
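
This example assumes train_data and test_data already exist in DeepSurv's dataset format: a dict of numpy arrays with keys 'x' (covariates, float32), 't' (event/censoring times, float32) and 'e' (event indicators, int32). A minimal sketch with synthetic placeholder data (import paths assume the DeepSurv repo layout; the numbers are illustrative only):

import numpy as np
import lasagne
from deepsurv import deep_surv
from deepsurv.deepsurv_logger import TensorboardLogger

def make_dataset(n, d, rng):
    # Synthetic placeholder data, only so the example runs end to end
    return {
        'x': rng.standard_normal((n, d)).astype(np.float32),
        't': rng.exponential(scale=10.0, size=n).astype(np.float32),
        'e': rng.integers(0, 2, size=n).astype(np.int32),
    }

rng = np.random.default_rng(0)
train_data = make_dataset(1000, 10, rng)
test_data = make_dataset(200, 10, rng)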
hyperparams = {
    'L2_reg': 10.0,
    'batch_norm': True,
    'dropout': 0.4,
    'hidden_layers_sizes': [25, 25],
    'learning_rate': 1e-05,
    'lr_decay': 0.001,
    'momentum': 0.9,
    'n_in': train_data['x'].shape[1],
    'standardize': True
}

# enable tensorboard
experiment_name = 'test_experiment_sebastian'
logdir = 'logs/tensorboard/'
logger = TensorboardLogger(experiment_name, logdir=logdir)

# create an instance of DeepSurv using the hyperparams defined above
model = deep_surv.DeepSurv(**hyperparams)

# the type of optimizer to use
update_fn = lasagne.updates.nesterov_momentum
# check out http://lasagne.readthedocs.io/en/latest/modules/updates.html
# for other optimizers to use
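# e.g. lasagne.updates.adam or lasagne.updates.sgd are drop-in alternatives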

n_epochs = 10001

# train the model
metrics = model.train(train_data, test_data, n_epochs=n_epochs,
                      logger=logger, update_fn=update_fn)
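
After training, the model can be scored the same way as in Example #1, with the concordance index on held-out data (a usage sketch reusing test_data from above):

c_index = model.get_concordance_index(**test_data)
print('Test C-Index:', c_index)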
Example #3
import json
import uuid


def main():
    args = parse_args()
    print("Arguments:", args)

    # Load Dataset
    print("Loading datasets: " + args.dataset)
    datasets = utils.load_datasets(args.dataset)
    norm_vals = {
        'mean': datasets['train']['x'].mean(axis=0),
        'std': datasets['train']['x'].std(axis=0),
    }
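    # Note: norm_vals is computed but not applied in this excerpt; downstream
    # code presumably uses it to standardize inputs, e.g. (assumption only):
    #   for ds in datasets.values():
    #       ds['x'] = (ds['x'] - norm_vals['mean']) / norm_vals['std']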

    # Train Model

    # TODO standardize location of logs + results => have them go into same directory with same UUID of experiment
    tensor_log_dir = "/shared/data/logs/tensorboard_" + str(args.dataset) + "_" + str(uuid.uuid4())
    logger = TensorboardLogger("experiments.deep_surv", tensor_log_dir, update_freq = 10)
    model = deep_surv.load_model_from_json(args.model, args.weights)
    if 'valid' in datasets:
        valid_data = datasets['valid']
    else:
        valid_data = None
    metrics = model.train(datasets['train'], valid_data,
                          n_epochs=args.num_epochs,
                          logger=logger,
                          update_fn=utils.get_optimizer_from_str(args.update_fn),
                          validation_frequency=100)

    # Evaluate Model
    with open(args.model, 'r') as fp:
        json_model = fp.read()
        hyperparams = json.loads(json_model)

    train_data = datasets['train']
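    # The excerpt ends as evaluation begins. A plausible continuation, reusing
    # the get_concordance_index call from Example #1 (an assumption, not the
    # original script):
    train_ci = model.get_concordance_index(**train_data)
    print("Train C-Index:", train_ci)
    if 'test' in datasets:
        test_ci = model.get_concordance_index(**datasets['test'])
        print("Test C-Index:", test_ci)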