Example #1
def run(ex, train_filename, test_filename, n_folds, i_test_fold,
        valid_set_fraction, use_validation_set, low_cut_hz, model_name,
        optimizer_name, init_lr, scheduler_name, use_norm_constraint, restarts,
        weight_decay, schedule_weight_decay, max_epochs, max_increase_epochs,
        np_th_seed, debug):
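    # Collect all function arguments into a dict so they can be forwarded to
    # the experiment function below; the Sacred Experiment object is removed
    # first since it is only used for bookkeeping, not for training.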
    kwargs = locals()
    kwargs.pop('ex')
    import sys
    logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s',
                        level=logging.DEBUG,
                        stream=sys.stdout)
    start_time = time.time()
    ex.info['finished'] = False

    # check that a GPU is available -> should crash here if no GPU is present
    confirm_gpu_availability()

    exp = run_4_sec_exp(**kwargs)
    end_time = time.time()
    last_row = exp.epochs_df.iloc[-1]
    run_time = end_time - start_time
    ex.info['finished'] = True

    for key, val in last_row.items():  # .iteritems() was removed in pandas 2.0
        ex.info[key] = float(val)
    ex.info['runtime'] = run_time
    save_pkl_artifact(ex, exp.epochs_df, 'epochs_df.pkl')
    save_pkl_artifact(ex, exp.before_stop_df, 'before_stop_df.pkl')
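
These run functions all follow the same Sacred pattern: the Experiment object is passed in, progress is recorded in ex.info, and result tables are attached as artifacts. Below is a minimal, self-contained sketch (an assumption, not code from the project) of how such a function is typically registered with a Sacred Experiment; the experiment name, observer directory, and config values are placeholders.

# Sketch only: wiring a run() like the one above into a Sacred Experiment.
import time

from sacred import Experiment
from sacred.observers import FileStorageObserver

ex = Experiment('eeg_decoding_sketch')               # placeholder name
ex.observers.append(FileStorageObserver('./runs'))   # placeholder directory


@ex.config
def config():
    # Illustrative subset of the parameters seen in the run() signatures above.
    max_epochs = 100
    np_th_seed = 0
    debug = False


def run(ex, max_epochs, np_th_seed, debug):
    # Stand-in for the real training code; only records bookkeeping info.
    start_time = time.time()
    ex.info['finished'] = False
    # ... train the model for up to max_epochs here ...
    ex.info['finished'] = True
    ex.info['runtime'] = time.time() - start_time


@ex.main
def main(max_epochs, np_th_seed, debug):
    # Sacred injects config values by name; the Experiment object itself is
    # passed through so run() can fill ex.info and attach artifacts.
    run(ex, max_epochs=max_epochs, np_th_seed=np_th_seed, debug=debug)


if __name__ == '__main__':
    ex.run_commandline()

With a FileStorageObserver attached, each invocation then stores config, info, and artifacts under a per-run subdirectory of ./runs.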
Example #2
def run(
    ex,
    debug,
    subject_id,
    constant_memory,
    data_zero_init,
    max_epochs,
    set_distribution_to_empirical,
    ot_on_class_dims,
    independent_class_dists,
    half_before,
    n_sensors,
    final_hz,
    start_ms,
    stop_ms,
    model_name,
    final_fft,
    clf_loss,
    save_model,
    only_return_exp,
):
    kwargs = locals()
    kwargs.pop("ex")
    kwargs.pop("only_return_exp")
    kwargs.pop("save_model")
    th.backends.cudnn.benchmark = True
    import sys

    logging.basicConfig(
        format="%(asctime)s %(levelname)s : %(message)s",
        level=logging.DEBUG,
        stream=sys.stdout,
    )
    start_time = time.time()
    ex.info["finished"] = False
    confirm_gpu_availability()
    epochs_df, feature_model, class_dist = run_exp(**kwargs)

    end_time = time.time()
    run_time = end_time - start_time
    ex.info["finished"] = True
    last_row = epochs_df.iloc[-1]
    for key, val in last_row.items():
        ex.info[key] = float(val)
    ex.info["runtime"] = run_time
    save_pkl_artifact(ex, epochs_df, "epochs_df.pkl")
    if save_model:
        save_torch_artifact(ex, feature_model, "feature_model.pkl")
        save_torch_artifact(ex, class_dist, "class_dist.pkl")

    print("Finished!")
def run(
    ex,
    debug,
    subject_id,
    max_epochs,
    n_sensors,
    final_hz,
    half_before,
    start_ms,
    stop_ms,
    model,
    save_model,
    weight_decay,
    only_return_exp,
    final_fft,
    add_bnorm,
    act_norm,
):
    kwargs = locals()
    kwargs.pop("ex")
    kwargs.pop("only_return_exp")
    kwargs.pop("save_model")
    th.backends.cudnn.benchmark = True
    import sys

    logging.basicConfig(
        format="%(asctime)s %(levelname)s : %(message)s",
        level=logging.DEBUG,
        stream=sys.stdout,
    )
    start_time = time.time()
    ex.info["finished"] = False
    confirm_gpu_availability()
    epochs_df, model = run_exp(**kwargs)

    end_time = time.time()
    run_time = end_time - start_time
    ex.info["finished"] = True
    last_row = epochs_df.iloc[-1]
    for key, val in last_row.items():
        ex.info[key] = float(val)
    ex.info["runtime"] = run_time
    save_pkl_artifact(ex, epochs_df, "epochs_df.pkl")
    if save_model:
        save_torch_artifact(ex, model, "model.pkl")

    print("Finished!")
def run(ex, test_on_eval, sensor_types, n_chans, max_recording_mins,
        n_recordings, sec_to_cut_at_start, sec_to_cut_at_end,
        duration_recording_mins, test_recording_mins, max_abs_val,
        clip_before_resample, sampling_freq, divisor, n_folds, i_test_fold,
        shuffle, merge_train_valid, model_name, input_time_length,
        final_conv_length, stride_before_pool, n_start_chans, n_chan_factor,
        optimizer, learning_rate, weight_decay, scheduler, model_constraint,
        batch_size, max_epochs, save_predictions, save_crop_predictions,
        np_th_seed, only_return_exp):
    log_dir = ex.observers[0].dir
    kwargs = locals()
    kwargs.pop('ex')
    kwargs.pop('save_predictions')
    kwargs.pop('save_crop_predictions')
    import sys
    logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s',
                        level=logging.DEBUG,
                        stream=sys.stdout)
    start_time = time.time()
    ex.info['finished'] = False
    confirm_gpu_availability()

    exp = run_exp(**kwargs)
    end_time = time.time()
    run_time = end_time - start_time
    ex.info['finished'] = True

    if not only_return_exp:
        last_row = exp.epochs_df.iloc[-1]
        for key, val in last_row.items():
            ex.info[key] = float(val)
    ex.info['runtime'] = run_time
    if not only_return_exp:
        save_pkl_artifact(ex, exp.epochs_df, 'epochs_df.pkl')
        save_pkl_artifact(ex, exp.before_stop_df, 'before_stop_df.pkl')
        save_torch_artifact(ex, exp.model.state_dict(), 'model_params.pkl')
        if save_predictions:
            exp.model.eval()
            for setname in ('train', 'valid', 'test'):
                log.info(
                    "Compute and save predictions for {:s}...".format(setname))
                dataset = exp.datasets[setname]
                log.info("Save labels for {:s}...".format(setname))
                save_npy_artifact(ex, dataset.y,
                                  '{:s}_trial_labels.npy'.format(setname))
                preds_per_batch = [
                    var_to_np(exp.model(np_to_var(b[0]).cuda()))
                    for b in exp.iterator.get_batches(dataset, shuffle=False)
                ]
                preds_per_trial = compute_preds_per_trial(
                    preds_per_batch,
                    dataset,
                    input_time_length=exp.iterator.input_time_length,
                    n_stride=exp.iterator.n_preds_per_input)
                mean_preds_per_trial = [
                    np.mean(preds, axis=(0, 2)) for preds in preds_per_trial
                ]
                mean_preds_per_trial = np.array(mean_preds_per_trial)
                log.info("Save trial predictions for {:s}...".format(setname))
                save_npy_artifact(ex, mean_preds_per_trial,
                                  '{:s}_trial_preds.npy'.format(setname))
                if save_crop_predictions:
                    log.info(
                        "Save crop predictions for {:s}...".format(setname))
                    save_npy_artifact(ex, preds_per_trial,
                                      '{:s}_crop_preds.npy'.format(setname))

    else:
        return exp
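
The prediction-saving branch in the last example averages the crop-wise network outputs of each trial into a single trial prediction before the class decision is taken. A tiny, self-contained illustration of that aggregation idea (independent of the braindecode helpers used above, with hypothetical shapes) is:

# Illustrative sketch: average per-crop class scores of each trial, then argmax.
import numpy as np


def mean_trial_preds(preds_per_trial):
    """preds_per_trial: list of arrays, one per trial,
    each of shape (n_crops, n_classes)."""
    return np.array([crop_preds.mean(axis=0) for crop_preds in preds_per_trial])


# Example: two trials, three crops each, two classes.
rng = np.random.default_rng(0)
preds = [rng.random((3, 2)) for _ in range(2)]
trial_preds = mean_trial_preds(preds)      # shape (2, 2)
trial_labels = trial_preds.argmax(axis=1)  # predicted class per trial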