Example #1
def run(config, args):
    # Overwrite parsed CLI arguments with values from the config file,
    # but keep data paths and device settings from the command line.
    args_dict = vars(args)
    for parameter, value in config['params'].items():
        if parameter not in ('train', 'dev', 'test', 'cuda', 'results_path'):
            args_dict[parameter] = value
    print(args)
    run_experiment(args, config['seed'])
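Here `vars(args)` returns the `Namespace`'s underlying `__dict__`, so assigning into that dict mutates `args` itself; the tuple acts as a blocklist so paths and device flags given on the command line survive the config override. A minimal self-contained sketch of the same pattern (the parser and config below are hypothetical stand-ins, not from the source):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--train', default='train.csv')
args = parser.parse_args([])  # empty argv: take the defaults

config = {'params': {'lr': 0.1, 'train': 'overridden.csv'}}

args_dict = vars(args)  # a live view of args.__dict__, not a copy
for parameter, value in config['params'].items():
    if parameter not in ('train', 'dev', 'test', 'cuda', 'results_path'):
        args_dict[parameter] = value

print(args)  # Namespace(lr=0.1, train='train.csv') -- 'train' kept from the CLI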
Example #2
 def experiment(self,
                experiment_id,
                interval_secs,
                model_params,
                data_params):
     """
     Example usage:
     
     python3.4 run.py experiment \
         --experiment-id=60_mins_reduce_hash \
         --interval-secs=3600 \
         --model-params='{"learning_rate":0.05, "l1_regularization_strength":0.00, "l2_regularization_strength":0.5}' \
         --data-params='{"shuffle":True, "num_epochs":1, "num_threads":2, batch_size:256}'
         
     :param experiment_id: unique name identifying this experiment run
     :param interval_secs: length of one experiment interval, in seconds
     :param model_params: dict of model hyperparameters (passed as a JSON string on the CLI)
     :param data_params: dict of input-pipeline hyperparameters (passed as a JSON string on the CLI)
     :return: None
     """
     run_experiment(
         experiment_id=experiment_id,
         interval_secs=interval_secs,
         model_hyperparams=model_params,
         data_hyperparams=data_params)
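The docstring's `--data-params` string was not valid JSON as originally written (`True` must be lowercase `true`, and `batch_size` needs quotes); with those fixes it decodes cleanly. A small sketch, assuming the CLI layer hands the raw strings to the standard library's `json.loads`:

import json

model_params = json.loads(
    '{"learning_rate":0.05, "l1_regularization_strength":0.00, '
    '"l2_regularization_strength":0.5}')
data_params = json.loads(
    '{"shuffle":true, "num_epochs":1, "num_threads":2, "batch_size":256}')

print(model_params['l2_regularization_strength'])  # 0.5
print(data_params['shuffle'])                      # True (JSON true -> Python bool)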
Example #3
def run(config, args, n_iter=200):
    # Random search: draw n_iter configurations and run each with a fresh seed.
    for i in range(n_iter):
        args_dict = vars(args)
        args_dict['categorical_features'] = cat_features
        args_dict['continuous_features'] = cont_features
        args_dict.update(sample_config(config))
        print(args)
        seed = make_seed()
        run_experiment(args, seed)
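Example #3 is one form of random search: `sample_config` evidently draws a fresh hyperparameter setting each iteration, and `make_seed` a fresh RNG seed. Neither helper is shown, so the following is only a plausible sketch of what they might do; the search-space keys are illustrative, not from the source:

import random
import time

def sample_config(config):
    """Pick one candidate value per hyperparameter, uniformly at random."""
    return {name: random.choice(candidates)
            for name, candidates in config.items()}

def make_seed():
    """Derive a fresh integer seed from the wall clock."""
    return int(time.time() * 1000) % (2 ** 31)

search_space = {'lr': [0.1, 0.01, 0.001], 'depth': [1, 2, 3]}
print(sample_config(search_space))  # e.g. {'lr': 0.01, 'depth': 3}
print(make_seed())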
Example #4
# --------------------------------
# Load data for word models
# --------------------------------

train, val, test, full, vocab_size, index_word = load_and_shape_data(
    'data/Ellen_tweetdata.txt', char_model=False, seq_len=8)

run_experiment(train[0],
               train[1],
               val[0],
               val[1],
               test[0],
               test[1],
               full,
               exp_name='validation-ellen',
               epochs=20,
               char_model=False,
               seq_len=8,
               depth=1,
               dropout=0,
               batch_norm=False,
               vocab_size=vocab_size,
               vocab_dict=index_word)
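Since the splits are consumed pairwise, the long positional call can be written more compactly with iterable unpacking; a sketch with a stub standing in for the real `run_experiment`:

def run_experiment(X_train, y_train, X_val, y_val, X_test, y_test,
                   full, **kwargs):
    """Stub with the same positional shape as the call above."""
    print(kwargs.get('exp_name'))

train, val, test, full = (1, 2), (3, 4), (5, 6), 'corpus'

# Equivalent to run_experiment(train[0], train[1], val[0], val[1], ...)
run_experiment(*train[:2], *val[:2], *test[:2], full,
               exp_name='validation-ellen', epochs=20)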

# --------------------------------
# Load data for word models
# --------------------------------

train, val, test, full, vocab_size, index_word = load_and_shape_data(
    'data/Michelleobama_tweetdata.txt', char_model=False, seq_len=8)
Example #5
train, val, test, full, vocab_size, index_word = load_and_shape_data(
    'data/Trump_tweetdata.txt', char_model=True, seq_len=12)

# --------------------------------
# Depth Experiments
# --------------------------------

run_experiment(train[0],
               train[1],
               val[0],
               val[1],
               test[0],
               test[1],
               full,
               exp_name='char-depth-1',
               epochs=20,
               char_model=True,
               seq_len=12,
               depth=1,
               dropout=0,
               batch_norm=False,
               vocab_size=vocab_size,
               vocab_dict=index_word)

run_experiment(train[0],
               train[1],
               val[0],
               val[1],
               test[0],
               test[1],
               full,
Example #6
            conf_name = f"{original_conf_name}_{conf_up_counter}"
        else:
            print(
                f"No compatibles to {original_conf_name} benchmarked, adding new condition {conf_name}."
            )
            benchmark_dict["results"].update({conf_name: {"n": 0}})

        benchmark_dict["results"][conf_name].update({"config": config})

        for i in range(args.repetitions):
            print(
                f"\nRepetition {i + 1}/{args.repetitions} in environment {args.env} with config {conf_name}."
            )
            reward_history = np.array(
                run_experiment(args.env,
                               config,
                               init_ray=should_init,
                               verbose=False).cycle_reward_history)
            should_init = False

            current_n = benchmark_dict["results"][conf_name]["n"]
            if current_n == 0:
                means, var = reward_history, np.zeros_like(reward_history)
                mean_max, var_max = np.max(reward_history), np.array(0)
            else:
                means, var = increment_mean_var(
                    np.array(benchmark_dict["results"][conf_name]["means"]),
                    np.array(benchmark_dict["results"][conf_name]["var"]),
                    reward_history, np.zeros_like(reward_history), current_n)
                mean_max, var_max = increment_mean_var(
                    benchmark_dict["results"][conf_name]["mean_max"],
                    benchmark_dict["results"][conf_name]["var_max"],
Example #7
                        # update settings
                        args.n_agents = n_agents
                        args.varying_start_value = True
                        args.timesteps = timesteps
                        args.compute_frequency_increment_values = False
                        args.model = model
                        args.batch_size = batch_size
                        args.learning_rate = learning_rate
                        args.normalize_samples = normalize_samples

                        dict_args = vars(args)
                        dict_args.update(CONFIG)

                        args.test_samples = test_samples

                        run_id, nn_results, nn_scores = run_experiment(args)

                        plot_parameter_sweep(fit_results, nn_results).savefig(
                            f"../results/{run_id}_params_sweep.png", dpi=300)
                        plot_fp_scores(fit_results, nn_results).savefig(
                            f"../results/{run_id}_fp_scores.png", dpi=300)

                        dict_args.update(nn_scores)
                        dict_args.update(fit_scores)
                        dict_args["runid"] = run_id

                        df = pd.DataFrame([{
                            k: v
                            for k, v in dict_args.items()
                            if k != "test_samples"
                        }])
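The comprehension drops the bulky `test_samples` array so the logged row contains only scalars. One common way to accumulate such one-row frames across sweep iterations is appending to a CSV, writing the header only on first use; a small sketch (the file name and row contents are illustrative):

import os
import pandas as pd

def log_row(row, path='sweep_log.csv'):
    """Append one result row to a CSV, emitting the header only once."""
    pd.DataFrame([row]).to_csv(
        path, mode='a', header=not os.path.exists(path), index=False)

log_row({'runid': 'demo', 'learning_rate': 1e-3, 'score': 0.92})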