Example #1
def experiment(args):
    """
    Fits model to predict glossary terms based on textbook
    chapter content.

    Analyzes these models' efficacy on a test set, and reports
    model performance.
    """
    experiment_name = args["--expt"]
    experiments.run_experiment(experiment_name, args)
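
# Hypothetical usage sketch (not from the original code): the "--expt" lookup
# above suggests docopt-style arguments, so a direct call could pass a plain
# dict with that key; the experiment name below is a placeholder.
if __name__ == "__main__":
    experiment({"--expt": "glossary_prediction_baseline"})
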
Example #2
def main(final_hyperparameters):
    with open("./datasets/amazon_sparse.pkl", "rb") as f:
        amazon_data = pickle.load(f)

    train_X = amazon_data['Xtr'].toarray()
    train_y = amazon_data['Ytr'].toarray()

    test_X, test_y = None, None
    if final_hyperparameters:
        test_X = amazon_data['Xte'].toarray()
        test_y = amazon_data['Yte'].toarray()

    # Split into training (first 1200 permuted samples) and validation (the rest);
    # validation is sliced from train_X before train_X is overwritten below.
    perm = np.random.RandomState(0).permutation(train_X.shape[0])
    validation_X = train_X[perm[1200:], :]
    validation_y = train_y[perm[1200:]]
    train_X = train_X[perm[:1200], :]
    train_y = train_y[perm[:1200]]

    # You can try different seeds and check the model's performance!
    seeds = np.random.RandomState(0).randint(low=0, high=65536, size=(10))

    train_accuracies = []
    validation_accuracies = []
    test_accuracies = []

    AMAZON_HYPERPARAMETERS["debug"] = False
    AMAZON_HYPERPARAMETERS["num_classes"] = 50
    for seed in seeds:
        AMAZON_HYPERPARAMETERS["rng"] = np.random.RandomState(seed)

        train_accuracy, validation_accuracy, test_accuracy = run_experiment(
            AMAZON_HYPERPARAMETERS, train_X, train_y, validation_X,
            validation_y, test_X, test_y)

        print(
            f"Seed: {seed} - Train Accuracy: {train_accuracy} - Validation Accuracy: {validation_accuracy} - Test Accuracy: {test_accuracy}"
        )
        train_accuracies.append(train_accuracy)
        validation_accuracies.append(validation_accuracy)
        test_accuracies.append(test_accuracy)

    print(
        f"Train Accuracies - Mean: {np.mean(train_accuracies)} - Standard Deviation: {np.std(train_accuracies, ddof=0)}"
    )
    print(
        f"Validation Accuracies - Mean: {np.mean(validation_accuracies)} - Standard Deviation: {np.std(validation_accuracies, ddof=0)}"
    )
    print(
        f"Test Accuracies - Mean: {np.mean(test_accuracies)} - Standard Deviation: {np.std(test_accuracies, ddof=0)}"
    )
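
# Hypothetical driver (an assumption, not in the original snippet): keep
# final_hyperparameters False while tuning so the test split stays untouched,
# and pass True only for the final evaluation run.
if __name__ == "__main__":
    main(final_hyperparameters=False)   # tuning: test_X / test_y remain None
    # main(final_hyperparameters=True)  # final run: also evaluates on the test set
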
Example #3
def main(final_hyperparameters):
    with open("./datasets/occupancy.pkl", "rb") as f:
        occupancy_data = pickle.load(f)

    # Training Data
    train_X = occupancy_data['Xtr']
    train_y = occupancy_data['Ytr']

    # Validation Data
    validation_X = occupancy_data['Xte']
    validation_y = occupancy_data['Yte']

    # Testing Data
    test_X, test_y = None, None
    if final_hyperparameters:
        test_X = occupancy_data['Xte2']
        test_y = occupancy_data['Yte2']

    # You can try different seeds and check the model's performance!
    seeds = np.random.RandomState(0).randint(low=0, high=65536, size=(10))

    train_accuracies = []
    validation_accuracies = []
    test_accuracies = []

    OCCUPANCY_HYPERPARAMETERS["debug"] = False
    OCCUPANCY_HYPERPARAMETERS["num_classes"] = 2
    for seed in seeds:
        OCCUPANCY_HYPERPARAMETERS["rng"] = np.random.RandomState(seed)

        train_accuracy, validation_accuracy, test_accuracy = run_experiment(
            OCCUPANCY_HYPERPARAMETERS, train_X, train_y, validation_X,
            validation_y, test_X, test_y)

        print(
            f"Seed: {seed} - Train Accuracy: {train_accuracy} - Validation Accuracy: {validation_accuracy} - Test Accuracy: {test_accuracy}"
        )
        train_accuracies.append(train_accuracy)
        validation_accuracies.append(validation_accuracy)
        test_accuracies.append(test_accuracy)

    print(
        f"Train Accuracies - Mean: {np.mean(train_accuracies)} - Standard Deviation: {np.std(train_accuracies, ddof=0)}"
    )
    print(
        f"Validation Accuracies - Mean: {np.mean(validation_accuracies)} - Standard Deviation: {np.std(validation_accuracies, ddof=0)}"
    )
    print(
        f"Test Accuracies - Mean: {np.mean(test_accuracies)} - Standard Deviation: {np.std(test_accuracies, ddof=0)}"
    )
Example #4
        "enable": 0.0, # float value other than 0.0 will enable
        "world_size": 300.0, # don't set to very small value otherwise cloud movement looks discrete
        "speed_multiplier": 5.0, # larger values give faster moving cloud (shadow)
        "coverage_modifier": 0.0, # -1.0 ~ 1.0, larger value gives larger coverage of shadow 
    }),
    "rotational_light": AttrDict({ # light rotates about x-axis within +-interval with fixed step size
        "enable": 0.0, # float value other than 0.0 will enable
        "interval": 10.0, # light rotate in the range of light_original_rotation +- interval
        "step": 1.0 # larger number gives faster rotating light source
    }),
    "position": AttrDict({ # starting position of the object
        "x": -120.9,
        "y": 27.4834,
        "z": 792.7
    }),
    "landing_zone": AttrDict({
        "enable":  0.0, # float value other than 0.0 will enable
        "offset": AttrDict({
            "x": 31.5,
            "y": -47.4,
            "z": 23.5
        })
    })
})
####

env = make_unity_env(config)
run_experiment(env,
               logger_kwargs={'base_dir': './results_ver2/Basic-Freefall'},
               controller_kwargs={'disable_control': True})
Example #5
        "coverage_modifier": 0.0, # -1.0 ~ 1.0, larger value gives larger coverage of shadow 
    }),
    "rotational_light": AttrDict({ # light rotates about x-axis within +-interval with fixed step size
        "enable": 0.0, # float value other than 0.0 will enable
        "interval": 10.0, # light rotate in the range of light_original_rotation +- interval
        "step": 1.0 # larger number gives faster rotating light source
    }),
    "position": AttrDict({ # starting position of the object
        "x": -120.9,
        "y": 27.4834,
        "z": 792.7
    }),
    "landing_zone": AttrDict({
        "enable":  0.0, # float value other than 0.0 will enable
        "offset": AttrDict({
            "x": 31.5,
            "y": -47.4,
            "z": 23.5
        })
    })
})
####

env = make_unity_env(config)
run_experiment(env,
               logger_kwargs={'base_dir': './results_ver2/Basic'},
               controller_kwargs={
                   'max_velocity': 24,
                   'smooth_window': 10
               })
Example #6
        "enable": 0.0, # float value other than 0.0 will enable
        "world_size": 300.0, # don't set to very small value otherwise cloud movement looks discrete
        "speed_multiplier": 5.0, # larger values give faster moving cloud (shadow)
        "coverage_modifier": 0.0, # -1.0 ~ 1.0, larger value gives larger coverage of shadow 
    }),
    "rotational_light": AttrDict({ # light rotates about x-axis within +-interval with fixed step size
        "enable": 0.0, # float value other than 0.0 will enable
        "interval": 10.0, # light rotate in the range of light_original_rotation +- interval
        "step": 1.0 # larger number gives faster rotating light source
    }),
    "position": AttrDict({ # starting position of the object
        "x": -120.9,
        "y": 27.4834, 
        "z": 792.7
    }),
    "landing_zone": AttrDict({
        "enable":  0.0, # float value other than 0.0 will enable
        "offset": AttrDict({
            "x": 31.5,
            "y": -47.4,
            "z": 23.5
        })
    })
})
####

env = make_unity_env(config)
run_experiment(env,
    logger_kwargs={'base_dir': './results_ver2/Oracle'},
    controller_kwargs={'max_velocity': 24, 'smooth_window': 10, 'use_oracle': True})
Example #7
import os.path
import sys
import experiments

args = sys.argv
if len(args) > 1:
    experiment_fname = args[1]
    print(experiment_fname)
    if os.path.exists(experiment_fname):
        experiments.load_and_plot(experiment_fname)
    else:
        print(experiments.run_experiment(experiment_fname))
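
# Hypothetical invocation (the script and file names are placeholders): if the
# given results file already exists it is loaded and plotted, otherwise the
# experiment is run and its result printed, e.g.
#   python run.py results/my_experiment.pkl
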
Example #8
            loss, global_step=global_step)

    eval_metric_ops = None
    if mode == tf.estimator.ModeKeys.EVAL:
        eval_metric_ops = {
            "accuracy": tf.metrics.accuracy(labels=labels,
                                            predictions=predictions)
        }

    return tf.estimator.EstimatorSpec(mode,
                                      predictions=predictions,
                                      loss=loss,
                                      train_op=train_op,
                                      eval_metric_ops=eval_metric_ops)


hparams = {"batch_size": 128}

result, _ = experiments.run_experiment(
    estimator_fn=my_dnn,
    hparams=hparams,
    train_data=train_data,
    validation_data=val_data,
    test_data=test_data,
    model_dir="runs/test",
    max_steps=15000,
    #steps_before_early_stopping=8000,
    early_stopping_fn=experiments.early_stopping("accuracy", operator.le))

print(result.results)
Example #9
    if mode == tf.estimator.ModeKeys.EVAL:
        eval_metric_ops = {
            "accuracy": tf.metrics.accuracy(labels=labels,
                                            predictions=predictions)
        }

    return tf.estimator.EstimatorSpec(mode,
                                      predictions=predictions,
                                      loss=loss,
                                      train_op=train_op,
                                      eval_metric_ops=eval_metric_ops)


hparams = {
    "batch_size": 128,
    "activation": "relu",
    "dropout": 0.5,
    "optimizer": "Adam",
    "learning_rate": 1e-4
}

result, _ = experiments.run_experiment(estimator_fn=my_cnn,
                                       hparams=hparams,
                                       train_data=train_data,
                                       validation_data=val_data,
                                       test_data=test_data,
                                       model_dir="runs/test",
                                       max_steps=5000)

print(result.results)
Example #10
    return ResNetPoolMixed(640512,
                           dim=(3 * 3**9, ),
                           n_channels=1,
                           batch_size=batch_size,
                           args=args)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "-data", help="gtzan, mtat or msd")
    parser.add_argument("-logging", help="Logs to csv file")
    parser.add_argument("-gpu", type=list, help="Run on gpu's, and which")
    parser.add_argument("-local", help="Whether to run local or on server")
    parser.add_argument("-cross",
                        help="Whether to run cross experiments or not")

    args = parser.parse_args()

    build_model = None
    if args.local == 'True':
        build_model = build_basic
    else:
        build_model = build_resnet_mixed_layer
        #check_weights(build_model().build_model(), "C:\\Users\\kkr\\Desktop\\Thesis\\best_weights_max_average_net_1.6e-05.hdf5")
        #check_weight(build_model().build_model(), "C:\\Users\\kkr\\Desktop\\Thesis\\best_weights_mixed_net_8e-05.hdf5")

    # args.cross is a string, so compare explicitly (as done for args.local above)
    if args.cross == 'True':
        exp.run_cross_experiment(build_model, args)
    else:
        exp.run_experiment(build_model, args)
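
# Hypothetical invocation (script name and flag values are placeholders), based
# on the flags defined above; note that -local and -cross are compared as strings:
#   python train.py -d gtzan -local False -cross True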