Beispiel #1
0
def test_remove_last_feature_raises_value_error():
    """Removing the only feature column of a Dataset must raise ValueError."""
    sample_count, feature_count = 10, 1
    dataset = Dataset(np.random.rand(sample_count, feature_count),
                      np.random.rand(sample_count))
    with pytest.raises(ValueError):
        dataset.remove_single_feature()
Beispiel #2
0
def saved_random_dataset():
    """Build a random 10x5 Dataset, pickle it, and return (path, dataset)."""
    sample_count, feature_count = 10, 5
    pickle_path = 'test_pickle_save.pkl'
    dataset = Dataset(np.random.rand(sample_count, feature_count),
                      np.random.rand(sample_count))
    dataset.save_to_pickle(pickle_path)
    return pickle_path, dataset
Beispiel #3
0
def test_load_with_too_many_feature_columns_raises_value_error(
        saved_random_dataset):
    """Requesting more feature columns than the pickle holds must fail."""
    path, original = saved_random_dataset
    feature_count = original.features.shape[1]
    # Ask for three columns beyond what was actually saved.
    excess_count = feature_count + 3

    fresh = Dataset()
    with pytest.raises(ValueError):
        fresh.load_from_pickle(path, range(excess_count),
                               range(feature_count, feature_count + 1))
Beispiel #4
0
def test_save_and_load_to_pickle_identical_dataset(generate_random_dataset):
    """A pickle round trip must reproduce features and labels exactly."""
    pickle_path = 'test_pickle_save.pkl'

    original = generate_random_dataset
    feature_count = original.features.shape[1]
    original.save_to_pickle(pickle_path)

    restored = Dataset()
    restored.load_from_pickle(pickle_path, range(feature_count),
                              range(feature_count, feature_count + 1))

    assert np.array_equal(restored.features, original.features)
    # Loading yields 2-D labels, so compare against a column vector.
    assert np.array_equal(restored.labels, original.labels.reshape(-1, 1))
Beispiel #5
0
def test_save_and_load_to_csv_identical_dataset(generate_random_dataset):
    """A CSV round trip must reproduce features and labels up to float precision."""
    csv_path = 'test_csv_save.csv'

    original = generate_random_dataset
    feature_count = original.features.shape[1]
    original.save_to_csv(csv_path)

    restored = Dataset()
    restored.load_from_csv(csv_path, range(feature_count),
                           range(feature_count, feature_count + 1))

    # CSV serialization is text-based, so compare with a tolerance
    # rather than exact equality.
    assert np.all(np.isclose(restored.features, original.features))
    # Loading yields 2-D labels, so compare against a column vector.
    assert np.all(
        np.isclose(restored.labels, original.labels.reshape(-1, 1)))
Beispiel #6
0
def generate_random_dataset():
    """Return a Dataset of 10 samples with 5 random features and random labels."""
    sample_count, feature_count = 10, 5
    return Dataset(np.random.rand(sample_count, feature_count),
                   np.random.rand(sample_count))
Beispiel #7
0
def test_constructor_with_valid_arguments():
    """Constructing from matching feature/label arrays stores them verbatim."""
    sample_count, feature_count = 10, 5
    feature_matrix = np.random.rand(sample_count, feature_count)
    label_vector = np.random.rand(sample_count)

    dataset = Dataset(feature_matrix, label_vector)

    assert np.array_equal(feature_matrix, dataset.features)
    assert np.array_equal(label_vector, dataset.labels)
Beispiel #8
0
def test_constructor_with_non_matching_argument_sizes_raises_value_error():
    """Features and labels with different sample counts must be rejected."""
    sample_count, feature_count = 10, 5
    feature_matrix = np.random.rand(sample_count, feature_count)
    # Two fewer label rows than feature rows — a size mismatch.
    label_vector = np.random.rand(sample_count - 2)

    with pytest.raises(ValueError):
        Dataset(feature_matrix, label_vector)
Beispiel #9
0
def test_save_and_load_to_pickle_identical_file_multiple_labels():
    """A pickle round trip must also preserve multi-column label arrays."""
    sample_count, feature_count, label_count = 10, 5, 3
    pickle_path = 'test_pickle_save.pkl'

    original = Dataset(np.random.rand(sample_count, feature_count),
                       np.random.rand(sample_count, label_count))
    original.save_to_pickle(pickle_path)

    restored = Dataset()
    restored.load_from_pickle(pickle_path, range(feature_count),
                              range(feature_count,
                                    feature_count + label_count))

    assert np.array_equal(restored.features, original.features)
    assert np.array_equal(restored.labels, original.labels)
from horseshoe_bnn.data_handling.dataset import Dataset
from horseshoe_bnn.evaluation.evaluator import evaluate

from sklearn.datasets import load_boston
from sklearn.preprocessing import PolynomialFeatures, StandardScaler

# Base directory for any relative paths used later in the script.
root = os.getcwd()
# Run on GPU when one is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Fix the torch RNG seed so repeated runs are reproducible.
torch.manual_seed(1)
"""
Set up the datasets.
When training on a different dataset, please change the code below
"""
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed
# in 1.2 — this script presumably requires an older scikit-learn release;
# verify the pinned version.
boston = load_boston()
features, labels = boston.data, boston.target
# Wrap the raw arrays in the project's Dataset type, tagged with a name.
boston_dataset = Dataset(features, labels, 'boston')
"""
Choose dataset for training/testing.
When training on a different dataset, please change the code below
"""
dataset = boston_dataset
"""
Set number of epochs the models should be trained for
"""
n_epochs = 5

def run_evaluation(config_path, create_hyperparameters, model_instance,
                   metrics):
    # NOTE(review): the body of this function is truncated in this excerpt;
    # only the configuration-loading step is visible.
    # NOTE(review): yaml.load without an explicit Loader argument is unsafe
    # on untrusted input and raises TypeError on PyYAML >= 6 — consider
    # yaml.safe_load(c); confirm against the project's pinned PyYAML version.
    with open(config_path) as c:
        config = yaml.load(c)
Beispiel #11
0
def test_add_bias_with_empty_dataset_raises_value_error():
    """add_bias on a Dataset holding no data must raise ValueError."""
    empty = Dataset()
    with pytest.raises(ValueError):
        empty.add_bias()
Beispiel #12
0
def test_compute_polynomial_features_with_empty_dataset_raises_value_error():
    """compute_polynomial_features on an empty Dataset must raise ValueError."""
    empty = Dataset()
    with pytest.raises(ValueError):
        empty.compute_polynomial_features()
Beispiel #13
0
def test_normalize_with_empty_dataset_raises_value_error():
    """normalize on a Dataset holding no data must raise ValueError."""
    empty = Dataset()
    with pytest.raises(ValueError):
        empty.normalize()
Beispiel #14
0
def test_load_nonexisting_file_raises_filenotfound_error():
    """Loading from a path that does not exist must raise FileNotFoundError."""
    missing_path = 'nonexisting_file.pkl'
    with pytest.raises(FileNotFoundError):
        Dataset().load_from_pickle(missing_path, range(1), range(1, 2))
Beispiel #15
0
def test_constructor_with_non_matching_argument_types_raises_type_error():
    """Passing None for features alongside array labels must raise TypeError."""
    label_vector = np.linspace(0, 9, 10)
    with pytest.raises(TypeError):
        Dataset(None, label_vector)
Beispiel #16
0
def test_constructor_with_invalid_arguments_types_raises_type_error():
    """Plain strings are not valid feature/label containers."""
    with pytest.raises(TypeError):
        Dataset('a', 'b')
Beispiel #17
0
def test_split_with_empty_dataset_raises_value_error():
    """split on a Dataset holding no data must raise ValueError."""
    empty = Dataset()
    with pytest.raises(ValueError):
        empty.split()