Example #1
def boston():
    """Hyper-parameter-search configuration for the Boston housing dataset.

    Uses a single-member PiEnsemble (no aggregation) with an MSE loss; the
    learning rate and decay rate are sampled per trial via Optuna-style
    ``suggest_discrete_uniform`` callbacks.

    Returns:
        dict: experiment configuration for the HPS runner (300 trials).
    """
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/boston_housing_data.csv',
            shuffle_path='data/boston_housing_data.npy',
            standardize=True,
            shuffle=0  # shuffle_id: fixed shuffle index for reproducible search
        ),
        split=dict(train_size=0.9, ),
        method=PiEnsemble,
        hyper_params=dict(ensemble_size=1,
                          aggreg_func=no_aggreg,
                          hidden_size=[50, 50],
                          epochs=5000,
                          batch_size=100,
                          optimizer=Adam,
                          # searched on a 0.001-step grid over [0.001, 0.01]
                          learning_rate=lambda t: t.suggest_discrete_uniform(
                              'learning_rate', 0.001, 0.01, 0.001),
                          scheduler=ExponentialDecay,
                          decay_rate=lambda t: t.suggest_discrete_uniform(
                              'decay_rate', 0.95, 1., 0.01),
                          decay_steps=50.,
                          early_stopping=True,
                          punish_crossing=False,
                          patience=500,
                          delta=1e-6,
                          tolerance=0.01,
                          loss_func=mse_loss,
                          alpha=None,
                          print_frequency=10,
                          metrics=[mse],
                          device='cpu'),
        num_trials=300)
    return config  # was built and discarded; expose it to the caller
Example #2
def boston():
    """Evaluation configuration for the Boston housing dataset.

    Runs a 5-member PiEnsemble with mean aggregation over 20 shuffled runs,
    using hyper-parameters fixed from HPS trial 227.

    Returns:
        dict: experiment configuration for the evaluation runner.
    """
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/boston_housing_data.csv',
            shuffle_path='data/boston_housing_data.npy',
            standardize=True,
            shuffle=True
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=20,
        method=PiEnsemble,
        hyper_params=dict(  # HPS trial number 227
            ensemble_size=5,
            aggreg_func=mean_aggreg,
            hidden_size=[50, 50],
            epochs=129,
            batch_size=100,
            optimizer=Adam,
            learning_rate=0.004,
            scheduler=ExponentialDecay,
            decay_rate=0.99,
            decay_steps=50.,
            loss_func=mse_loss,
            alpha=None,
            metrics=[mse],
            print_frequency=10,
            device='cpu'
        )
    )
    return config  # was built and discarded; expose it to the caller
Example #3
File: hps_qd2.py — Project: tarik/pi-snm-qde
def protein():
    """Hyper-parameter-search configuration for the protein dataset.

    Single-member PiEnsemble trained with the QD code loss; learning rate,
    decay rate and lambda_ are sampled per trial via Optuna-style
    ``suggest_discrete_uniform`` callbacks.

    Returns:
        dict: experiment configuration for the HPS runner (300 trials).
    """
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/protein.csv',
            shuffle_path='data/protein.npy',
            standardize=True,
            shuffle=0  # shuffle_id: fixed shuffle index for reproducible search
        ),
        split=dict(train_size=0.9, ),
        method=PiEnsemble,
        hyper_params=dict(ensemble_size=1,
                          aggreg_func=no_aggreg,
                          hidden_size=[100, 100],
                          epochs=5000,
                          batch_size=100,
                          optimizer=Adam,
                          learning_rate=lambda t: t.suggest_discrete_uniform(
                              'learning_rate', 0.001, 0.01, 0.001),
                          scheduler=ExponentialDecay,
                          decay_rate=lambda t: t.suggest_discrete_uniform(
                              'decay_rate', 0.95, 1., 0.01),
                          decay_steps=50.,
                          early_stopping=True,
                          patience=500,
                          delta=1e-6,
                          tolerance=0.01,
                          loss_func=qd_code_loss,
                          alpha=0.05,  # PI confidence level: 95% intervals
                          soften=160.,
                          lambda_=lambda t: t.suggest_discrete_uniform(
                              'lambda_', 1., 50., 1.),
                          print_frequency=10,
                          device='cpu'),
        num_trials=300)
    return config  # was built and discarded; expose it to the caller
Example #4
File: exp_qd.py — Project: tarik/pi-snm-qde
def yacht_code():
    """Evaluation configuration for the yacht dataset with the QD code loss.

    Original HPs with `alpha=0.01`.
    Therefore new HPs with random search with the objective to optimize
    for the aggregation as did Pearce et al. (2018);
    confirmed in our email communication.

    Returns:
        dict: experiment configuration for the evaluation runner
        (5-member ensemble, 20 shuffled runs, SEM/STD aggregation).
    """
    config = dict(dataset=ShuffledDataset(data_path='data/yacht.csv',
                                          shuffle_path='data/yacht.npy',
                                          standardize=True,
                                          shuffle=True),
                  split=dict(train_size=0.9, test_size=0.1),
                  num_runs=20,
                  method=PiEnsemble,
                  hyper_params=dict(ensemble_size=5,
                                    aggreg_func=[sem_aggreg, std_aggreg],
                                    hidden_size=[50],
                                    optimizer=Adam,
                                    learning_rate=0.009,
                                    scheduler=ExponentialDecay,
                                    decay_steps=50.,
                                    decay_rate=0.98,
                                    epochs=2500,
                                    batch_size=100,
                                    loss_func=qd_code_loss,
                                    alpha=0.05,
                                    soften=160.,
                                    lambda_=16.,
                                    print_frequency=10))
    return config  # was built and discarded; expose it to the caller
Example #5
File: exp_qd.py — Project: tarik/pi-snm-qde
def boston_dev():
    """Small development configuration for the Boston housing dataset.

    Short run (5 epochs, 5 runs) of a 5-member PiEnsemble with the QD code
    loss, intended for quick smoke testing rather than reported results.

    Returns:
        dict: experiment configuration for the evaluation runner.
    """
    config = dict(dataset=ShuffledDataset(
        data_path='data/boston_housing_data.csv',
        shuffle_path='data/boston_housing_data.npy',
        standardize=True,
        shuffle=True),
                  split=dict(train_size=0.9, test_size=0.1),
                  num_runs=5,
                  method=PiEnsemble,
                  hyper_params=dict(ensemble_size=5,
                                    aggreg_func=[sem_aggreg, std_aggreg],
                                    hidden_size=[50],
                                    epochs=5,
                                    batch_size=100,
                                    optimizer=Adam,
                                    learning_rate=0.02,
                                    scheduler=ExponentialDecay,
                                    decay_rate=0.9,
                                    decay_steps=50.,
                                    loss_func=qd_code_loss,
                                    alpha=0.05,
                                    soften=160.,
                                    lambda_=15.,
                                    print_frequency=10))
    # NOTE(review): `seed` is assigned but not placed in `config` — confirm
    # whether the runner is expected to read it (kept for compatibility).
    seed = 1
    return config  # was built and discarded; expose it to the caller
Example #6
def concrete():
    """Evaluation configuration for the concrete dataset.

    5-member PiEnsemble with the QD+ loss over 20 shuffled runs;
    hyper-parameters fixed from HPS trial 210.

    Returns:
        dict: experiment configuration for the evaluation runner.
    """
    config = dict(
        dataset=ShuffledDataset(data_path='data/concrete_data.csv',
                                shuffle_path='data/concrete_data.npy',
                                standardize=True,
                                shuffle=True),
        split=dict(
            train_size=0.9,
            val_size=0.,  # no validation split for final evaluation
            test_size=0.1,
        ),
        num_runs=20,
        method=PiEnsemble,
        hyper_params=dict(  # HPS trial number 210
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg, snm_aggreg],
            hidden_size=[50, 50],
            optimizer=Adam,
            learning_rate=0.007,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=1.,  # rate of 1.0 keeps the learning rate constant
            epochs=680,
            batch_size=100,
            loss_func=qd_plus_loss,
            alpha=0.05,
            soften=160.,
            lambda_1=0.9958,
            lambda_2=0.34,
            ksi=10.,
            print_frequency=10,
            device='cpu'))
    return config  # was built and discarded; expose it to the caller
Example #7
def yacht():
    """Hyper-parameter-search configuration for the yacht dataset (MvEnsemble).

    Single-member mean-variance ensemble trained with a Gaussian NLL loss;
    learning rate and decay rate are sampled per trial via Optuna-style
    ``suggest_discrete_uniform`` callbacks.

    Returns:
        dict: experiment configuration for the HPS runner (100 trials).
    """
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/yacht.csv',
            shuffle_path='data/yacht.npy',
            standardize=True,
            shuffle=0  # shuffle_id: fixed shuffle index for reproducible search
        ),
        split=dict(train_size=0.9, ),
        method=MvEnsemble,
        hyper_params=dict(ensemble_size=1,
                          aggreg_func=no_aggreg,
                          hidden_size=[50, 50],
                          epochs=1000,
                          batch_size=100,
                          optimizer=Adam,
                          learning_rate=lambda t: t.suggest_discrete_uniform(
                              'learning_rate', 0.001, 0.01, 0.001),
                          scheduler=ExponentialDecay,
                          decay_rate=lambda t: t.suggest_discrete_uniform(
                              'decay_rate', 0.95, 1., 0.01),
                          decay_steps=50,
                          loss_func=normal_loss,
                          epsilon=None,  # adversarial examples disabled
                          alpha=0.05,
                          early_stopping=True,
                          patience=100,
                          delta=1e-6,
                          tolerance=0.01,
                          print_frequency=10,
                          device='cpu'),
        num_trials=100)
    return config  # was built and discarded; expose it to the caller
Example #8
def protein():
    """Evaluation configuration for the protein dataset.

    5-member PiEnsemble with the QD+ loss over 5 shuffled runs;
    hyper-parameters from HPS trial 24, then manually fine-tuned.

    Returns:
        dict: experiment configuration for the evaluation runner.
    """
    config = dict(
        dataset=ShuffledDataset(data_path='data/protein.csv',
                                shuffle_path='data/protein.npy',
                                standardize=True,
                                shuffle=True),
        split=dict(train_size=0.9, test_size=0.1),
        num_runs=5,
        method=PiEnsemble,
        hyper_params=dict(  # HPS trial number 24 and then manually fine-tuned
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg, snm_aggreg],
            hidden_size=[100, 100],
            optimizer=Adam,
            learning_rate=0.02,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=0.99,
            epochs=80,
            batch_size=100,
            loss_func=qd_plus_loss,
            alpha=0.05,
            soften=160.,
            lambda_1=0.9976,
            lambda_2=0.24,
            ksi=10.,
            print_frequency=10,
            device='cpu'))
    return config  # was built and discarded; expose it to the caller
Example #9
def naval():
    """Evaluation configuration for the naval compressor-decay dataset.

    5-member PiEnsemble with the QD+ loss over 20 shuffled runs;
    hyper-parameters from HPS trial 21, then manually fine-tuned.

    Returns:
        dict: experiment configuration for the evaluation runner.
    """
    config = dict(
        dataset=ShuffledDataset(data_path='data/naval_compressor_decay.csv',
                                shuffle_path='data/naval_compressor_decay.npy',
                                standardize=True,
                                shuffle=True),
        split=dict(train_size=0.9, test_size=0.1),
        num_runs=20,
        method=PiEnsemble,
        hyper_params=dict(  # HPS trial number 21 and then manually fine-tuned
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg, snm_aggreg],
            hidden_size=[50, 50],
            optimizer=Adam,
            learning_rate=0.001,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=0.9975,
            epochs=1766,
            batch_size=100,
            loss_func=qd_plus_loss,
            alpha=0.05,
            soften=160.,
            lambda_1=0.9708,
            lambda_2=0.63,
            ksi=10.,
            print_frequency=10,
            device='cpu'))
    return config  # was built and discarded; expose it to the caller
Example #10
def boston():
    """Evaluation configuration for the Boston housing dataset (MvEnsemble).

    5-member mean-variance ensemble with Gaussian NLL loss and adversarial
    training over 20 shuffled runs; hyper-parameters from HPS trial 40.

    Returns:
        dict: experiment configuration for the evaluation runner.
    """
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/boston_housing_data.csv',
            shuffle_path='data/boston_housing_data.npy',
            standardize=True,
            shuffle=True
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=20,
        method=MvEnsemble,
        hyper_params=dict(  # HPS trial number 40
            ensemble_size=5,
            aggreg_func=[mv_aggreg],
            hidden_size=[50, 50],
            epochs=152,
            batch_size=100,
            optimizer=Adam,
            learning_rate=0.005,
            # scheduler=ExponentialDecay,
            # decay_rate=1.,
            # decay_steps=50.,
            loss_func=normal_loss,
            epsilon=0.1,  # `None` to disable adversarial examples
            alpha=0.05,
            print_frequency=10,
            device='cpu'
        )
    )
    return config  # was built and discarded; expose it to the caller
Example #11
def yacht():
    """Evaluation configuration for the yacht dataset.

    5-member PiEnsemble with the QD code loss over 20 shuffled runs;
    hyper-parameters fixed from HPS trial 193.

    Returns:
        dict: experiment configuration for the evaluation runner.
    """
    config = dict(
        dataset=ShuffledDataset(data_path='data/yacht.csv',
                                shuffle_path='data/yacht.npy',
                                standardize=True,
                                shuffle=True),
        split=dict(
            train_size=0.9,
            val_size=0.,  # no validation split for final evaluation
            test_size=0.1,
        ),
        num_runs=20,
        method=PiEnsemble,
        hyper_params=dict(  # HPS trial number 193
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[50, 50],
            optimizer=Adam,
            learning_rate=0.006,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=1.,  # rate of 1.0 keeps the learning rate constant
            epochs=1214,
            batch_size=100,
            loss_func=qd_code_loss,
            alpha=0.05,
            soften=160.,
            lambda_=1.,
            print_frequency=10,
            device='cpu'))
    return config  # was built and discarded; expose it to the caller
Example #12
def wine():
    """Hyper-parameter-search configuration for the wine dataset.

    Single-member PiEnsemble trained with the QD+ loss; epoch count,
    learning rate, decay rate, lambda_1 and lambda_2 are sampled per trial
    via Optuna-style ``suggest_discrete_uniform`` callbacks.

    Returns:
        dict: experiment configuration for the HPS runner (1000 trials).
    """
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/wine.csv',
            shuffle_path='data/wine.npy',
            standardize=True,
            shuffle=0  # shuffle_id: fixed shuffle index for reproducible search
        ),
        split=dict(train_size=0.9, ),
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=1,
            aggreg_func=no_aggreg,
            hidden_size=[50, 50],
            # suggest_discrete_uniform returns a float; cast to int for epochs
            epochs=lambda trial: int(
                trial.suggest_discrete_uniform('num_epoch', 100, 1000, 40)),
            batch_size=100,
            optimizer=Adam,
            learning_rate=lambda t: t.suggest_discrete_uniform(
                'learning_rate', 0.001, 0.01, 0.001),
            scheduler=ExponentialDecay,
            decay_rate=lambda t: t.suggest_discrete_uniform(
                'decay_rate', 0.95, 1., 0.01),
            decay_steps=50.,
            early_stopping=False,  # epochs are searched instead
            patience=500,
            delta=1e-6,
            tolerance=0.01,
            loss_func=qd_plus_loss,
            alpha=0.05,
            soften=160.,
            lambda_1=lambda t: t.suggest_discrete_uniform(
                'lambda_1', 0.97, 0.999, 0.0002),
            lambda_2=lambda t: t.suggest_discrete_uniform(
                'lambda_2', 0.01, 0.99, 0.01),
            ksi=10.,
            print_frequency=10,
            device='cpu'),
        num_trials=1000)
    return config  # was built and discarded; expose it to the caller
Example #13
File: exp_qd.py — Project: tarik/pi-snm-qde
def yacht_paper():
    """Evaluation configuration for the yacht dataset with the paper QD loss.

    Identical setup to the QD-code variant except for ``loss_func``
    (``qd_paper_loss`` instead of ``qd_code_loss``).

    Returns:
        dict: experiment configuration for the evaluation runner
        (5-member ensemble, 20 shuffled runs, SEM/STD aggregation).
    """
    config = dict(dataset=ShuffledDataset(data_path='data/yacht.csv',
                                          shuffle_path='data/yacht.npy',
                                          standardize=True,
                                          shuffle=True),
                  split=dict(train_size=0.9, test_size=0.1),
                  num_runs=20,
                  method=PiEnsemble,
                  hyper_params=dict(ensemble_size=5,
                                    aggreg_func=[sem_aggreg, std_aggreg],
                                    hidden_size=[50],
                                    optimizer=Adam,
                                    learning_rate=0.009,
                                    scheduler=ExponentialDecay,
                                    decay_steps=50.,
                                    decay_rate=0.98,
                                    epochs=2500,
                                    batch_size=100,
                                    loss_func=qd_paper_loss,
                                    alpha=0.05,
                                    soften=160.,
                                    lambda_=16.,
                                    print_frequency=10))
    return config  # was built and discarded; expose it to the caller
Example #14
File: exp_qd.py — Project: tarik/pi-snm-qde
def protein_paper():
    """Evaluation configuration for the protein dataset with the paper QD loss.

    5-member PiEnsemble over 5 shuffled runs with SEM/STD aggregation;
    retries on interval crossing are disabled.

    Returns:
        dict: experiment configuration for the evaluation runner.
    """
    config = dict(dataset=ShuffledDataset(data_path='data/protein.csv',
                                          shuffle_path='data/protein.npy',
                                          standardize=True,
                                          shuffle=True),
                  split=dict(train_size=0.9, test_size=0.1),
                  num_runs=5,
                  method=PiEnsemble,
                  hyper_params=dict(ensemble_size=5,
                                    aggreg_func=[sem_aggreg, std_aggreg],
                                    hidden_size=[100],
                                    optimizer=Adam,
                                    learning_rate=0.002,
                                    scheduler=ExponentialDecay,
                                    decay_steps=50.,
                                    decay_rate=0.999,
                                    epochs=600,
                                    batch_size=100,
                                    loss_func=qd_paper_loss,
                                    retry_on_crossing=False,
                                    alpha=0.05,
                                    soften=160.,
                                    lambda_=40.,
                                    print_frequency=10))
    return config  # was built and discarded; expose it to the caller