def year():
    """Hyper-parameter search configuration for the YearPredictionMSD dataset.

    NOTE(review): `executor` and `config` are only assigned, never used or
    returned — presumably a config-capturing framework (e.g. sacred) collects
    these locals; confirm against the decorators in the full file. Several
    sibling functions here are also named `year`; without visible decorators
    they would shadow one another — verify.
    """
    executor = HyperParameterSearch(
        pipeline_cls=HoldoutPipeline,
        objective_cls=MseObjective,
        # Search artifacts are written under the experiment's artifact dir.
        output_dir=ex.artifacts_dir)
    config = dict(
        # shuffle=False: presumably to respect the dataset's canonical
        # train/test ordering — confirm with the data-loading code.
        dataset=FileDataset(file_path='data/yearmsd.csv', standardize=True,
                            shuffle=False),
        split=dict(train_size=0.9, ),
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=no_aggreg,
            hidden_size=[100, 100],
            epochs=5000,
            batch_size=1000,
            optimizer=Adam,
            # Searched parameters are lambdas taking a trial object `t`;
            # `suggest_discrete_uniform(name, low, high, step)` matches the
            # Optuna trial API — presumably Optuna, confirm.
            learning_rate=lambda t: t.suggest_discrete_uniform(
                'learning_rate', 0.001, 0.01, 0.001),
            scheduler=ExponentialDecay,
            decay_rate=lambda t: t.suggest_discrete_uniform(
                'decay_rate', 0.95, 1., 0.01),
            decay_steps=50.,
            early_stopping=True,
            punish_crossing=False,
            patience=100,
            delta=1e-6,
            tolerance=0.01,
            loss_func=mse_loss,
            alpha=None,
            print_frequency=10,
            metrics=[mse],
            device='cpu'),
        num_trials=300)
def year():
    """PiEnsemble run config for YearPredictionMSD with mean aggregation.

    Fixed hyper-parameters taken from hyper-parameter-search trial 47 (see
    inline comment). NOTE(review): `config` is only assigned — presumably
    captured by a config framework (e.g. sacred); confirm decorators in the
    full file.
    """
    config = dict(
        dataset=FileDataset(
            file_path='data/yearmsd.csv',
            standardize=True,
            shuffle=False
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=1,
        method=PiEnsemble,
        hyper_params=dict(
            # HPS trial number 47
            ensemble_size=5,
            aggreg_func=mean_aggreg,
            hidden_size=[100, 100],
            epochs=4,
            batch_size=1000,
            optimizer=Adam,
            learning_rate=0.009,
            scheduler=ExponentialDecay,
            decay_rate=0.95,
            decay_steps=50.,
            loss_func=mse_loss,
            alpha=None,
            metrics=[mse],
            print_frequency=10,
            device='cpu'
        )
    )
def year():
    """PiEnsemble run config for YearPredictionMSD using the QD+ loss.

    Hyper-parameters come from search trial 77 and were then manually tuned
    (see inline comment). Three aggregation functions are evaluated.
    NOTE(review): `config` is only assigned — presumably captured by a config
    framework (e.g. sacred); confirm decorators in the full file.
    """
    config = dict(
        dataset=FileDataset(file_path='data/yearmsd.csv', standardize=True,
                            shuffle=False),
        split=dict(train_size=0.9, test_size=0.1),
        num_runs=1,
        method=PiEnsemble,
        hyper_params=dict(
            # from HPS trial 77 and then manually fine-tuned
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg, snm_aggreg],
            hidden_size=[100, 100],
            optimizer=Adam,
            learning_rate=0.005,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=0.99,
            epochs=40,
            batch_size=1000,
            loss_func=qd_plus_loss,
            # alpha here is set (0.05) unlike the mse_loss configs —
            # presumably the prediction-interval miscoverage level; confirm.
            alpha=0.05,
            soften=160.,
            lambda_1=0.999,
            lambda_2=0.1,
            ksi=10.,
            print_frequency=1,
            device='cpu'))
def year():
    """MvEnsemble run config for YearPredictionMSD with the normal loss.

    Fixed hyper-parameters taken from hyper-parameter-search trial 2 (see
    inline comment). NOTE(review): `config` is only assigned — presumably
    captured by a config framework (e.g. sacred); confirm decorators in the
    full file.
    """
    config = dict(
        dataset=FileDataset(
            file_path='data/yearmsd.csv',
            standardize=True,
            shuffle=False
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=1,
        method=MvEnsemble,
        hyper_params=dict(
            # HPS trial number 2
            ensemble_size=5,
            aggreg_func=[mv_aggreg],
            hidden_size=[100, 100],
            epochs=4,
            # note: batch_size=100 here, smaller than the 1000 used by the
            # PiEnsemble configs in this file
            batch_size=100,
            optimizer=Adam,
            learning_rate=0.004,
            scheduler=ExponentialDecay,
            decay_rate=.99,
            decay_steps=50.,
            loss_func=normal_loss,
            epsilon=None,  # `None` to disable adversarial examples
            alpha=0.05,
            print_frequency=1,
            device='cpu'
        )
    )
def year():
    """PiEnsemble run config for YearPredictionMSD using the QD code loss.

    Hyper-parameters from search trial 206 (see inline comment). The split
    explicitly sets val_size=0., i.e. no validation partition.
    NOTE(review): `config` is only assigned — presumably captured by a config
    framework (e.g. sacred); confirm decorators in the full file.
    """
    config = dict(
        dataset=FileDataset(file_path='data/yearmsd.csv', standardize=True,
                            shuffle=False),
        split=dict(
            train_size=0.9,
            val_size=0.,
            test_size=0.1,
        ),
        num_runs=1,
        method=PiEnsemble,
        hyper_params=dict(
            # HPS trial number 206
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[100, 100],
            optimizer=Adam,
            learning_rate=0.001,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            # decay_rate=1.0 effectively disables the exponential decay
            # (rate stays constant) — assuming standard ExponentialDecay
            # semantics; confirm.
            decay_rate=1.0,
            epochs=64,
            batch_size=1000,
            loss_func=qd_code_loss,
            alpha=0.05,
            soften=160.,
            lambda_=11.,
            print_frequency=10,
            device='cpu'))
def year_paper():
    """PiEnsemble run config for YearPredictionMSD with the paper's QD loss.

    Uses a single hidden layer ([100]) unlike the other configs in this file
    ([100, 100]), and omits `metrics` and `device` — presumably intentional to
    mirror the original paper's setup; confirm defaults with the pipeline.
    NOTE(review): `config` is only assigned — presumably captured by a config
    framework (e.g. sacred); confirm decorators in the full file.
    """
    config = dict(
        dataset=FileDataset(file_path='data/yearmsd.csv', standardize=True,
                            shuffle=False),
        split=dict(train_size=0.9, test_size=0.1),
        num_runs=1,
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[100],
            optimizer=Adam,
            learning_rate=0.005,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=0.999,
            epochs=100,
            batch_size=1000,
            loss_func=qd_paper_loss,
            retry_on_crossing=False,
            alpha=0.05,
            soften=160.,
            lambda_=15.,
            print_frequency=10))