def test_when_hyperparameter_tune_called_on_prophet_then_hyperparameters_are_passed_to_underlying_model(
    temp_model_path,
):
    scheduler_options = scheduler_factory(hyperparameter_tune_kwargs="auto")
    model = ProphetModel(
        path=temp_model_path,
        freq="H",
        prediction_length=4,
        hyperparameters={"growth": "linear", "n_changepoints": ag.Int(3, 4)},
    )

    _, _, results = model.hyperparameter_tune(
        scheduler_options=scheduler_options,
        time_limit=100,
        train_data=DUMMY_TS_DATAFRAME,
        val_data=DUMMY_TS_DATAFRAME,
    )

    assert len(results["config_history"]) == 2
    assert results["config_history"][0]["n_changepoints"] == 3
    assert results["config_history"][1]["n_changepoints"] == 4
    assert all(c["growth"] == "linear" for c in results["config_history"].values())
def test_given_hyperparameters_with_spaces_when_learner_called_then_hpo_is_performed(
    temp_model_path, model_name
):
    hyperparameters = {model_name: {"epochs": ag.Int(1, 3)}}

    # mock the default hps factory to prevent preset hyperparameter configurations from
    # creeping into the test case
    with mock.patch(
        "autogluon.timeseries.models.presets.get_default_hps"
    ) as default_hps_mock:
        default_hps_mock.return_value = defaultdict(dict)
        learner = TimeSeriesLearner(path_context=temp_model_path, eval_metric="MAPE")
        learner.fit(
            train_data=DUMMY_TS_DATAFRAME,
            hyperparameters=hyperparameters,
            val_data=DUMMY_TS_DATAFRAME,
            hyperparameter_tune=True,
        )

        leaderboard = learner.leaderboard()

    assert len(leaderboard) == 3 + 1  # include ensemble

    config_history = learner.load_trainer().hpo_results[model_name]["config_history"]
    assert len(config_history) == 3
    assert all(1 <= model["epochs"] <= 3 for model in config_history.values())
def test_given_hyperparameters_with_spaces_and_custom_model_when_trainer_called_then_hpo_is_performed(
    temp_model_path,
):
    hyperparameters = {
        GenericGluonTSModelFactory(MQRNNEstimator): {"epochs": ag.Int(1, 4)}
    }

    # mock the default hps factory to prevent preset hyperparameter configurations from
    # creeping into the test case
    with mock.patch(
        "autogluon.timeseries.models.presets.get_default_hps"
    ) as default_hps_mock:
        default_hps_mock.return_value = defaultdict(dict)
        trainer = AutoTimeSeriesTrainer(path=temp_model_path)
        trainer.fit(
            train_data=DUMMY_TS_DATAFRAME,
            hyperparameters=hyperparameters,
            val_data=DUMMY_TS_DATAFRAME,
            hyperparameter_tune=True,
        )

        leaderboard = trainer.leaderboard()

    assert len(leaderboard) == 4 + 1  # include ensemble

    config_history = next(iter(trainer.hpo_results.values()))["config_history"]
    assert len(config_history) == 4
    assert all(1 <= model["epochs"] <= 4 for model in config_history.values())
def test_given_hyperparameter_spaces_to_init_when_fit_called_then_error_is_raised(
    model_class, temp_model_path
):
    model = model_class(
        path=temp_model_path,
        freq="H",
        quantile_levels=[0.1, 0.9],
        hyperparameters={
            "epochs": ag.Int(3, 4),
        },
    )
    with pytest.raises(ValueError, match=".*hyperparameter_tune.*"):
        model.fit(train_data=DUMMY_TS_DATAFRAME)
def test_given_hyperparameters_with_spaces_to_prophet_when_trainer_called_then_hpo_is_performed(
    temp_model_path,
):
    hyperparameters = {"Prophet": {"n_changepoints": ag.Int(1, 4)}}

    # mock the default hps factory to prevent preset hyperparameter configurations from
    # creeping into the test case
    with mock.patch(
        "autogluon.timeseries.models.presets.get_default_hps"
    ) as default_hps_mock:
        default_hps_mock.return_value = defaultdict(dict)
        trainer = AutoTimeSeriesTrainer(path=temp_model_path)
        trainer.fit(
            train_data=DUMMY_TS_DATAFRAME,
            hyperparameters=hyperparameters,
            val_data=DUMMY_TS_DATAFRAME,
            hyperparameter_tune=True,
        )

        leaderboard = trainer.leaderboard()

    assert len(leaderboard) == 4 + 1  # include ensemble

    config_history = trainer.hpo_results["Prophet"]["config_history"]
    assert len(config_history) == 4
    assert all(1 <= model["n_changepoints"] <= 4 for model in config_history.values())
def test_given_hyperparameter_spaces_when_tune_called_then_tuning_output_correct(
    model_class, temp_model_path
):
    scheduler_options = scheduler_factory(hyperparameter_tune_kwargs="auto")
    model = model_class(
        path=temp_model_path,
        freq="H",
        quantile_levels=[0.1, 0.9],
        hyperparameters={
            "epochs": ag.Int(3, 4),
        },
    )

    _, _, results = model.hyperparameter_tune(
        scheduler_options=scheduler_options,
        time_limit=300,
        train_data=DUMMY_TS_DATAFRAME,
        val_data=DUMMY_TS_DATAFRAME,
    )

    assert len(results["config_history"]) == 2
    assert results["config_history"][0]["epochs"] == 3
    assert results["config_history"][1]["epochs"] == 4
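# Why the two tests above expect exactly two configurations: ag.Int bounds are
# inclusive, so ag.Int(3, 4) contains only the values 3 and 4, and exhausting
# the space yields two trials. A minimal sketch (assuming the autogluon.core
# search-space API used throughout these tests):
import autogluon.core as ag

space = ag.Int(3, 4)
print(space.lower, space.upper)  # 3 4 -- two grid points, hence two trials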
@pytest.mark.parametrize("target_column", ["target", "custom"]) @pytest.mark.parametrize( "hyperparameters", [ { "AutoETS": {}, "SimpleFeedForward": { "epochs": 1 } }, { "AutoETS": {}, "SimpleFeedForward": { "epochs": ag.Int(1, 3) } }, ], ) def test_given_hp_spaces_and_custom_target_when_predictor_called_predictor_can_predict( temp_model_path, hyperparameters, target_column): df = DUMMY_TS_DATAFRAME.rename(columns={"target": target_column}) fit_kwargs = dict( train_data=df, hyperparameters=hyperparameters, tuning_data=df, ) init_kwargs = dict(path=temp_model_path, prediction_length=2) if target_column != "target":
# specify hyperparameter search space
config = {
    'task': 'ssd',
    'dataset': args.dataset,
    'estimator': 'ssd',
    'base_network': None,
    'transfer': ag.Categorical('ssd_512_vgg16_atrous_coco',
                               'ssd_300_resnet34_v1b_coco',
                               'ssd_512_resnet50_v1_coco',
                               'ssd_512_resnet101_v2_voc'),
    'lr': ag.Real(1e-4, 1e-2, log=True),
    'batch_size': ag.Int(3, 6),  # [8, 16, 32, 64]
    'momentum': ag.Real(0.85, 0.95),
    'wd': ag.Real(1e-6, 1e-2, log=True),
    'epochs': 20,
    'num_trials': args.num_trials,
    'search_strategy': 'bayesopt'
}

# specify learning task
task = ObjectDetection(config)
# specify hyperparameter search space
config = {
    'task': 'classification',
    'dataset': args.dataset,
    'estimator': 'img_cls',
    'model': ag.Categorical('resnet50_v1', 'resnet101_v1',
                            'resnet50_v2', 'resnet101_v2',
                            'resnet50_v1b', 'resnet101_v1b',
                            'resnest50', 'resnest101'),
    'lr': ag.Real(1e-4, 1e-2, log=True),
    'batch_size': ag.Int(4, 7),  # [16, 32, 64, 128]
    'momentum': ag.Real(0.85, 0.95),
    'wd': ag.Real(1e-6, 1e-2, log=True),
    'epochs': 15,
    'num_trials': args.num_trials,
    'search_strategy': 'bayesopt'
}

# specify learning task
task = ImageClassification(config)
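# The `batch_size` comments above ("[8, 16, 32, 64]" for ag.Int(3, 6) and
# "[16, 32, 64, 128]" for ag.Int(4, 7)) hint that the space stores an exponent
# which the training code decodes as a power of two. A minimal sketch of that
# (assumed) convention:
for exponent in range(4, 8):        # ag.Int(4, 7) is inclusive of both bounds
    print(exponent, 2 ** exponent)  # 4 -> 16, 5 -> 32, 6 -> 64, 7 -> 128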
logging.info('args: {}'.format(args))
dataset_train, dataset_test = get_dataset(args)
time_limit = 5 * 24 * 60 * 60  # 5 days
epochs = 20
if args.meta_arch == 'yolo3':
    transfer = None if ('voc' in args.dataset_name) or ('coco' in args.dataset_name) \
        else ag.Categorical('yolo3_darknet53_coco', 'yolo3_mobilenet1.0_coco')
    hyperparameters = {
        'estimator': args.meta_arch,
        'lr': ag.Categorical(1e-2, 5e-3, 1e-3, 5e-4, 1e-4, 5e-5),
        'data_shape': ag.Categorical(320, 416),
        'batch_size': 16,
        'lr_decay_epoch': ag.Categorical([80, 90], [85, 95]),
        'warmup_epochs': ag.Int(1, 10),
        'warmup_iters': ag.Int(250, 1000),
        'wd': ag.Categorical(1e-4, 5e-4, 2.5e-4),
        'syncbn': ag.Categorical(True, False),
        'epochs': epochs,
        'transfer': transfer
    }
    kwargs = {
        'num_trials': args.num_trials,
        'time_limit': time_limit,
        'dist_ip_addrs': [],
        'nthreads_per_trial': 16,
        'ngpus_per_trial': 8,
        'hyperparameters': hyperparameters
    }
elif args.meta_arch == 'faster_rcnn':
    # (truncated in the original; this branch builds an analogous
    # `hyperparameters` / `kwargs` pair for Faster R-CNN)
    ...
# (opening of this add_argument call reconstructed; the flag name is assumed
# from the `args.num_trials` usage below)
parser.add_argument('--num-trials', type=int, default=3,
                    help='number of training trials')
args = parser.parse_args()
logging.info('user defined arguments: {}'.format(args))

# specify hyperparameter search space
config = {
    'task': 'yolo3',
    'dataset': args.dataset,
    'estimator': 'yolo3',
    'base_network': None,
    'transfer': ag.Categorical('yolo3_darknet53_voc', 'yolo3_darknet53_coco'),
    'lr': ag.Real(1e-4, 1e-2, log=True),
    'batch_size': ag.Int(3, 6),  # [8, 16, 32, 64]
    'momentum': ag.Real(0.85, 0.95),
    'wd': ag.Real(1e-6, 1e-2, log=True),
    'epochs': 20,
    'num_trials': args.num_trials,
    'search_strategy': 'bayesopt'
}

# specify learning task
task = ObjectDetection(config)

# specify dataset
dataset = Dataset.get(args.dataset)
train_data, valid_data = dataset.split(0.8)

# fit auto estimator
def get_default_hps(key, prediction_length):
    default_model_hps = {
        "toy": {
            "SimpleFeedForward": {
                "epochs": 5,
                "num_batches_per_epoch": 10,
                "context_length": 5,
            },
            "MQCNN": {"epochs": 5, "num_batches_per_epoch": 10, "context_length": 5},
            "DeepAR": {"epochs": 5, "num_batches_per_epoch": 10, "context_length": 5},
            "AutoETS": {},
        },
        "toy_hpo": {
            "SimpleFeedForward": {
                "epochs": 5,
                "num_batches_per_epoch": 10,
                "context_length": ag.Int(5, 25),
            },
            "MQCNN": {
                "epochs": 5,
                "num_batches_per_epoch": 10,
                "context_length": ag.Int(5, 25),
            },
            "DeepAR": {
                "epochs": 5,
                "num_batches_per_epoch": 10,
                "context_length": ag.Int(5, 25),
            },
        },
        "default": {
            "AutoETS": {},
            # "AutoARIMA": {},
            "SimpleFeedForward": {},
            "MQCNN": {},
            "MQRNN": {},
            "DeepAR": {},
            "Transformer": {},
            # "AutoTabular": {},  # AutoTabular model is experimental.
        },
        "default_hpo": {
            "MQCNN": {
                "context_length": ag.Int(
                    min(prediction_length, max(10, 2 * prediction_length), 250),
                    max(min(500, 12 * prediction_length), 4 * prediction_length),
                    default=prediction_length * 4,
                ),
            },
            "DeepAR": {
                "context_length": ag.Int(
                    min(prediction_length, max(10, 2 * prediction_length), 250),
                    max(min(500, 12 * prediction_length), prediction_length),
                    default=prediction_length,
                ),
            },
            "SimpleFeedForward": {
                "context_length": ag.Int(
                    min(prediction_length, max(10, 2 * prediction_length), 250),
                    max(min(500, 12 * prediction_length), prediction_length),
                    default=prediction_length,
                ),
            },
            "AutoETS": {"error": ag.Categorical("add", "mul")},
            # "AutoARIMA": {"max_p": ag.Int(2, 4)},
        },
    }
    return default_model_hps[key]
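# A minimal usage sketch (hypothetical caller, mirroring how the tests above
# pass explicit `hyperparameters` to the trainer): fetch a preset by key and
# hand it to `fit`. The "_hpo" presets contain search spaces, so they require
# hyperparameter_tune=True, just as in the trainer tests earlier.
preset_hps = get_default_hps("toy_hpo", prediction_length=4)
trainer = AutoTimeSeriesTrainer(path="my_models/")
trainer.fit(
    train_data=DUMMY_TS_DATAFRAME,
    hyperparameters=preset_hps,
    val_data=DUMMY_TS_DATAFRAME,
    hyperparameter_tune=True,
)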