Example #1
def get_default_searchspace():
    params = {
        'lr': Real(5e-5, 5e-3, default=1e-3, log=True),
        'weight_decay': Real(1e-6, 5e-2, default=1e-6, log=True),
        'p_dropout': Categorical(0.1, 0, 0.2, 0.3, 0.4, 0.5),
        'n_heads': Categorical(8, 2, 4),
        'hidden_dim': Categorical(128, 32, 64, 256),
        'n_layers': Categorical(1, 2, 3, 4, 5),
        'feature_dim': Int(8, 128, default=64),
        'tab_readout': Categorical('none', 'readout_emb', 'mean', 'concat_pool', 'concat_pool_all',
                                   'concat_pool_add', 'all_feat_embs', 'mean_feat_embs'),
        'num_output_layers': Categorical(2, 1, 3),
    }

    return params.copy()
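All of these snippets build their ranges from AutoGluon's search-space primitives, which in the 0.x releases live in autogluon.core.space. A minimal sketch of constructing and inspecting the dictionary above (assuming that import path); note that a Categorical treats its first listed value as the default, which is why options such as Categorical(128, 32, 64, 256) lead with the preferred value:

from autogluon.core.space import Categorical, Int, Real  # assumed import for all examples

space = get_default_searchspace()
print(space['lr'].default)          # 0.001, taken from default=1e-3
print(space['hidden_dim'].default)  # 128: the first value of a Categorical is its default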
Example #2
def get_searchspace_binary():
    spaces = {
        # See docs: https://docs.fast.ai/tabular.models.html
        'layers': Categorical(None, [200, 100], [200], [500], [1000], [500, 200], [50, 25],
                              [1000, 500], [200, 100, 50], [500, 200, 100], [1000, 500, 200]),
        'emb_drop': Real(0.0, 0.5, default=0.1),
        'ps': Real(0.0, 0.5, default=0.1),
        'bs': Categorical(256, 64, 128, 512, 1024, 2048, 4096),
        'lr': Real(5e-5, 1e-1, default=1e-2, log=True),
        'epochs': Int(lower=5, upper=30, default=30),
        'early.stopping.min_delta': 0.0001,
        'early.stopping.patience': 20,
        'smoothing': Real(0.0, 0.3, default=0.0, log=True),
    }
    return spaces
Example #3
def get_default_searchspace(problem_type, framework, num_classes=None):
    params = {
        'learning_rate': Real(1e-4, 3e-2, default=3e-4, log=True),
        'weight_decay': Real(1e-12, 0.1, default=1e-6, log=True),
        'dropout_prob': Categorical(0.1, 0.0, 0.5, 0.2, 0.3, 0.4),
        'embedding_size_factor': Categorical(1.0, 0.5, 1.5, 0.7, 0.6, 0.8, 0.9, 1.1, 1.2, 1.3, 1.4),
        'proc.embed_min_categories': Categorical(4, 3, 10, 100, 1000),
        'proc.impute_strategy': Categorical('median', 'mean', 'most_frequent'),
        'proc.max_category_levels': Categorical(100, 10, 20, 200, 300, 400, 500, 1000, 10000),
        'proc.skew_threshold': Categorical(0.99, 0.2, 0.3, 0.5, 0.8, 0.9, 0.999, 1.0, 10.0, 100.0),
    }
    mxnet_params = {
        'use_batchnorm': Categorical(True, False),
        'layers': Categorical(None, [200, 100], [256], [100, 50], [200, 100, 50], [1024], [32], [300, 150]),
        'network_type': Categorical('widedeep', 'feedforward'),
        'activation': Categorical('relu', 'softrelu'),
        'batch_size': Categorical(512, 1024, 2056, 128),
    }
    pytorch_params = {
        'use_batchnorm': Categorical(False, True),
        'num_layers': Categorical(2, 3, 4),
        'hidden_size': Categorical(128, 256, 512),
        'activation': Categorical('relu', 'elu'),
    }
    params = merge_framework_params(framework=framework, shared_params=params, mxnet_params=mxnet_params, pytorch_params=pytorch_params)
    if problem_type == QUANTILE:
        problem_params = get_searchspace_quantile(framework)
    elif problem_type == BINARY:
        problem_params = get_searchspace_binary(framework)
    elif problem_type == MULTICLASS:
        problem_params = get_searchspace_multiclass(framework, num_classes=num_classes)
    elif problem_type == REGRESSION:
        problem_params = get_searchspace_regression(framework)
    else:
        # Without this branch, an unknown problem_type raises UnboundLocalError below.
        raise ValueError(f"Unsupported problem_type: {problem_type}")
    params.update(problem_params)
    return params.copy()
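A hedged usage sketch for the dispatcher above, assuming the problem-type constants come from autogluon.core.constants as they do elsewhere in AutoGluon's tabular code:

from autogluon.core.constants import BINARY

# Shared params, overlaid with the pytorch-specific entries, plus the binary-specific ones.
space = get_default_searchspace(problem_type=BINARY, framework='pytorch')
print(sorted(space))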
Example #4
def get_searchspace_multiclass_baseline(num_classes):
    params = {
        'learning_rate': Real(lower=5e-3, upper=0.2, default=0.1, log=True),
        'depth': Int(lower=5, upper=8, default=6),
        'l2_leaf_reg': Real(lower=1, upper=5, default=3),
    }
    return params
def get_searchspace_regression_baseline():
    params = {
        'learning_rate': Real(lower=5e-3, upper=0.2, default=0.05, log=True),
        'depth': Int(lower=5, upper=8, default=6),
        'l2_leaf_reg': Real(lower=1, upper=5, default=3),
    }
    return params
Example #6
def get_searchspace_regression_baseline():
    params = {
        'learning_rate': Real(lower=5e-3, upper=0.2, default=0.05, log=True),
        'feature_fraction': Real(lower=0.75, upper=1.0, default=1.0),
        'min_data_in_leaf': Int(lower=2, upper=60, default=20),
        'num_leaves': Int(lower=16, upper=96, default=31),
    }
    return params
Example #7
def get_searchspace_quantile(framework):
    if framework != 'pytorch':
        raise ValueError("Only pytorch tabular neural network is currently supported for quantile regression.")
    params = {
        'activation': Categorical('relu', 'elu', 'tanh'),
        'weight_decay': Real(1e-12, 1.0, default=1e-6, log=True),
        'gamma': Real(0.1, 10.0, default=5.0),
        'alpha': Categorical(0.001, 0.01, 0.1, 1.0),
    }
    return params
Example #8
def get_searchspace_quantile():
    params = {
        'learning_rate': Real(1e-4, 3e-2, default=3e-4, log=True),
        'weight_decay': Real(1e-12, 0.1, default=1e-6, log=True),
        'dropout_prob': Real(0.0, 0.5, default=0.1),
        'gamma': Real(0.1, 10.0, default=5.0),
        'num_layers': Categorical(2, 3, 4),
        'hidden_size': Categorical(128, 256, 512),
        'embedding_size_factor': Real(0.5, 1.5, default=1.0),
        'alpha': Categorical(0.001, 0.01, 0.1, 1.0),
    }
    return params
def get_default_searchspace():
    params = {
        'lr': Real(5e-5, 5e-3, default=1e-3, log=True),
        'weight_decay': Real(1e-6, 5e-2, default=1e-6, log=True),
        'p_dropout': Categorical(0.1, 0, 0.5),
        'n_heads': Categorical(8, 4),
        'hidden_dim': Categorical(128, 32, 64, 256),
        'n_layers': Categorical(2, 1, 3, 4, 5),
        'feature_dim': Int(8, 128, default=64),
        'num_output_layers': Categorical(1, 2),
    }

    return params.copy()
Example #10
def get_searchspace_multiclass_baseline():
    params = {
        'learning_rate': Real(lower=5e-3, upper=0.2, default=0.05, log=True),
        'feature_fraction': Real(lower=0.75, upper=1.0, default=1.0),
        'min_data_in_leaf': Int(
            lower=2, upper=60, default=20
        ),  # TODO: Use size of dataset to set upper, if row count is small upper should be small
        'num_leaves': Int(
            lower=16, upper=96, default=31
        ),  # TODO: Use row count and feature count to set this, the higher feature count the higher num_leaves upper
        # TODO: Bin size max increase
    }
    return params
Example #11
def get_searchspace_regression():
    params = {
        'learning_rate': Real(1e-4, 3e-2, default=3e-4, log=True),
        'weight_decay': Real(1e-12, 0.1, default=1e-6, log=True),
        'dropout_prob': Real(0.0, 0.5, default=0.1),
        # 'layers': Categorical(None, [200, 100], [256], [2056], [1024, 512, 128], [1024, 1024, 1024]),
        'layers': Categorical(None, [200, 100], [256], [100, 50], [200, 100, 50], [50, 25], [300, 150]),
        'embedding_size_factor': Real(0.5, 1.5, default=1.0),
        'network_type': Categorical('widedeep', 'feedforward'),
        'use_batchnorm': Categorical(True, False),
        'activation': Categorical('relu', 'softrelu', 'tanh'),
        # 'batch_size': Categorical(512, 1024, 2056, 128), # this is used in preprocessing so cannot search atm
    }
    return params
Example #12
def get_searchspace_regression_baseline():
    params = {
        'objective': 'regression',
        'learning_rate': Real(lower=5e-3, upper=0.2, default=0.1, log=True),
        'feature_fraction': Real(lower=0.75, upper=1.0, default=1.0),
        'min_data_in_leaf': Int(lower=2, upper=30, default=20),
        'num_leaves': Int(lower=16, upper=96, default=31),
        'num_boost_round': DEFAULT_NUM_BOOST_ROUND,
        'boosting_type': 'gbdt',
        'verbose': -1,
        'two_round': True,
        'seed_value': None,
    }
    return params
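One plausible way a mixed fixed-plus-searchable dictionary like this reaches a model is through TabularPredictor's tuning interface. A sketch under the AutoGluon 0.x API, where train_data and the 'target' label are assumed placeholders:

from autogluon.tabular import TabularPredictor

predictor = TabularPredictor(label='target').fit(
    train_data,  # assumed: a DataFrame containing the 'target' column
    hyperparameters={'GBM': get_searchspace_regression_baseline()},
    hyperparameter_tune_kwargs={'num_trials': 10, 'searcher': 'random', 'scheduler': 'local'},
)

Fixed entries such as 'objective' and 'verbose' pass through unchanged, while Real/Int values are sampled per trial.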
Example #13
def get_searchspace_multiclass(num_classes):
    # Default search space; only non-fixed hyperparameters are specified here.  # TODO: move to separate file
    params = {
        'learning_rate': Real(1e-4, 3e-2, default=3e-4, log=True),
        'weight_decay': Real(1e-12, 0.1, default=1e-6, log=True),
        'dropout_prob': Real(0.0, 0.5, default=0.1),
        # 'layers': Categorical(None, [200, 100], [256], [2056], [1024, 512, 128], [1024, 1024, 1024]),
        'layers': Categorical(None, [200, 100], [256], [100, 50], [200, 100, 50], [50, 25], [300, 150]),
        'embedding_size_factor': Real(0.5, 1.5, default=1.0),
        'network_type': Categorical('widedeep', 'feedforward'),
        'use_batchnorm': Categorical(True, False),
        'activation': Categorical('relu', 'softrelu'),
        # 'batch_size': Categorical(512, 1024, 2056, 128), # this is used in preprocessing so cannot search atm
    }
    return params
def test_LocalSequentialScheduler_no_criteria():
    search_space = {'lr': Real(1e-2, 1e-1, log=True)}

    def _train_fn_():
        pass

    with pytest.raises(AssertionError, match="Need stopping criterion: Either num_trials or time_out"):
        LocalSequentialScheduler(train_fn=_train_fn_, search_space=search_space, reward_attr='reward_attr', resource={})
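As a companion to the test above, here is a sketch of the passing case: supplying num_trials satisfies the stopping-criterion assertion. The train_fn(args, reporter) signature and the reporter call follow AutoGluon's scheduler convention; the toy objective is an assumption for illustration:

from autogluon.core.scheduler import LocalSequentialScheduler

def train_toy(args, reporter):
    # Toy objective: the reward peaks as lr approaches 0.05.
    reporter(epoch=1, reward_attr=-abs(args['lr'] - 0.05))

scheduler = LocalSequentialScheduler(
    train_fn=train_toy,
    search_space={'lr': Real(1e-2, 1e-1, log=True)},
    reward_attr='reward_attr',
    num_trials=5,  # a stopping criterion, so no AssertionError is raised
)
scheduler.run()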
Example #15
def get_searchspace_multiclass_baseline(num_classes):
    params = {
        'objective': 'multiclass',
        'num_classes': num_classes,
        'learning_rate': Real(lower=5e-3, upper=0.2, default=0.1, log=True),
        'feature_fraction': Real(lower=0.75, upper=1.0, default=1.0),
        'min_data_in_leaf': Int(lower=2, upper=30, default=20),  # TODO: Use size of dataset to set upper, if row count is small upper should be small
        'num_leaves': Int(lower=16, upper=96, default=31),  # TODO: Use row count and feature count to set this, the higher feature count the higher num_leaves upper
        'num_boost_round': DEFAULT_NUM_BOOST_ROUND,
        'boosting_type': 'gbdt',
        'verbose': -1,
        'two_round': True,
        'seed_value': None,
        # 'device': 'gpu'  # needs GPU-enabled lightGBM build
        # TODO: Bin size max increase
    }
    return params
Example #16
def get_searchspace_regression(framework):
    params = {
        'weight_decay': Real(1e-12, 1.0, default=1e-6, log=True),
    }
    mxnet_params = {
        'activation': Categorical('relu', 'softrelu', 'tanh'),
    }
    pytorch_params = {
        'activation': Categorical('relu', 'elu', 'tanh'),
    }
    return merge_framework_params(framework=framework, shared_params=params, mxnet_params=mxnet_params, pytorch_params=pytorch_params)
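merge_framework_params is called throughout these examples but never shown. A plausible minimal implementation, consistent with every call site here (a hypothetical reconstruction, not copied from AutoGluon):

def merge_framework_params(framework, shared_params, mxnet_params, pytorch_params):
    # Hypothetical: overlay the framework-specific entries onto the
    # shared ones and reject unknown frameworks.
    if framework == 'mxnet':
        shared_params.update(mxnet_params)
    elif framework == 'pytorch':
        shared_params.update(pytorch_params)
    else:
        raise ValueError(f"Unknown framework: {framework}")
    return shared_params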
Example #17
def get_base_searchspace():
    base_params = {
        'n_estimators': DEFAULT_NUM_BOOST_ROUND,
        'booster': 'gbtree',
        'n_jobs': os.cpu_count(),  # TODO: xgboost plans to accept -1 for compatibility with other packages; switch to that once it lands.
        'learning_rate': Real(lower=5e-3, upper=0.2, default=0.1, log=True),
        'max_depth': Int(lower=3, upper=10, default=3),
        'min_child_weight': Int(lower=1, upper=5, default=1),
        'gamma': Real(lower=0, upper=5, default=0.01),
        'subsample': Real(lower=0.5, upper=1.0, default=1.0),
        'colsample_bytree': Real(lower=0.5, upper=1.0, default=1.0),
        'reg_alpha': Real(lower=0.0, upper=10.0, default=0.0),
        'reg_lambda': Real(lower=0.0, upper=10.0, default=1.0),
    }
    return base_params
Example #18
def get_base_searchspace():
    base_params = {
        'n_estimators': DEFAULT_NUM_BOOST_ROUND,
        'booster': 'gbtree',
        'n_jobs': -1,
        'learning_rate': Real(lower=5e-3, upper=0.2, default=0.1, log=True),
        'max_depth': Int(lower=3, upper=10, default=6),
        'min_child_weight': Int(lower=1, upper=5, default=1),
        'gamma': Real(lower=0, upper=5, default=0.01),
        'subsample': Real(lower=0.5, upper=1.0, default=1.0),
        'colsample_bytree': Real(lower=0.5, upper=1.0, default=1.0),
        'reg_alpha': Real(lower=0.0, upper=10.0, default=0.0),
        'reg_lambda': Real(lower=0.0, upper=10.0, default=1.0),
    }
    return base_params
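The default= values give each Space a concrete fallback configuration. A hypothetical helper (not part of AutoGluon) that collapses a search space down to those defaults, assuming the Space base class from autogluon.core.space:

from autogluon.core.space import Space

def defaults_of(searchspace):
    # Hypothetical: replace every Space with its default value; keep fixed entries as-is.
    return {k: v.default if isinstance(v, Space) else v for k, v in searchspace.items()}

print(defaults_of(get_base_searchspace())['max_depth'])  # 6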
Example #19
    parser.add_argument('--current-host',
                        type=str,
                        default=os.environ['SM_CURRENT_HOST'])
    parser.add_argument('--hosts',
                        type=list,
                        default=json.loads(os.environ['SM_HOSTS']))

    args = parser.parse_args()

    # Set hyperparameters after parsing the arguments
    num_epochs = args.epochs
    model_dir = args.model_dir
    training_dir = args.train

    # autogluon
    learning_rate = Real(2e-06, 2e-05, log=True)
    dataset = task.Dataset(filepath=os.path.join(training_dir, 'train.csv'),
                           usecols=['text', 'target'])
    predictor = task.fit(dataset,
                         lr=learning_rate,
                         epochs=num_epochs,
                         pretrained_dataset='wiki_multilingual_uncased')
    # Use a context manager so the file handle is closed after pickling.
    with open(os.path.join(model_dir, 'model'), 'wb') as f:
        cloudpickle.dump(predictor, f)


def model_fn(model_dir):
    """
    Load the gluon model. Called once when hosting service starts.
    :param: model_dir The directory where model files are stored.
    :return: a model
    """


# Dictionary of preset fit() parameter configurations for ImagePredictor.
preset_image_predictor = dict(
    # Best predictive accuracy with little consideration to inference time or model size.
    # Achieve even better results by specifying a large time_limit value.
    # Recommended for applications that benefit from the best possible model accuracy.
    best_quality={
        'hyperparameters': {
            'model': Categorical('coat_lite_small', 'twins_pcpvt_base', 'swin_base_patch4_window7_224', 'resnet101d')
                if timm is not None else Categorical('resnet50_v1b', 'resnet101_v1d', 'resnest200'),
            'lr': Real(1e-5, 1e-2, log=True),
            'batch_size': Categorical(8, 16, 32, 64),
            'epochs': 200,
            'early_stop_patience': 50,
        },
        'hyperparameter_tune_kwargs': {
            'num_trials': 1024,
            'searcher': 'random',
        },
        'time_limit': 12 * 3600,
    },

    # Good predictive accuracy with fast inference.
    # Recommended for applications that require reasonable inference speed and/or model size.
    good_quality_fast_inference={
        'hyperparameters': {
Example #21
def get_searchspace_regression():
    params = {
        'weight_decay': Real(1e-12, 1.0, default=1e-6, log=True),
        'activation': Categorical('relu', 'softrelu', 'tanh'),
    }
    return params