def get_knob_config():
    return {
        'epochs': FixedKnob(2),
        'hidden_layer_count': IntegerKnob(1, 2),
        'hidden_layer_units': IntegerKnob(2, 128),
        'learning_rate': FloatKnob(1e-5, 1e-1, is_exp=True),
        'batch_size': CategoricalKnob([16, 32, 64, 128]),
        'image_size': FixedKnob(32)
    }

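# The knob config above reads like one for a small feed-forward image classifier.
# A minimal sketch of how the sampled knob values might be consumed follows; the
# function name, `num_classes`, the RGB input shape and the Adam optimizer are
# assumptions for illustration, not taken from the original model.
import tensorflow as tf

def build_feedforward_classifier(knobs, num_classes):
    size = knobs['image_size']
    inputs = tf.keras.Input(shape=(size, size, 3))
    x = tf.keras.layers.Flatten()(inputs)
    for _ in range(knobs['hidden_layer_count']):
        x = tf.keras.layers.Dense(knobs['hidden_layer_units'], activation='relu')(x)
    outputs = tf.keras.layers.Dense(num_classes, activation='softmax')(x)
    model = tf.keras.Model(inputs, outputs)
    model.compile(optimizer=tf.keras.optimizers.Adam(knobs['learning_rate']),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
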
def get_knob_config():
    return {
        'epochs': FixedKnob(10),
        'word_embed_dims': IntegerKnob(16, 128),
        'word_rnn_hidden_size': IntegerKnob(16, 128),
        'word_dropout': FloatKnob(1e-3, 2e-1, is_exp=True),
        'learning_rate': FloatKnob(1e-2, 1e-1, is_exp=True),
        'batch_size': CategoricalKnob([16, 32, 64, 128]),
    }

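# The word-level knobs above suggest an RNN text model. Below is a minimal
# PyTorch sketch of how they could be wired up; the class name, `vocab_size`
# and `num_classes` are hypothetical, and the real model may differ.
import torch.nn as nn

class WordRnnClassifier(nn.Module):
    def __init__(self, vocab_size, num_classes, knobs):
        super().__init__()
        self._embed = nn.Embedding(vocab_size, knobs['word_embed_dims'])
        self._rnn = nn.GRU(knobs['word_embed_dims'],
                           knobs['word_rnn_hidden_size'],
                           batch_first=True)
        self._dropout = nn.Dropout(knobs['word_dropout'])
        self._out = nn.Linear(knobs['word_rnn_hidden_size'], num_classes)

    def forward(self, token_ids):            # token_ids: (batch, seq_len)
        embedded = self._embed(token_ids)    # (batch, seq_len, embed_dims)
        _, hidden = self._rnn(embedded)      # hidden: (1, batch, hidden_size)
        return self._out(self._dropout(hidden[-1]))
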
def get_knob_config():
    return {
        'n_estimators': IntegerKnob(50, 200),
        'min_child_weight': IntegerKnob(1, 6),
        'max_depth': IntegerKnob(1, 10),
        'gamma': FloatKnob(0.0, 1.0, is_exp=False),
        'subsample': FloatKnob(0.5, 1.0, is_exp=False),
        'colsample_bytree': FloatKnob(0.1, 0.7, is_exp=False)
    }

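# These knob names match xgboost's scikit-learn interface one-to-one, so a trial
# could pass them straight through to the estimator. A sketch under that
# assumption (the helper name is hypothetical):
from xgboost import XGBClassifier

def build_xgb_classifier(knobs):
    return XGBClassifier(n_estimators=knobs['n_estimators'],
                         min_child_weight=knobs['min_child_weight'],
                         max_depth=knobs['max_depth'],
                         gamma=knobs['gamma'],
                         subsample=knobs['subsample'],
                         colsample_bytree=knobs['colsample_bytree'])
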
def get_knob_config():
    return {
        'max_epochs': FixedKnob(10),
        'hidden_layer_count': IntegerKnob(1, 2),
        'hidden_layer_units': IntegerKnob(2, 128),
        'learning_rate': FloatKnob(1e-5, 1e-1, is_exp=True),
        'batch_size': CategoricalKnob([16, 32, 64, 128]),
        'max_image_size': CategoricalKnob([16, 32, 48]),
        'quick_train': PolicyKnob('EARLY_STOP')  # Whether to use early stopping
    }

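# 'quick_train' is a PolicyKnob, so at train time it would presumably arrive as a
# flag saying whether the EARLY_STOP policy is active for this trial. A
# hypothetical training loop that honours it (the helper callables and the
# patience value are assumptions for illustration):
def run_training(knobs, train_one_epoch, validate, patience=2):
    best_acc, stale = 0.0, 0
    for _ in range(knobs['max_epochs']):
        train_one_epoch()
        acc = validate()
        if acc > best_acc:
            best_acc, stale = acc, 0
        else:
            stale += 1
        # Stop early only when the advisor has enabled the policy
        if knobs['quick_train'] and stale >= patience:
            break
    return best_acc
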
def get_knob_config():
    return {
        'max_iter': IntegerKnob(10, 40 if APP_MODE != 'DEV' else 10),  # range collapses to 10 iterations in DEV mode
        'kernel': CategoricalKnob(['rbf', 'linear']),
        'gamma': CategoricalKnob(['scale', 'auto']),
        'C': FloatKnob(1e-2, 1e2, is_exp=True)
    }

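# APP_MODE is referenced but not defined in these snippets; configs like this one
# (and the tree/epoch configs further down) narrow their search ranges when the
# system runs in development mode. One plausible way such a flag could be
# resolved, assuming it comes from the environment:
import os

APP_MODE = os.environ.get('APP_MODE', 'PROD')  # e.g. 'DEV' during local testing
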
def get_knob_config():
    return {
        'max_depth': IntegerKnob(1, 32),
        'splitter': CategoricalKnob(['best', 'random']),
        'criterion': CategoricalKnob(['gini', 'entropy']),
        'max_image_size': CategoricalKnob([16, 32])
    }

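# 'max_depth', 'splitter' and 'criterion' map directly onto
# sklearn.tree.DecisionTreeClassifier, while 'max_image_size' would instead
# control how images are resized/flattened before they reach the tree. A sketch
# under those assumptions (the helper name is hypothetical):
from sklearn.tree import DecisionTreeClassifier

def build_tree(knobs):
    return DecisionTreeClassifier(max_depth=knobs['max_depth'],
                                  splitter=knobs['splitter'],
                                  criterion=knobs['criterion'])
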
def get_knob_config():
    return {
        'int': IntegerKnob(1, 32),
        'float': FloatKnob(1e-5, 1),
        'cat': CategoricalKnob(['a', 'b', 'c']),
        'fixed': FixedKnob('fixed')
    }

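# The config above exercises the four basic knob types. By the time a trial runs,
# each knob should have been resolved to a plain Python value (an int, a float,
# one of the categories, or the fixed value). A hypothetical constructor that
# simply stores them:
class ExampleModel:
    def __init__(self, **knobs):
        self._knobs = knobs
        self._units = knobs['int']      # e.g. 17
        self._scale = knobs['float']    # e.g. 3.2e-4
        self._mode = knobs['cat']       # 'a', 'b' or 'c'
        self._label = knobs['fixed']    # always 'fixed'
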
def test_standard_knobs(self, budget):
    knob_config = {
        'int': IntegerKnob(2, 128),
        'float': FloatKnob(1e-5, 1e-1, is_exp=True),
        'cat': CategoricalKnob([16, 32, 64, 128]),
    }
    advisor = make_advisor(knob_config, budget)
    assert isinstance(advisor, BayesOptAdvisor)

def test_standard_knobs_with_params_sharing(self, budget):
    knob_config = {
        'int': IntegerKnob(2, 128),
        'float': FloatKnob(1e-5, 1e-1, is_exp=True),
        'cat': CategoricalKnob([16, 32, 64, 128]),
        'share_params': PolicyKnob('SHARE_PARAMS')
    }
    advisor = make_advisor(knob_config, budget)
    assert isinstance(advisor, BayesOptWithParamSharingAdvisor)

def test_standard_knobs_with_early_stop(self, budget):
    knob_config = {
        'int': IntegerKnob(2, 128),
        'float': FloatKnob(1e-5, 1e-1, is_exp=True),
        'cat': CategoricalKnob([16, 32, 64, 128]),
        'early_stop': PolicyKnob('EARLY_STOP')
    }
    advisor = make_advisor(knob_config, budget)
    assert isinstance(advisor, BayesOptAdvisor)

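# All three tests above take a `budget` fixture and assert which advisor class
# make_advisor selects for the given knob config. A minimal sketch of such a
# fixture, assuming the budget is a plain dict keyed by strings like 'TIME_HOURS'
# and 'GPU_COUNT' (the real fixture in the test suite may use different keys or
# values):
import pytest

@pytest.fixture
def budget():
    return {'TIME_HOURS': 0.1, 'GPU_COUNT': 0}
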
def get_knob_config():
    return {
        'max_depth': IntegerKnob(1, 32),
        'criterion': CategoricalKnob(['gini', 'entropy'])
    }

def get_knob_config():
    return {
        'max_depth': IntegerKnob(2, 16 if APP_MODE != 'DEV' else 4),  # shallower trees allowed in DEV mode
        'criterion': CategoricalKnob(['gini', 'entropy'])
    }

def get_knob_config():
    return {
        'epochs': IntegerKnob(1, 1 if APP_MODE != 'DEV' else 10),  # fixed at 1 epoch outside DEV mode, up to 10 in DEV
        'learning_rate': FloatKnob(1e-5, 1e-1, is_exp=True),
        'batch_size': CategoricalKnob([16, 32, 64, 128]),
    }

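# Several configs above declare FloatKnob(..., is_exp=True) for learning rates and
# dropout, which suggests the value is searched on a log scale rather than a
# linear one. A sketch of what such a draw could look like, assuming log-uniform
# sampling (the advisor's actual sampling strategy may differ):
import math
import random

def sample_exp_float(low, high):
    return math.exp(random.uniform(math.log(low), math.log(high)))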