def get_knob_config():
    return {
        'max_epochs': FixedKnob(10),
        'learning_rate': FloatKnob(1e-5, 1e-2, is_exp=True),
        'batch_size': CategoricalKnob([16, 32, 64, 128]),
        'max_image_size': CategoricalKnob([32, 64, 128, 224]),
    }
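# The knob config above is consumed at trial time: the tuner samples one
# value per knob and hands the resulting dict to the model. A minimal,
# hypothetical sketch (assuming a SINGA-Auto/Rafiki-style model that
# receives the sampled knob values as keyword arguments; the class and
# method bodies are illustrative, not part of the knob API):
class ImageClassifier:
    def __init__(self, **knobs):
        self._knobs = knobs  # e.g. {'max_epochs': 10, 'learning_rate': 3e-4, ...}

    def train(self, dataset_path, **kwargs):
        batch_size = self._knobs['batch_size']      # one of 16/32/64/128
        image_size = self._knobs['max_image_size']  # one of 32/64/128/224
        for epoch in range(self._knobs['max_epochs']):
            ...  # fit one epoch at the sampled settings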
def get_knob_config():
    return {
        'n_estimators': IntegerKnob(50, 200),
        'oob_score': CategoricalKnob([True, False]),
        'max_depth': IntegerKnob(10, 100),
        'max_features': CategoricalKnob(['auto', 'sqrt', 'log2']),
    }
def get_knob_config():
    return {
        'max_depth': IntegerKnob(1, 32),
        'splitter': CategoricalKnob(['best', 'random']),
        'criterion': CategoricalKnob(['gini', 'entropy']),
        'max_image_size': CategoricalKnob([16, 32]),
    }
def get_knob_config():
    return {
        'epochs': FixedKnob(15),
        'batch_size': CategoricalKnob([32, 64, 128]),
        # discrete choice; FloatKnob takes only a (min, max) range
        'l_rate': CategoricalKnob([0.0001, 0.001, 0.01]),
        'max_image_size': CategoricalKnob([28, 32]),
    }
def get_knob_config():
    return {
        'model_class': CategoricalKnob(['resnent101_mnist']),

        # Learning parameters
        'lr': FixedKnob(0.0001),  # learning rate
        'weight_decay': FixedKnob(0.0),
        'drop_rate': FixedKnob(0.0),
        'max_epochs': FixedKnob(30),
        'batch_size': CategoricalKnob([200]),
        'max_iter': FixedKnob(20),
        'optimizer': CategoricalKnob(['adam']),
        'scratch': FixedKnob(True),

        # Data augmentation
        'max_image_size': FixedKnob(32),
        'share_params': CategoricalKnob(['SHARE_PARAMS']),
        'tag': CategoricalKnob(['relabeled']),
        'workers': FixedKnob(8),
        'seed': FixedKnob(123456),
        'scale': FixedKnob(512),
        'horizontal_flip': FixedKnob(True),

        # Hyperparameters for PANDA modules:
        # Self-paced Learning and Loss Revision
        'enable_spl': FixedKnob(False),
        'spl_threshold_init': FixedKnob(16.0),
        'spl_mu': FixedKnob(1.3),
        'enable_lossrevise': FixedKnob(False),
        'lossrevise_slop': FixedKnob(2.0),

        # Label Adaptation
        'enable_label_adaptation': FixedKnob(False),  # error occurs when enabled

        # GM Prior Regularization
        'enable_gm_prior_regularization': FixedKnob(True),
        'gm_prior_regularization_a': FixedKnob(0.001),
        'gm_prior_regularization_b': FixedKnob(0.0001),
        'gm_prior_regularization_alpha': FixedKnob(0.5),
        'gm_prior_regularization_num': FixedKnob(4),
        'gm_prior_regularization_lambda': FixedKnob(0.0001),
        'gm_prior_regularization_upt_freq': FixedKnob(100),
        'gm_prior_regularization_param_upt_freq': FixedKnob(50),

        # Explanation
        'enable_explanation': FixedKnob(False),
        'explanation_gradcam': FixedKnob(True),
        'explanation_lime': FixedKnob(False),

        # Model Slicing
        'enable_model_slicing': FixedKnob(True),
        'model_slicing_groups': FixedKnob(0),
        'model_slicing_rate': FixedKnob(1.0),
        'model_slicing_scheduler_type': FixedKnob('randomminmax'),
        'model_slicing_randnum': FixedKnob(1),

        # MC Dropout
        'enable_mc_dropout': FixedKnob(False),
        'mc_trials_n': FixedKnob(10),
    }
def get_knob_config():
    return {
        'max_iter': FixedKnob(20),
        'kernel': CategoricalKnob(['rbf', 'linear', 'poly']),
        'gamma': CategoricalKnob(['scale', 'auto']),
        'C': FloatKnob(1e-4, 1e4, is_exp=True),
        'max_image_size': CategoricalKnob([16, 32]),
    }
def get_knob_config():
    return {
        # Note: in scikit-learn the 'lbfgs' solver supports only the 'l2'
        # penalty, so a sampled ('l1', 'lbfgs') pair must be handled at
        # train time (see the guard sketched below)
        'penalty': CategoricalKnob(['l1', 'l2']),
        'tol': FloatKnob(0.0001, 0.001),
        'C': IntegerKnob(4, 15),
        'fit_intercept': CategoricalKnob([True, False]),
        'solver': CategoricalKnob(['lbfgs', 'liblinear']),
    }
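# Because 'lbfgs' only supports the 'l2' penalty in scikit-learn, a trial
# that samples ('l1', 'lbfgs') would raise at fit time. A hedged sketch of
# a train-time guard (build_classifier is an illustrative helper, not part
# of the knob API):
from sklearn.linear_model import LogisticRegression

def build_classifier(knobs):
    penalty, solver = knobs['penalty'], knobs['solver']
    if penalty == 'l1' and solver == 'lbfgs':
        solver = 'liblinear'  # 'liblinear' supports both 'l1' and 'l2'
    return LogisticRegression(
        penalty=penalty,
        tol=knobs['tol'],
        C=float(knobs['C']),
        fit_intercept=knobs['fit_intercept'],
        solver=solver,
    )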
def get_knob_config():
    return {
        # Learning parameters
        'lr': FixedKnob(0.0001),
        'weight_decay': FixedKnob(0.0),
        'drop_rate': FixedKnob(0.0),
        'max_epochs': FixedKnob(10),  # original: 5
        'batch_size': CategoricalKnob([96]),  # original: 32
        'max_iter': FixedKnob(20),
        'optimizer': CategoricalKnob(['adam']),
        'scratch': FixedKnob(True),

        # Data augmentation
        'max_image_size': FixedKnob(32),
        'share_params': CategoricalKnob(['SHARE_PARAMS']),
        'tag': CategoricalKnob(['relabeled']),
        'workers': FixedKnob(8),
        'seed': FixedKnob(123456),
        'scale': FixedKnob(512),
        'horizontal_flip': FixedKnob(True),

        # Self-paced Learning and Loss Revision
        'enable_spl': FixedKnob(True),
        'spl_threshold_init': FixedKnob(16.0),
        'spl_mu': FixedKnob(1.3),
        'enable_lossrevise': FixedKnob(False),
        'lossrevise_slop': FixedKnob(2.0),

        # Label Adaptation
        'enable_label_adaptation': FixedKnob(False),

        # GM Prior Regularization
        'enable_gm_prior_regularization': FixedKnob(False),
        'gm_prior_regularization_a': FixedKnob(0.001),
        'gm_prior_regularization_b': FixedKnob(0.0001),
        'gm_prior_regularization_alpha': FixedKnob(0.5),
        'gm_prior_regularization_num': FixedKnob(4),
        'gm_prior_regularization_lambda': FixedKnob(0.0001),
        'gm_prior_regularization_upt_freq': FixedKnob(100),
        'gm_prior_regularization_param_upt_freq': FixedKnob(50),

        # Explanation
        'enable_explanation': FixedKnob(False),
        'explanation_method': FixedKnob('lime'),

        # Model Slicing
        'enable_model_slicing': FixedKnob(False),
        'model_slicing_groups': FixedKnob(0),
        'model_slicing_rate': FixedKnob(1.0),
        'model_slicing_scheduler_type': FixedKnob('randomminmax'),
        'model_slicing_randnum': FixedKnob(1),

        # SelectiveNet
        'selectionheadloss_weight': FixedKnob(0.5),
        'target_coverage': FixedKnob(0.8),
        'lamda': FixedKnob(32),
    }
def get_knob_config():
    return {
        'alpha': FloatKnob(0.001, 0.01),
        'normalize': CategoricalKnob([True, False]),
        'copy_X': CategoricalKnob([True, False]),
        'tol': FloatKnob(1e-05, 1e-04),
        'solver': CategoricalKnob(['svd', 'sag']),
        'random_state': IntegerKnob(1, 123),
    }
def get_knob_config():
    return {
        # Note: 'mse'/'mae' and 'min_impurity_split' come from older
        # scikit-learn releases; newer versions rename or remove them
        'criterion': CategoricalKnob(['mse', 'mae']),
        'splitter': CategoricalKnob(['best', 'random']),
        'min_samples_split': IntegerKnob(2, 5),
        'max_features': CategoricalKnob(['auto', 'sqrt']),
        'random_state': IntegerKnob(1, 123),
        'min_impurity_decrease': FloatKnob(0.0, 0.2),
        'min_impurity_split': FloatKnob(1e-07, 1e-03),
    }
def get_knob_config():
    return {
        'max_epochs': FixedKnob(10),
        'hidden_layer_count': IntegerKnob(1, 2),
        'hidden_layer_units': IntegerKnob(2, 128),
        'learning_rate': FloatKnob(1e-5, 1e-1, is_exp=True),
        'batch_size': CategoricalKnob([16, 32, 64, 128]),
        'max_image_size': CategoricalKnob([16, 32, 48]),
        'quick_train': PolicyKnob('EARLY_STOP'),  # whether early stopping is used
    }
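# Unlike the other knob types, PolicyKnob('EARLY_STOP') does not carry a
# hyperparameter value: the tuner sets it to True only for trials that are
# allowed to cut training short. A minimal sketch of how a training loop
# might honour it (train_one_epoch, evaluate, and patience are illustrative
# assumptions, not part of the knob API):
def train_with_knobs(knobs, train_one_epoch, evaluate, patience=3):
    best_val_loss = float('inf')
    stale_epochs = 0
    for epoch in range(knobs['max_epochs']):
        train_one_epoch()
        val_loss = evaluate()
        if val_loss < best_val_loss:
            best_val_loss, stale_epochs = val_loss, 0
        else:
            stale_epochs += 1
        # stop early only when the EARLY_STOP policy is active for this trial
        if knobs['quick_train'] and stale_epochs >= patience:
            break
    return best_val_loss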
def get_knob_config():
    return {
        'C': IntegerKnob(2, 3),
        'kernel': CategoricalKnob(['poly', 'rbf', 'linear']),
        'degree': IntegerKnob(2, 3),
        'gamma': CategoricalKnob(['scale', 'auto']),
        'coef0': FloatKnob(0.0, 0.1),
        'shrinking': CategoricalKnob([True, False]),
        'tol': FloatKnob(1e-03, 1e-01, is_exp=True),
        'decision_function_shape': CategoricalKnob(['ovo', 'ovr']),
        'probability': CategoricalKnob([True, False]),
    }
def get_knob_config():
    return {
        'C': FloatKnob(1.0, 1.5),
        'tol': FloatKnob(1e-03, 1e-01, is_exp=True),
        'validation_fraction': FloatKnob(0.01, 0.1),
        'n_iter_no_change': IntegerKnob(3, 5),
        'shuffle': CategoricalKnob([True, False]),
        'loss': CategoricalKnob(['hinge', 'squared_hinge']),
        'random_state': IntegerKnob(1, 2),
        'warm_start': CategoricalKnob([True, False]),
        'average': IntegerKnob(1, 5),
    }
def get_knob_config():
    return {
        'epochs': FixedKnob(15),
        'learning_rate': FloatKnob(0.001, 0.07),
        'decay_rate': FloatKnob(5e-5, 1e-4, is_exp=True),
        # discrete choice; FloatKnob takes only a (min, max) range
        'momentum': CategoricalKnob([0.1, 0.3, 0.6]),
        'batch_size': CategoricalKnob([32, 64, 128]),
        'max_image_size': FixedKnob(28),
    }
def get_knob_config():
    return {
        'epochs': FixedKnob(1),
        'word_embed_dims': IntegerKnob(16, 128),
        'word_rnn_hidden_size': IntegerKnob(16, 128),
        'word_dropout': FloatKnob(1e-3, 2e-1, is_exp=True),
        'learning_rate': FloatKnob(1e-2, 1e-1, is_exp=True),
        'batch_size': CategoricalKnob([16, 32, 64, 128]),
    }
def get_knob_config():
    return {
        'trial_epochs': FixedKnob(300),
        'lr': FloatKnob(1e-4, 1, is_exp=True),
        'lr_decay': FloatKnob(1e-3, 1e-1, is_exp=True),
        'opt_momentum': FloatKnob(0.7, 1, is_exp=True),
        'opt_weight_decay': FloatKnob(1e-5, 1e-3, is_exp=True),
        'batch_size': CategoricalKnob([32, 64, 128]),
        'drop_rate': FloatKnob(0, 0.4),
        'max_image_size': FixedKnob(32),
        'share_params': PolicyKnob('SHARE_PARAMS'),
        # Whether training is shortened by early stopping
        'quick_train': PolicyKnob('EARLY_STOP'),
        'early_stop_train_val_samples': FixedKnob(1024),
        'early_stop_patience_epochs': FixedKnob(5),
    }
def get_knob_config():
    return {
        'epoch': IntegerKnob(5, 10),
        'learning_rate': FloatKnob(1e-3, 1e-1, is_exp=True),
        'layer_dim': CategoricalKnob([50, 100, 250]),
    }
def get_knob_config():
    return {
        # discrete choice; IntegerKnob takes only a (min, max) range
        'n_neighbors': CategoricalKnob([3, 4, 6]),
        'metric': CategoricalKnob(['minkowski', 'euclidean']),
        'p': IntegerKnob(1, 2),
    }
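# For reference, a self-contained sketch of how a random tuner might draw
# one trial configuration from any of the knob configs above. The dataclass
# stand-ins below are hypothetical simplifications of the real knob classes
# (the library's versions carry more validation and metadata):
import math
import random
from dataclasses import dataclass
from typing import Any, List

@dataclass
class FixedKnob:
    value: Any

@dataclass
class CategoricalKnob:
    values: List[Any]

@dataclass
class IntegerKnob:
    value_min: int
    value_max: int
    is_exp: bool = False

@dataclass
class FloatKnob:
    value_min: float
    value_max: float
    is_exp: bool = False

def sample_knobs(knob_config):
    """Sample one concrete value per knob, yielding a trial's hyperparameters."""
    values = {}
    for name, knob in knob_config.items():
        if isinstance(knob, FixedKnob):
            values[name] = knob.value
        elif isinstance(knob, CategoricalKnob):
            values[name] = random.choice(knob.values)
        elif isinstance(knob, IntegerKnob):
            values[name] = random.randint(knob.value_min, knob.value_max)
        elif isinstance(knob, FloatKnob):
            if knob.is_exp:
                # log-uniform sampling, e.g. for learning rates spanning decades
                lo, hi = math.log(knob.value_min), math.log(knob.value_max)
                values[name] = math.exp(random.uniform(lo, hi))
            else:
                values[name] = random.uniform(knob.value_min, knob.value_max)
    return values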