Example #1
def get_knob_config():
    return {
        'epochs': FixedKnob(15),
        'learning_rate': FloatKnob(0.001, 0.07),
        'decay_rate': FloatKnob(5e-5, 1e-4, is_exp=True),
        # FloatKnob only takes (min, max[, is_exp]); a CategoricalKnob
        # keeps the three candidate values the original listed
        'momentum': CategoricalKnob([0.1, 0.3, 0.6]),
        'batch_size': CategoricalKnob([32, 64, 128]),
        'max_image_size': FixedKnob(28)
    }
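All of these snippets implement the same hook: in Rafiki / SINGA-Auto, get_knob_config() is a static method on a model class, and for each trial the tuner passes one concrete value per knob into __init__. A minimal sketch of that surrounding class (the class name is hypothetical; the import path is rafiki.model in Rafiki and singa_auto.model in SINGA-Auto):

from rafiki.model import BaseModel, FixedKnob, FloatKnob, CategoricalKnob

class MnistClassifier(BaseModel):
    @staticmethod
    def get_knob_config():
        return {
            'epochs': FixedKnob(15),
            'learning_rate': FloatKnob(0.001, 0.07),
            'batch_size': CategoricalKnob([32, 64, 128]),
        }

    def __init__(self, **knobs):
        super().__init__(**knobs)
        # Each trial receives one concrete value per knob,
        # e.g. knobs['learning_rate'] == 0.0123
        self._knobs = knobs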
Example #2
def get_knob_config():
    return {
        'max_epochs': FixedKnob(10),
        'learning_rate': FloatKnob(1e-5, 1e-2, is_exp=True),
        'batch_size': CategoricalKnob([16, 32, 64, 128]),
        'max_image_size': CategoricalKnob([32, 64, 128, 224]),
    }
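is_exp=True asks the advisor to search a FloatKnob's range on a log scale, which suits learning rates spanning several orders of magnitude. A sketch of what log-uniform sampling amounts to (an illustrative helper, not the framework's internal code):

import math
import random

def sample_exp_float(value_min, value_max):
    # Uniform in exponent space: 1e-5..1e-4 is sampled as often as
    # 1e-3..1e-2, instead of the top decade dominating every draw
    lo, hi = math.log10(value_min), math.log10(value_max)
    return 10 ** random.uniform(lo, hi)

lr = sample_exp_float(1e-5, 1e-2)  # e.g. 0.00031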
Example #3
def get_knob_config():
    return {
        'epochs': FixedKnob(15),
        'batch_size': CategoricalKnob([32, 64, 128]),
        # FloatKnob only takes (min, max[, is_exp]); a CategoricalKnob
        # keeps the three candidate values the original listed
        'l_rate': CategoricalKnob([0.0001, 0.001, 0.01]),
        'max_image_size': CategoricalKnob([28, 32])
    }
Example #4
def get_knob_config():
    return {
        'trial_epochs': FixedKnob(300),
        'lr': FloatKnob(1e-4, 1, is_exp=True),
        'lr_decay': FloatKnob(1e-3, 1e-1, is_exp=True),
        'opt_momentum': FloatKnob(0.7, 1, is_exp=True),
        'opt_weight_decay': FloatKnob(1e-5, 1e-3, is_exp=True),
        'batch_size': CategoricalKnob([32, 64, 128]),
        'drop_rate': FloatKnob(0, 0.4),
        'max_image_size': FixedKnob(32),
        'share_params': PolicyKnob('SHARE_PARAMS'),

        # Whether training is shortened by early stopping
        'quick_train': PolicyKnob('EARLY_STOP'),
        'early_stop_train_val_samples': FixedKnob(1024),
        'early_stop_patience_epochs': FixedKnob(5)
    }
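Unlike the value knobs, a PolicyKnob carries no range: it resolves to True when the advisor activates the named policy ('SHARE_PARAMS', 'EARLY_STOP') for a given trial, and False otherwise. A sketch of how a train() method might consult these flags (the helper methods here are hypothetical):

def train(self, dataset_path, shared_params=None, **kwargs):
    if self._knobs['share_params'] and shared_params is not None:
        self._load_params(shared_params)  # warm-start from earlier trials

    patience = self._knobs['early_stop_patience_epochs']
    for epoch in range(self._knobs['trial_epochs']):
        self._train_one_epoch(dataset_path)
        # EARLY_STOP policy active: cut the trial short once the
        # validation score stops improving for 'patience' epochs
        if self._knobs['quick_train'] and \
                self._epochs_without_improvement() >= patience:
            break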
Example #5
def get_knob_config():
    return {
        'max_iter': FixedKnob(20),
        'kernel': CategoricalKnob(['rbf', 'linear', 'poly']),
        'gamma': CategoricalKnob(['scale', 'auto']),
        'C': FloatKnob(1e-4, 1e4, is_exp=True),
        'max_image_size': CategoricalKnob([16, 32])
    }
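These knob names line up one-to-one with scikit-learn's SVC parameters, so building the classifier from a chosen configuration is direct. A sketch, assuming the surrounding model wraps sklearn:

from sklearn import svm

def build_classifier(knobs):
    # Each knob maps straight onto an sklearn SVC argument
    return svm.SVC(
        max_iter=knobs['max_iter'],
        kernel=knobs['kernel'],  # 'rbf', 'linear' or 'poly'
        gamma=knobs['gamma'],    # 'scale' or 'auto'
        C=knobs['C'],            # searched log-scale over 1e-4..1e4
    )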
Example #6
def get_knob_config():
    return {
        'epochs': FixedKnob(1),
        'word_embed_dims': IntegerKnob(16, 128),
        'word_rnn_hidden_size': IntegerKnob(16, 128),
        'word_dropout': FloatKnob(1e-3, 2e-1, is_exp=True),
        'learning_rate': FloatKnob(1e-2, 1e-1, is_exp=True),
        'batch_size': CategoricalKnob([16, 32, 64, 128]),
    }
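Here the knobs size the network itself rather than just the optimizer. A sketch of wiring them into an embedding-plus-RNN text classifier (PyTorch used for illustration; the original model's framework is not shown, and vocab_size / num_classes are assumed inputs):

import torch.nn as nn

class WordRnn(nn.Module):
    def __init__(self, knobs, vocab_size, num_classes):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, knobs['word_embed_dims'])
        self.rnn = nn.GRU(knobs['word_embed_dims'],
                          knobs['word_rnn_hidden_size'], batch_first=True)
        self.drop = nn.Dropout(knobs['word_dropout'])
        self.out = nn.Linear(knobs['word_rnn_hidden_size'], num_classes)

    def forward(self, tokens):
        x = self.embed(tokens)     # (batch, seq, embed_dims)
        _, h = self.rnn(x)         # h: final hidden state
        return self.out(self.drop(h[-1]))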
Example #7
def get_knob_config():
    return {
        'max_epochs': FixedKnob(10),
        'hidden_layer_count': IntegerKnob(1, 2),
        'hidden_layer_units': IntegerKnob(2, 128),
        'learning_rate': FloatKnob(1e-5, 1e-1, is_exp=True),
        'batch_size': CategoricalKnob([16, 32, 64, 128]),
        'max_image_size': CategoricalKnob([16, 32, 48]),
        'quick_train': PolicyKnob('EARLY_STOP')  # whether early stopping is used
    }
Example #8
def get_knob_config():
    return {
        'to_eval': FixedKnob(False),
    }
Example #9
def get_knob_config():
    return {
        'min_value': FixedKnob(-9999999999)  # minimum numeric value
    }
Example #10
def get_knob_config():
    return {
        # Learning parameters
        'lr': FixedKnob(0.0001),  # learning rate
        'weight_decay': FixedKnob(0.0),
        'drop_rate': FixedKnob(0.0),
        'max_epochs': FixedKnob(1),
        'batch_size': CategoricalKnob([32]),
        'max_iter': FixedKnob(20),
        'optimizer': CategoricalKnob(['adam']),
        'scratch': FixedKnob(True),

        # Data augmentation
        'max_image_size': FixedKnob(32),
        'share_params': CategoricalKnob(['SHARE_PARAMS']),
        'tag': CategoricalKnob(['relabeled']),
        'workers': FixedKnob(8),
        'seed': FixedKnob(123456),
        'scale': FixedKnob(512),
        'horizontal_flip': FixedKnob(True),

        # Self-paced Learning and Loss Revision
        'enable_spl': FixedKnob(True),
        'spl_threshold_init': FixedKnob(16.0),
        'spl_mu': FixedKnob(1.3),
        'enable_lossrevise': FixedKnob(False),
        'lossrevise_slop': FixedKnob(2.0),

        # Label Adaptation
        'enable_label_adaptation': FixedKnob(True),

        # GM Prior Regularization
        'enable_gm_prior_regularization': FixedKnob(False),
        'gm_prior_regularization_a': FixedKnob(0.001),
        'gm_prior_regularization_b': FixedKnob(0.0001),
        'gm_prior_regularization_alpha': FixedKnob(0.5),
        'gm_prior_regularization_num': FixedKnob(4),
        'gm_prior_regularization_lambda': FixedKnob(0.0001),
        'gm_prior_regularization_upt_freq': FixedKnob(100),
        'gm_prior_regularization_param_upt_freq': FixedKnob(50),

        # Explanation
        'enable_explanation': FixedKnob(False),
        'explanation_gradcam': FixedKnob(True),
        'explanation_lime': FixedKnob(True),

        # Model Slicing
        'enable_model_slicing': FixedKnob(False),
        'model_slicing_groups': FixedKnob(0),
        'model_slicing_rate': FixedKnob(1.0),
        'model_slicing_scheduler_type': FixedKnob('randomminmax'),
        'model_slicing_randnum': FixedKnob(1),

        # MC Dropout
        'enable_mc_dropout': FixedKnob(False),
        'mc_trials_n': FixedKnob(10)
    }
Example #11
def get_knob_config():
    return {
        'c1': FloatKnob(0.001, 0.01),
        'c2': FloatKnob(0.01, 0.1),
        'max_iterations': FixedKnob(10)
    }
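c1, c2 and max_iterations are the parameter names sklearn-crfsuite uses for its L1 and L2 regularization coefficients and its iteration cap, so this configuration reads like a CRF sequence tagger. Assuming that library, the knobs would be consumed like this (a sketch):

import sklearn_crfsuite

def build_tagger(knobs):
    return sklearn_crfsuite.CRF(
        algorithm='lbfgs',
        c1=knobs['c1'],  # L1 regularization coefficient
        c2=knobs['c2'],  # L2 regularization coefficient
        max_iterations=knobs['max_iterations'],
        all_possible_transitions=True,
    )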