# NOTE: assumed imports for the snippets below (they were collected from
# several modules, so paths are best-effort): the core search-space symbols
# come from hypernets.core; the DeepTables symbols (DTModuleSpace, DnnModule,
# DTFit, DT_consts), the Keras-style layers (Input, Dense, Flatten, ...) and
# the pipeline/estimator classes come from the DeepTables / HyperKeras /
# HyperGBM integrations, whose module paths vary between versions.
import copy
import itertools

from hypernets.core.ops import HyperInput, Identity, InputChoice, ModuleChoice, Repeat
from hypernets.core.search_space import (Bool, Choice, HyperSpace, Int,
                                         ModuleSpace, MultipleChoice,
                                         ParameterSpace, Real)


def default_dt_space():
    space = HyperSpace()
    with space.as_default():
        p_nets = MultipleChoice(
            ['dnn_nets', 'linear', 'cin_nets', 'fm_nets', 'afm_nets',
             'pnn_nets', 'cross_nets', 'cross_dnn_nets', 'dcn_nets',
             'autoint_nets', 'fgcnn_dnn_nets', 'fibi_dnn_nets'],
            num_chosen_most=3)
        dt_module = DTModuleSpace(
            nets=p_nets,
            auto_categorize=Bool(),
            cat_remain_numeric=Bool(),
            auto_discrete=Bool(),
            apply_gbm_features=Bool(),
            gbm_feature_type=Choice([DT_consts.GBM_FEATURE_TYPE_DENSE,
                                     DT_consts.GBM_FEATURE_TYPE_EMB]),
            embeddings_output_dim=Choice([4, 10, 20]),
            embedding_dropout=Choice([0, 0.1, 0.2, 0.3, 0.4, 0.5]),
            stacking_op=Choice([DT_consts.STACKING_OP_ADD,
                                DT_consts.STACKING_OP_CONCAT]),
            output_use_bias=Bool(),
            apply_class_weight=Bool(),
            earlystopping_patience=Choice([1, 3, 5]))
        dnn = DnnModule()(dt_module)
        fit = DTFit(batch_size=Choice([128, 256]))(dt_module)
    return space
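# Minimal usage sketch (added for illustration, assuming the standard
# Hypernets HyperSpace API: random_sample() assigns every unassigned
# hyperparameter and vectors exposes the assigned configuration):
def _demo_default_dt_space():
    space = default_dt_space()
    space.random_sample()   # randomly assign all hyperparameters
    return space.vectors    # the sampled configuration as a flat vector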
def cnn_search_space(input_shape, output_units, output_activation='softmax',
                     block_num_choices=[2, 3, 4, 5, 6],
                     activation_choices=['relu'],
                     filters_choices=[32, 64],
                     kernel_size_choices=[(1, 1), (3, 3)]):
    space = HyperSpace()
    with space.as_default():
        hp_use_bn = Bool()
        hp_pooling = Choice(list(range(2)))
        hp_filters = Choice(filters_choices)
        hp_kernel_size = Choice(kernel_size_choices)
        hp_fc_units = Choice([1024, 2048, 4096])
        if len(activation_choices) == 1:
            hp_activation = activation_choices[0]
        else:
            hp_activation = Choice(activation_choices)
        hp_bn_act = Choice(list(itertools.permutations(range(2))))

        input = Input(shape=input_shape)
        blocks = Repeat(
            lambda step: conv_block(
                block_no=step,
                hp_pooling=hp_pooling,
                hp_filters=hp_filters,
                hp_kernel_size=hp_kernel_size,
                hp_use_bn=hp_use_bn,
                hp_activation=hp_activation,
                hp_bn_act=hp_bn_act),
            repeat_times=block_num_choices)(input)
        x = Flatten()(blocks)
        x = Dense(units=hp_fc_units, activation=hp_activation, name='fc1')(x)
        x = Dense(units=hp_fc_units, activation=hp_activation, name='fc2')(x)
        x = Dense(output_units, activation=output_activation, name='predictions')(x)
    return space
def mini_dt_space():
    space = HyperSpace()
    with space.as_default():
        p_nets = MultipleChoice(['dnn_nets', 'linear', 'fm_nets'], num_chosen_most=2)
        dt_module = DTModuleSpace(
            nets=p_nets,
            auto_categorize=Bool(),
            cat_remain_numeric=Bool(),
            auto_discrete=Bool(),
            apply_gbm_features=Bool(),
            gbm_feature_type=Choice([DT_consts.GBM_FEATURE_TYPE_DENSE,
                                     DT_consts.GBM_FEATURE_TYPE_EMB]),
            embeddings_output_dim=Choice([4, 10]),
            embedding_dropout=Choice([0, 0.5]),
            stacking_op=Choice([DT_consts.STACKING_OP_ADD,
                                DT_consts.STACKING_OP_CONCAT]),
            output_use_bias=Bool(),
            apply_class_weight=Bool(),
            earlystopping_patience=Choice([1, 3, 5]))
        dnn = DnnModule(hidden_units=Choice([100, 200]),
                        reduce_factor=Choice([1, 0.8]),
                        dnn_dropout=Choice([0, 0.3]),
                        use_bn=Bool(),
                        dnn_layers=2,
                        activation='relu')(dt_module)
        fit = DTFit(batch_size=Choice([128, 256]))(dt_module)
    return space
def get_space():
    space = HyperSpace()
    with space.as_default():
        name_prefix = 'test_'
        filters = 64
        in1 = Input(shape=(28, 28, 1))
        in2 = Input(shape=(28, 28, 1))
        ic1 = InputChoice([in1, in2], 1)([in1, in2])
        or1 = ModuleChoice([sepconv5x5(name_prefix, filters),
                            sepconv3x3(name_prefix, filters),
                            avgpooling3x3(name_prefix, filters),
                            maxpooling3x3(name_prefix, filters),
                            identity(name_prefix)])(ic1)
        space.set_inputs([in1, in2])
    return space
def get_space():
    space = HyperSpace()
    with space.as_default():
        id1 = Identity(p1=Choice(['a', 'b']), p2=Int(1, 100), p3=Real(0, 1.0))
    return space
def get_space():
    space = HyperSpace()
    with space.as_default():
        p1 = Int(1, 100)
        p2 = Choice(['a', 'b', 'c'])
        p3 = Bool()
        p4 = Real(0.0, 1.0)
        id1 = Identity(p1=p1)
        id2 = Identity(p2=p2)(id1)
        id3 = Identity(p3=p3)(id2)
        id4 = Identity(p4=p4)(id3)
    return space
def get_space():
    space = HyperSpace()
    with space.as_default():
        filters = 64
        in1 = Input(shape=(28, 28, 1))
        # hp_dict and conv_layer are free names here, expected to be defined
        # at module level alongside this snippet
        conv_layer(hp_dict, 'normal', 0, [in1, in1], filters, 5)
        space.set_inputs(in1)
    return space
def get_space():
    space = HyperSpace()
    with space.as_default():
        filters = 64
        in1 = Input(shape=(28, 28, 1), dtype='float32')
        # hp_dict and conv_node are free names here, expected to be defined
        # at module level alongside this snippet
        conv_node(hp_dict, 'normal', 0, 0, [in1, in1], filters)
        space.set_inputs(in1)
    return space
def func_space(func):
    space = HyperSpace()
    with space.as_default():
        # Collect the defaults of `func` that are ParameterSpace instances.
        # Note: zipping co_varnames with __defaults__ only lines up correctly
        # when every positional parameter of `func` has a default value.
        params = {name: copy.copy(v)
                  for name, v in zip(func.__code__.co_varnames, func.__defaults__)
                  if isinstance(v, ParameterSpace)}
        for _, v in params.items():
            v.attach_to_space(space, v.name)
        input = HyperInput()
        id1 = Identity(**params)(input)
        space.set_outputs([id1])
    return space
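# Illustrative (hypothetical) target for func_space: defaults that are
# ParameterSpace instances become tunable hyperparameters of the resulting
# space, while plain defaults are ignored.
def _objective(lr=Real(0.001, 0.1), units=Choice([32, 64]), name='fixed'):
    pass

# func_space(_objective) then yields a space with two tunable parameters
# mirroring `lr` and `units`.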
def get_space_p_in_p():
    space = HyperSpace()
    with space.as_default():
        p1 = Pipeline([SimpleImputer(name='imputer1'), StandardScaler(name='scaler1')],
                      name='p1')
        p2 = Pipeline([SimpleImputer(name='imputer2'), StandardScaler(name='scaler2')],
                      name='p2')
        input = HyperInput(name='input1')
        p3 = Pipeline([p1, p2], name='p3')(input)
        space.set_inputs(input)
    return space
def get_space_num_cat_pipeline_complex(dataframe_mapper_default=False,
                                       lightgbm_fit_kwargs={},
                                       xgb_fit_kwargs={},
                                       catboost_fit_kwargs={}):
    space = HyperSpace()
    with space.as_default():
        input = HyperInput(name='input1')
        p1 = numeric_pipeline_complex()(input)
        p2 = categorical_pipeline_complex()(input)
        # p2 = categorical_pipeline_simple()(input)
        p3 = DataFrameMapper(default=dataframe_mapper_default,
                             input_df=True,
                             df_out=True,
                             df_out_dtype_transforms=[(column_object, 'int')])([p1, p2])

        lightgbm_init_kwargs = {
            'boosting_type': Choice(['gbdt', 'dart', 'goss']),
            'num_leaves': Choice([11, 31, 101, 301, 501]),
            'learning_rate': Real(0.001, 0.1, step=0.005),
            'n_estimators': 100,
            'max_depth': -1,
            'tree_learner': 'data',  # add for dask
            # subsample_for_bin=200000, objective=None, class_weight=None,
            # min_split_gain=0., min_child_weight=1e-3, min_child_samples=20,
        }
        lightgbm_est = LightGBMDaskEstimator(task='binary',
                                             fit_kwargs=lightgbm_fit_kwargs,
                                             **lightgbm_init_kwargs)

        xgb_init_kwargs = {
            'tree_method': 'approx',  # add for dask
        }
        xgb_est = XGBoostDaskEstimator(task='binary',
                                       fit_kwargs=xgb_fit_kwargs,
                                       **xgb_init_kwargs)

        # catboost_init_kwargs = {'silent': True}
        # catboost_est = CatBoostEstimator(task='binary', fit_kwargs=catboost_fit_kwargs,
        #                                  **catboost_init_kwargs)
        # or_est = ModuleChoice([lightgbm_est, xgb_est, catboost_est],
        #                       name='estimator_options')(p3)
        or_est = ModuleChoice([lightgbm_est, xgb_est], name='estimator_options')(p3)

        space.set_inputs(input)
    return space
def get_space_column_transformer():
    space = HyperSpace()
    with space.as_default():
        input = HyperInput(name='input1')
        p1 = Pipeline([SimpleImputer(name='imputer1'), StandardScaler(name='scaler1')],
                      columns=['a', 'b', 'c'], name='p1')(input)
        p2 = Pipeline([SimpleImputer(name='imputer2'), StandardScaler(name='scaler2')],
                      columns=['c', 'd'], name='p2')(input)
        p3 = ColumnTransformer()([p1, p2])
        space.set_inputs(input)
    return space
def __call__(self, *args, **kwargs):
    space = HyperSpace()
    with space.as_default():
        hyper_input = HyperInput(name='input1')

        estimators = []
        if self.enable_dt:
            estimators.append(self.dt)
        if self.enable_dtr:
            estimators.append(self.dtr)
        if self.enable_lr:
            estimators.append(self.lr)
        if self.enable_nn:
            estimators.append(self.nn)

        modules = [ModuleSpace(name=f'{e["cls"].__name__}', **e) for e in estimators]
        outputs = ModuleChoice(modules)(hyper_input)
        space.set_inputs(hyper_input)
    return space
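# Assumed shape of the estimator entries used above (hypothetical example;
# the real attributes are defined elsewhere on the class): each of self.dt,
# self.dtr, self.lr and self.nn is a dict of ModuleSpace kwargs that carries
# the estimator class under the 'cls' key, e.g.
#
#     self.dt = dict(cls=DecisionTreeClassifier,
#                    max_depth=Choice([3, 5, 10]))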
def tiny_dt_space(**hyperparams):
    space = HyperSpace()
    with space.as_default():
        dt_module = DTModuleSpace(
            nets=['dnn_nets'],
            auto_categorize=Bool(),
            cat_remain_numeric=Bool(),
            auto_discrete=False,
            apply_gbm_features=False,
            stacking_op=Choice([DT_consts.STACKING_OP_ADD,
                                DT_consts.STACKING_OP_CONCAT]),
            output_use_bias=Bool(),
            apply_class_weight=Bool(),
            earlystopping_patience=Choice([1, 3, 5]))
        dnn = DnnModule(hidden_units=Choice([10, 20]),
                        reduce_factor=1,
                        dnn_dropout=Choice([0, 0.3]),
                        use_bn=False,
                        dnn_layers=2,
                        activation='relu')(dt_module)
        fit = DTFit(**hyperparams)(dt_module)
    return space
def get_space():
    space = HyperSpace()
    with space.as_default():
        filters = 64
        in1 = Input(shape=(28, 28, 1))
        conv = conv_cell(hp_dict, 'normal', 0, 0, 'L', [in1, in1], filters)
        space.set_inputs([in1, in1])
        space.set_outputs(conv)
    return space
def enas_micro_search_space(arch='NRNR', input_shape=(28, 28, 1), init_filters=64,
                            node_num=4, data_format=None, classes=10,
                            classification_dropout=0, hp_dict={},
                            use_input_placeholder=True, weights_cache=None):
    space = HyperSpace()
    with space.as_default():
        if use_input_placeholder:
            input = Input(shape=input_shape, name='0_input')
        else:
            input = None
        stem, input = stem_op(input, init_filters, data_format)
        node0 = stem
        node1 = stem
        reduction_no = 0
        normal_no = 0

        # 'N' adds a normal cell; any other letter ('R') adds a reduction cell
        for l in arch:
            if l == 'N':
                normal_no += 1
                type = 'normal'
                cell_no = normal_no
                is_reduction = False
            else:
                reduction_no += 1
                type = 'reduction'
                cell_no = reduction_no
                is_reduction = True
            filters = (2 ** reduction_no) * init_filters

            if is_reduction:
                node0 = FactorizedReduction(filters,
                                            f'{normal_no + reduction_no}_{type}_C{cell_no}_0',
                                            data_format)(node0)
                node1 = FactorizedReduction(filters,
                                            f'{normal_no + reduction_no}_{type}_C{cell_no}_1',
                                            data_format)(node1)
            x = conv_layer(hp_dict, f'{normal_no + reduction_no}_{type}', cell_no,
                           [node0, node1], filters, node_num, is_reduction)
            node0 = node1
            node1 = x
        logit = classification(x, classes, classification_dropout, data_format)
        space.set_inputs(input)
        if weights_cache is not None:
            space.weights_cache = weights_cache
    return space
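# Minimal usage sketch (an assumption based on the signature above): the arch
# string interleaves normal ('N') and reduction ('R') cells, and hp_dict
# presumably acts as a shared registry so repeated builds reuse the same
# hyperparameters.
def _demo_enas_space(shared_hp_dict):
    return enas_micro_search_space(arch='NR', init_filters=32,
                                   node_num=2, hp_dict=shared_hp_dict)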
def get_space():
    space = HyperSpace()
    with space.as_default():
        id1 = Identity(p1=Int(0, 10), p2=Choice(['a', 'b']))
        id2 = Identity(p3=Real(0., 1.), p4=Bool())(id1)
    return space
def get_space_2inputs():
    space = HyperSpace()
    with space.as_default():
        Pipeline([tow_inputs(), StandardScaler()])
    return space
def get_space():
    space = HyperSpace()
    with space.as_default():
        Pipeline([SimpleImputer(), StandardScaler()])
    return space
def get_space_2outputs():
    space = HyperSpace()
    with space.as_default():
        Pipeline([tow_outputs()])
    return space
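# Minimal sketch of how these builders are typically exercised (added for
# illustration; tow_inputs/tow_outputs are helpers defined alongside the
# original snippets, and the 2-inputs/2-outputs spaces read like negative
# cases for pipelines that must have exactly one input and one output):
def _demo_sample(space_fn):
    space = space_fn()
    space.random_sample()      # assign all hyperparameters at random
    assert space.all_assigned  # every hyperparameter now has a value
    return space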