import numpy as np

from shadho import spaces


def convert_config_to_shadho(config):
    """Convert an HPOBench config to a SHADHO search space.

    Parameters
    ----------
    config : dict or `hpobench.Configuration`
        HPOBench model config drawn from the benchmark's configuration space.

    Returns
    -------
    space : dict or pyrameter.Specification
        The SHADHO translation of the HPOBench search space configuration.
    """
    # Create the SHADHO search space here and return it.
    space = {}
    for param in config.get_all_unconditional_hyperparameters():
        hp = config.get_hyperparameter(param)
        param_type = type(hp).__name__
        lower = hp.lower
        upper = hp.upper
        log = hp.log
        print(param, param_type, log)

        # TODO: THE BELOW BREAKS FOR DIFFERENT TESTS WHEN USING LOG SPACES
        if param_type == 'UniformFloatHyperparameter' and not log:
            space[param] = spaces.uniform(np.float64(lower), np.float64(upper))
        elif param_type == 'UniformIntegerHyperparameter' and not log:
            space[param] = spaces.randint(int(lower), int(upper))
        elif param_type == 'UniformIntegerHyperparameter' and log:
            space[param] = spaces.randint(int(lower), int(upper))
        elif param_type == 'UniformFloatHyperparameter' and log:
            space[param] = spaces.uniform(np.float64(lower), np.float64(upper))
        else:
            raise TypeError(
                f'Unhandled HPOBench hyperparameter type {param_type}. '
                'Submit a bug report with the benchmark name and this message.'
            )

    return space
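
# A minimal usage sketch, not part of the original example: it assumes the
# HPOBench benchmark exposes its search space as a ConfigSpace.ConfigurationSpace
# (typically via get_configuration_space()). The helper name and the 'C' and
# 'degree' hyperparameters below are purely illustrative; note that the
# log=True domain is still mapped to a linear domain, per the TODO above.
def _example_convert_usage():
    import ConfigSpace as CS
    from ConfigSpace.hyperparameters import (UniformFloatHyperparameter,
                                             UniformIntegerHyperparameter)

    # Build a small stand-in configuration space with one float and one
    # integer hyperparameter.
    cs = CS.ConfigurationSpace(seed=0)
    cs.add_hyperparameter(
        UniformFloatHyperparameter('C', lower=1e-3, upper=1e3, log=True))
    cs.add_hyperparameter(
        UniformIntegerHyperparameter('degree', lower=2, upper=5))

    # Translate it: 'C' becomes a spaces.uniform domain and 'degree' a
    # spaces.randint domain in the returned SHADHO search space.
    return convert_config_to_shadho(cs)
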
if __name__ == '__main__':
    # As part of the architecture search, we are interested in optimizing the
    # number of layers, the size/shape of each layer, the activation function,
    # and whether or not to attach a batch normalization layer.

    # As with the SVM example, search spaces can be defined once and reused
    # in multiple places.
    activations = ['glu', 'leaky_relu', 'prelu', 'relu', 'selu', 'sigmoid',
                   'tanh']
    batch_norm = spaces.log10_uniform(-4, 4)

    # For each convolutional layer, we sample over the number of convolutional
    # kernels, the kernel shape, the activation function, and batch
    # normalization.
    conv_layer = spaces.scope(
        out_filters=spaces.log2_randint(4, 10),
        kernel_shape=spaces.randint(1, 10, step=2),
        activation=activations,
        batch_norm=batch_norm
    )

    # Additionally, we do not want to worry about computing padding during
    # model construction. SHADHO offers *dependent* hyperparameter domains
    # that compute their value based on the value of another domain. The
    # `padding` domain here implements "same" padding.
    conv_layer.padding = spaces.dependent(
        conv_layer.kernel_shape,
        callback=lambda x: int(x // 2))

    # Searching over a single convolutional layer is not enough, though: we
    # want to search over the number of layers as well. SHADHO offers a
        'kernel': 'rbf',  # add the kernel name for convenience
        'C': C,
        'gamma': gamma
    },
    'sigmoid': {
        'kernel': 'sigmoid',  # add the kernel name for convenience
        'C': C,
        'gamma': gamma,
        'coef0': coef0
    },
    'poly': {
        'kernel': 'poly',  # add the kernel name for convenience
        'C': C,
        'gamma': gamma,
        'coef0': coef0,
        'degree': spaces.randint(2, 15)
    },
}

# Set up the SHADHO driver like usual.
if args.pyrameter_model_sort in ['uniform_random', 'perceptron']:
    use_complexity = False
    use_priority = False
else:
    use_complexity = True
    use_priority = True

opt = Shadho(
    'bash svm_task.sh',
    space,
    use_complexity=use_complexity,
    prelu=spaces.scope(
        alpha_initializer=initializers,
        alpha_regularizer=regularizers,
        alpha_constraint=constraints),
    relu='relu',
    sigmoid='sigmoid',
    softmax='softmax',
    softplus='softplus',
    softsign='softsign',
    tanh='tanh',
    thresholded_relu=spaces.scope(theta=spaces.uniform(-1, 1)))

# Set up a standard convolutional block that will search over all params that
# can be tuned for U-Net.
conv = spaces.scope(
    kernel_size=spaces.randint(1, 12, 2),
    activation=activations,
    kernel_initializer=initializers,
    bias_initializer=initializers,
    kernel_regularizer=regularizers,
    bias_regularizer=regularizers,
    activity_regularizer=regularizers,
    kernel_constraint=constraints,
    bias_constraint=constraints)

# Search over the built-in optimizers, parameterizing SGD.
optimizers = spaces.scope(
    exclusive=True,
    sgd=spaces.scope(
        lr=spaces.log10_uniform(-4, -1),
        momentum=spaces.uniform(0, 1),