Example #1
# Imports assumed by the test excerpts below (the snippets appear to come from
# the test suite of the mango hyperparameter-tuning library; module paths are
# best-guess assumptions):
import numpy as np
from scipy.stats import uniform, loguniform
from pytest import approx

from mango.domain.domain_space import domain_space
# assumed location of mango's own log-uniform helper, used in Example #7
from mango.domain.distribution import loguniform as mango_loguniform


def test_domain():
    params = {
        'a': range(10),
        'b': np.random.randint(0, 1e6, size=50),
        'd': [1, 0.1, 2.0],
        'e': ['a', 1, 'b', 'c', None],
        'f': ['1', '-1'],
        'h': [True, False]
    }
    ds = domain_space(params, domain_size=1000)
    assert all(k in ds.mapping_int for k in ['a', 'b'])
    assert all(k in ds.mapping_categorical for k in ['d', 'e', 'f', 'h'])
    samples = ds.get_domain()

    for sample in samples:
        for param in params.keys():
            assert (sample[param] in params[param])

    params = {
        'a': [1],
    }
    ds = domain_space(params, domain_size=1000)
    assert all(k in ds.mapping_int for k in ['a'])
    samples = ds.get_domain()

    for sample in samples:
        for param in params.keys():
            assert (sample[param] in params[param])
Example #2
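The test below references module-level fixtures that the excerpt does not show. A minimal sketch of what they presumably look like (names taken from the test body; concrete values are purely illustrative):

param_dict = {
    'learning_rate': uniform(0.01, 0.5),  # hypothetical continuous parameter
    'layers': range(1, 6),                # hypothetical discrete parameter
}
domain_size = 100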
def test_domain():
    ds = domain_space(param_dict, domain_size)

    # getting the samples from the domain
    domain_list = ds.get_domain()

    # the number of samples should equal domain_size
    assert len(domain_list) == domain_size

    # change this into the GP domain space
    domain_np = ds.convert_GP_space(domain_list)

    # in the Gaussian process (GP) space, verify sample count and structure
    assert domain_np.shape[0] == domain_size

    # test the reverse conversion
    domain_ps = ds.convert_PS_space(domain_np)

    # domain_ps and domain_list should be the same
    assert type(domain_list) == type(domain_ps)
    assert len(domain_list) == len(domain_ps)

    # samples should come from param_dict and structure should be preserved across transformations
    if len(domain_list) > 0:
        l1 = domain_list[0]
        l2 = domain_ps[0]
        assert type(l1) == type(l2)
        assert len(l1.keys()) == len(l2.keys())
        # all keys should be drawn from the param_dict
        assert len(l1.keys()) == len(param_dict.keys())

        for key in l1.keys():
            assert key in param_dict.keys()

    ps = dict(x=range(1, 100), y=['a', 'b'], z=uniform(-10, 20))
    ds = domain_space(ps, 100)

    x = ds.get_domain()
    x_gp = ds.convert_GP_space(x)
    x_rebuilt = ds.convert_PS_space(x_gp)
    for x1, x2 in zip(x, x_rebuilt):
        for k in x1.keys():
            v1 = x1[k]
            v2 = x2[k]
            if isinstance(v1, np.float64):
                assert v1 == approx(v2, abs=1e-5)
            else:
                if not v1 == v2:
                    print(k)
                    print(x)
                    print(x_gp)
                    print(x_rebuilt)
                assert v1 == v2
Example #4
    def runRandomOptimizer(self):
        results = dict()
        # domain space abstraction
        ds = domain_space(self.conf_Dict['param_dict'], self.conf_Dict['domain_size'])

        X_sample_list = []
        Y_sample_list = []

        # running the iterations
        for i in range(self.conf_Dict['num_iteration']):
            # get a batch of random values to try
            random_hyper_parameters = ds.get_random_sample(self.conf_Dict['batch_size'])
            X_list, Y_list = self.runUserObjective(random_hyper_parameters)

            X_sample_list = X_sample_list + X_list
            Y_sample_list = Y_sample_list + Y_list

        # after all iterations, record the values tried and the best hyperparameter values
        results['params_tried'] = X_sample_list
        results['objective_values'] = Y_sample_list

        if len(Y_sample_list) > 0:
            results['best_objective'] = np.max(np.array(Y_sample_list))
            results['best_params'] = X_sample_list[np.argmax(np.array(Y_sample_list))]

        return results
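
For context, runRandomOptimizer assumes self.conf_Dict carries at least the keys used above. A purely illustrative example:

conf_Dict = {
    'param_dict': {'x': range(10)},  # hypothetical search space
    'domain_size': 1000,
    'batch_size': 4,
    'num_iteration': 20,
}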
Example #5
def test_gp_samples_to_params():
    space = {
        'a': range(10),
        'b': uniform(-10, 20),
        'c': ['cat1', 1, 'cat2'],
        'e': [1, 2, 3],
        'f': ['const'],
        'g': loguniform(0.001, 100),
        'h': [10]
    }
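    # The rows of X below are hand-picked points in the normalized GP space.
    # Assuming the layout used by domain_space (parameters in alphabetical
    # order, categoricals one-hot encoded, numeric ranges scaled to [0, 1]),
    # the nine columns are: a | b | c (3 one-hot) | e | f | g (log-scaled) | h.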
    X = np.array([
        # 4, -8, 'cat2', 1, 'const', 1, 10
        [0.4444, 0.1, 0, 0, 1, 0, 1, 0.6, 0],
        # 0, -10.0, 'cat1', 3, 'const', 0.001, 10
        [0.0, 0.0, 1, 0, 0, 1, 1, 0.0, 0],
        # 9, 10.0, 1, 2, 'const', 100, 10
        [1.0, 1.0, 0, 1, 0, 0.5, 1, 1.0, 0],
    ])

    expected = [
        dict(a=4, b=-8.0, c='cat2', e=1, f='const', g=1, h=10),
        dict(a=0, b=-10.0, c='cat1', e=3, f='const', g=0.001, h=10),
        dict(a=9, b=10.0, c=1, e=2, f='const', g=100, h=10),
    ]

    ds = domain_space(space, domain_size=1000)

    params = ds.convert_to_params(X)

    for act, exp in zip(params, expected):
        for k, v in act.items():
            if k == 'g':
                assert np.isclose(v, exp[k])
            else:
                assert v == exp[k]
Example #6
    def runRandomOptimizer(self):
        results = dict()
        # domain space abstraction
        ds = domain_space(self.param_dict, self.config.domain_size)

        X_sample_list = []
        Y_sample_list = []

        # running the iterations
        pbar = tqdm(range(self.config.num_iteration))
        for i in pbar:
            # get a batch of random values to try
            random_hyper_parameters = ds.get_random_sample(self.config.batch_size)
            X_list, Y_list = self.runUserObjective(random_hyper_parameters)

            X_sample_list = np.append(X_sample_list, X_list)
            Y_sample_list = np.append(Y_sample_list, Y_list)

            pbar.set_description("Best score: %s" % np.max(np.array(Y_sample_list)))

        # after all iterations, record the values tried and the best hyperparameter values
        results['params_tried'] = X_sample_list
        results['objective_values'] = Y_sample_list

        if len(Y_sample_list) > 0:
            results['best_objective'] = np.max(Y_sample_list)
            results['best_params'] = X_sample_list[np.argmax(Y_sample_list)]

        if self.maximize_objective is False:
            results['objective_values'] = -1 * results['objective_values']
            results['best_objective'] = -1 * results['best_objective']

        return results
Example #7
def test_mango_loguniform():
    space = {
        'a': mango_loguniform(-3, 6)
    }
    ds = domain_space(space, domain_size=1000)
    samples = ds.get_domain()
    assert all(1e-3 < sample['a'] < 1e3 for sample in samples)
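
The asserted bounds follow if mango_loguniform(a, b) behaves like 10 ** uniform(a, b), so that mango_loguniform(-3, 6) spans 1e-3 to 1e3. This is an inference from the test, not a documented contract; a hypothetical equivalent:

def _sketch_mango_loguniform_equivalent():
    # 10**u with u ~ U[-3, 3]; samples fall inside (1e-3, 1e3)
    samples = 10 ** uniform(-3, 6).rvs(1000)
    assert all(1e-3 < s < 1e3 for s in samples)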
Example #8
def test_np_space():
    space = {
        'x': np.array(['a', 'b', 'c']),
        'y': np.arange(100),
    }

    ds = domain_space(space, domain_size=10)
    params = ds.get_domain()
    assert len(params) == 10

    gp_params = ds.convert_GP_space(params)
    assert gp_params.shape == (10, 4)
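
The expected shape (10, 4) follows from the encoding assumed above: the categorical 'x' expands to three one-hot columns and the integer range 'y' contributes a single scaled column.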
Example #9
def test_domain():
    params = {
        'a': (1, 2, 4),
        'b': np.random.randint(0, 1e6, size=50),
        'c': np.random.uniform(low=-100., high=100., size=(50, ))
    }
    print(params)
    ds = domain_space(params, domain_size=100)
    samples = ds.get_domain()

    for sample in samples:
        for param in params.keys():
            assert (sample[param] in params[param])
Example #10
def test_gp_space():
    space = {
        'f': range(10),
        'h': uniform(-10, 20),
        'e': ['cat1', 1, 'cat2'],
        'c': [1, 2, 3],
        'a': ['const'],
        'g': loguniform(0.001, 100),
        'b': [10],
        'd': uniform(0, 1),
        'i': [True, False]
    }
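    # Assumed GP-space layout (parameters in alphabetical order, categoricals
    # one-hot encoded, numeric ranges scaled to [0, 1]):
    # a | b | c | d | e (3 one-hot) | f | g | h | i (2 one-hot) -> 12 columns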

    ds = domain_space(space, domain_size=10000)
    X = ds.sample_gp_space()

    assert (X <= 1.0).all()
    assert (X >= 0.0).all()
    assert (X[:, 0] == 1.).all()  # a
    assert (X[:, 1] == 0.).all()  # b
    assert np.isin(X[:, 2], [0.0, 0.5, 1.0]).all()  # c
    assert np.isin(X[:, 4:7], np.eye(3)).all()  # e
    assert X.shape == (ds.domain_size, 12)

    params = ds.convert_to_params(X)

    for param in params:
        assert param['a'] == 'const'
        assert param['b'] == 10
        assert param['c'] in space['c']
        assert 0.0 <= param['d'] <= 1.0
        assert param['e'] in space['e']
        assert param['f'] in space['f']
        assert 0.001 <= param['g'] <= 100
        assert -10 <= param['h'] <= 10
        assert param['i'] in space['i']

    X2 = ds.convert_to_gp(params)
    assert np.isclose(X2, X).all()
Example #11
    def __init__(self, param_dict, objective, conf_dict=None):

        self.param_dict = param_dict
        self.objective_function = objective
        self.maximize_objective = True

        if conf_dict is None:
            conf_dict = {}

        self.config = Tuner.Config(**conf_dict)

        if self.config.domain_size is None:
            self.config.domain_size = self.calculateDomainSize(self.param_dict)

        # overwrite batch size if given as a property of objective function
        if hasattr(objective, 'batch_size'):
            self.config.batch_size = objective.batch_size

        # build the domain space abstraction
        self.ds = domain_space(self.param_dict, self.config.domain_size)

        # stores the results of using the tuner
        self.results = dict()
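
A minimal sketch of how this constructor is typically exercised (the search space and objective are illustrative; the batch contract is inferred from the runUserObjective calls in the other examples):

param_dict = {'lr': uniform(0.001, 0.1)}  # hypothetical search space

def objective(params_batch):
    # receives a list of hyperparameter dicts, returns one score per dict
    return [-(p['lr'] - 0.05) ** 2 for p in params_batch]

tuner = Tuner(param_dict, objective, conf_dict=dict(num_iteration=10))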
Example #12
    def runBayesianOptimizer(self):
        results = dict()
        # domain space abstraction
        ds = domain_space(self.conf_Dict['param_dict'], self.conf_Dict['domain_size'])

        # getting first few random values
        random_hyper_parameters = ds.get_random_sample(self.conf_Dict['initial_random'])
        X_list, Y_list = self.runUserObjective(random_hyper_parameters)

        # in case some initial random evaluations failed, retry with different samples
        n_tries = 1
        while len(Y_list) < self.conf_Dict['initial_random'] and n_tries < 3:
            random_hps = ds.get_random_sample(self.conf_Dict['initial_random'] - len(Y_list))
            X_list2, Y_list2 = self.runUserObjective(random_hps)
            random_hyper_parameters.extend(random_hps)
            X_list.extend(X_list2)
            Y_list.extend(Y_list2)
            n_tries += 1

        if len(Y_list) == 0:
            raise ValueError("No valid configuration found to initiate the Bayesian Optimizer")

        # convert the successfully evaluated hyperparameters to the GP space
        X_init = ds.convert_GP_space(X_list)
        Y_init = np.array(Y_list).reshape(len(Y_list), 1)

        # setting the initial random hyper parameters tried
        results['random_params'] = X_list
        results['random_params_objective'] = Y_list

        Optimizer = BayesianLearning(surrogate=self.conf_Dict.get('surrogate'))
        Optimizer.domain_size = self.conf_Dict['domain_size']

        X_sample = X_init
        Y_sample = Y_init

        hyper_parameters_tried = random_hyper_parameters
        objective_function_values = Y_list

        # running the iterations
        for i in range(self.conf_Dict['num_iteration']):
            # Domain Space
            domain_list = ds.get_domain()
            X_domain_np = ds.convert_GP_space(domain_list)

            # Black-Box Optimizer
            X_next_batch = Optimizer.get_next_batch(X_sample, Y_sample, X_domain_np,
                                                    batch_size=self.conf_Dict['batch_size'])
            # X_next_batch = Optimizer.get_next_batch_clustering(X_sample,Y_sample,X_domain_np,batch_size=self.conf_Dict['batch_size'])

            # Scheduler
            X_next_PS = ds.convert_PS_space(X_next_batch)

            # Evaluate the Objective function
            # Y_next_batch, Y_next_list = self.runUserObjective(X_next_PS)
            X_next_list, Y_next_list = self.runUserObjective(X_next_PS)
            Y_next_batch = np.array(Y_next_list).reshape(len(Y_next_list), 1)
            # update X_next_batch to successfully evaluated values
            X_next_batch = ds.convert_GP_space(X_next_list)

            # update the bookkeeping of values tried
            hyper_parameters_tried = hyper_parameters_tried + X_next_list
            objective_function_values = objective_function_values + Y_next_list

            # Appending to the current samples
            X_sample = np.vstack((X_sample, X_next_batch))
            Y_sample = np.vstack((Y_sample, Y_next_batch))

        results['params_tried'] = hyper_parameters_tried
        results['objective_values'] = objective_function_values

        results['best_objective'] = np.max(Y_sample)
        results['best_params'] = hyper_parameters_tried[np.argmax(Y_sample)]

        # save the optimizer and ds on the tuner object so the surrogate function and domain details can be inspected later
        self.Optimizer = Optimizer
        self.ds = ds
        return results
Example #13
    def runExponentialTuner(self):
        """
        Steps:
        1-Create DS obj for each obj of param_dict_list
        2-Sample randomly from the DS objects and evaluate the objective functions.
        3- Now use the GPR for each objective to select next batch
        4- Select the best values based on fxn(surrogate) from GPR.
        5- Modify the surrogate selections so as to avoid getting struck.
        """

        results = dict()

        #random params tried
        results['random_params'] = []

        #random objective values
        results['random_params_objective'] = []

        #random objective function id
        results['random_objective_fid'] = []

        results['params_tried'] = []
        results['objective_values'] = []
        results['objective_fid'] = []

        #print('*** Entering Metatuner *** ')
        num_of_random = self.initial_random

        ds = []
        for i in self.param_dict_list:
            domain_size = self.calculateDomainSize(i)
            ds.append(domain_space(i, domain_size))

        #dict of list for each obj function
        X_dict_list = {}
        Y_dict_list = {}

        #dict of array for each obj function
        X_dict_array = {}
        Y_dict_array = {}

        #stores the maximum value of the objective function for each objective
        Y_dict_array_max = {}

        #randomly evaluate the initial points for objectives
        for i in range(len(self.param_dict_list)):

            #used later
            self.first_itr[i] = True

            if self.debug:
                #fixing random seeds to reproduce the results for debugging
                np.random.seed(self.seed)
                random.seed(self.seed)

            ds_i = ds[i]

            #make sure not all values in y are the same
            done = False

            #count how many times random sampling is repeated
            num_times = 0
            while not done:
                random_hyper_parameters = ds_i.get_random_sample(num_of_random)
                y_list = self.objective_list[i](random_hyper_parameters)
                #print(i, random_hyper_parameters, y_list)
                num_times = num_times + 1

                #after 10 attempts still tied: force completion by perturbing one value
                if num_times == 10:
                    done = True
                    y_list[0] = y_list[0] - 0.001

                for ind in range(num_of_random):
                    if y_list[0] != y_list[ind]:
                        done = True

            #print('Random_hyper_parameters:',random_hyper_parameters, y_list)
            x_list = random_hyper_parameters

            #print(i, random_hyper_parameters, x_list, y_list)

            X_dict_list[i] = []
            X_dict_list[i].append(x_list)

            Y_dict_list[i] = []
            Y_dict_list[i].append(y_list)

            self.objective_values_list += y_list

            results['random_params'] = results['random_params'] + x_list
            results['random_params_objective'] = results[
                'random_params_objective'] + y_list
            results['random_objective_fid'] = results[
                'random_objective_fid'] + [i] * len(x_list)

            #x_array2 = ds_i.convert_GP_space(random_hyper_parameters)
            #x_array = ds_i.convert_to_gp(random_hyper_parameters)

            x_array = ds_i.convert_GP_space(random_hyper_parameters)

            X_dict_array[i] = x_array

            #print(x_array2)
            #print(x_array)

            y_array = np.array(y_list).reshape(len(y_list), 1)
            Y_dict_array[i] = y_array

            #the random samples are added as-is
            Y_dict_array_max[i] = y_array

        #Initialize the number of Optimizers
        Optimizer_list = []

        for i in range(len(self.objective_list)):
            Optimizer_i = BayesianLearning(surrogate=self.surrogate)
            Optimizer_i.domain_size = ds[i].domain_size
            Optimizer_list.append(Optimizer_i)

            #print('Domain Size:',ds[i].domain_size)

        # Store per-optimizer attributes: iteration count and modified exploration of the external surrogate
        Optimizer_exploration = []
        Optimizer_iteration = []
        for i in range(len(self.objective_list)):
            Optimizer_exploration.append(1.0)
            Optimizer_iteration.append(1.0)

        results['params_tried'] = results['random_params']
        results['objective_values'] = results['random_params_objective']
        results['objective_fid'] = results['random_objective_fid']

        #print(Optimizer_exploration)

        #Now run the optimization iterations
        pbar = tqdm(range(self.num_of_iterations))

        for itr in pbar:
            #next x values returned from the individual functions
            #x values depend on the types in each param dict, so a list is used
            x_values_list = []

            #Next promising surrogate values for each objective: used to select functions
            s_values_array = np.empty((0, 1), float)

            #keep track of objective indices: maps each x value to its objective function
            x_obj_indices = []

            #Next promising surrogate values for each objective in list form
            s_values_list = []

            #sample individual domains and evaluate surrogate functions.
            #we get the next promising samples along with the surrogate function values
            max_val_y = self.get_max_y_value(Y_dict_array)

            #In GPs this value is used to scale the Y values
            max_val_y_scaled = max_val_y
            #In this setting the default GP performs reasonably well
            if max_val_y_scaled < 1.0:
                max_val_y_scaled = 1.0

            for j in range(len(ds)):
                if self.debug:
                    #fixing random seeds to reproduce the results for debugging
                    np.random.seed(self.seed)
                    random.seed(self.seed)

                #X_domain_np = ds[j].sample_gp_space()

                if self.first_itr[j]:
                    #if j==self.last_used or self.first_itr[j]:
                    # Domain Space
                    domain_list = ds[j].get_domain()
                    self.store_domain[j] = ds[j].convert_GP_space(domain_list)
                    #self.store_domain[j] = ds[j].sample_gp_space()
                    self.first_itr[j] = True
                    #print('doing domain sampling')

                X_domain_np = self.store_domain[j]

                #next batch of x for this objective along with its surrogate value
                X_next_batch, surr_value, surr_value_ext, u_values = Optimizer_list[
                    j].get_next_batch_MetaTuner(
                        X_dict_array[j],
                        Y_dict_array[j] / max_val_y_scaled,
                        X_domain_np,
                        self.obj_batch_size,
                        Optimizer_exploration[j],
                        Optimizer_iteration[j],
                        classifier_index=j,
                        last_used_index=j)  #self.last_used

                #used for displaying and debugging
                #s_values_list.append([int(surr_value_ext[0]*100.0),int(surr_value[0]*100.0)])
                s_values_list.append([int(surr_value_ext[0] * 100.0)])

                #this is used to rank the objective functions
                s_value = np.array(surr_value_ext)
                s_value = s_value.reshape(-1, s_value.shape[0])

                s_values_array = np.append(s_values_array, s_value)
                x_values_list = x_values_list + X_next_batch

                #keep track of objective function for corresponding surrogate and x values
                for k in range(self.obj_batch_size):
                    x_obj_indices.append(j)

            #sort the surrogate values in descending order, to select the best value from them
            v_sorting_index = np.argsort(-s_values_array, axis=0)

            #now select the self.batch_size values from x_values_list based on v_sorting_index
            v_sorting_index = v_sorting_index[:self.batch_size]

            #with probability exploration_rate select a random objective, otherwise select the max surrogate

            prob_selection = random.random()

            random_selected = False

            #do the random evaluation of the functions
            if prob_selection < self.exploration_rate:
                selected_obj = random.randint(
                    0, (len(ds) - 1))  #0 and len(ds) - 1 inclusive

                if self.exploration_rate > self.exploration_rate_min:
                    self.exploration_rate = self.exploration_rate * self.decay_rate

                else:
                    self.exploration_rate = self.exploration_rate_min

                random_selected = True

            else:
                selected_obj = v_sorting_index[0]

            #keep track of objective indices selected in current iteration
            loc_indices = []
            curr_x_next_np = x_values_list[selected_obj]

            #convert this into the parameter space for scheduling
            #see the function index for this x value
            index = selected_obj

            # updating last used classifier
            self.last_used = index

            #keep track of local indices
            loc_indices.append(index)

            #keep track of indices in a global data structure for visualizing function selection
            self.objectives_evaluated.append(index)

            curr_x_next = ds[index].convert_PS_space(curr_x_next_np)

            #run the next curr_x_next value for the objective function
            y_list = self.objective_list[index](curr_x_next)

            self.objective_values_list += y_list

            results['params_tried'] = results['params_tried'] + curr_x_next
            results['objective_values'] = results['objective_values'] + y_list
            results['objective_fid'] = results['objective_fid'] + [index]

            curr_y_array = np.array(y_list).reshape(len(y_list), 1)
            #append curr_x_next_np, curr_x_next, y_list to the appropriate data structures for bookkeeping

            X_dict_array[index] = np.vstack(
                (X_dict_array[index], curr_x_next_np))
            Y_dict_array[index] = np.vstack(
                (Y_dict_array[index], curr_y_array))

            Y_dict_array_max[index] = np.vstack(
                (Y_dict_array_max[index], np.max(Y_dict_array[index])))

            #scale up exploration for objectives that were not selected; reset it to 1 for those that were
            for i in range(len(self.objective_list)):
                if i in loc_indices:
                    Optimizer_exploration[i] = 1.0
                    Optimizer_iteration[i] += 1.0
                else:
                    Optimizer_exploration[i] = Optimizer_exploration[
                        i] + 1.1 * self.exploration_rate

            pbar.set_description(": Best score: %s" % max_val_y)

            #print(itr, s_values_list, Optimizer_iteration, Optimizer_exploration, max_val_y)#, Y_dict_array[selected_obj])

            #print(itr, s_values_list, random_selected, self.exploration_rate, prob_selection, Optimizer_iteration , max_val_y)

            #print(itr, s_values_list, random_selected, Optimizer_iteration , int(self.exploration_rate*100.0), max_val_y)

            self.X_dict_array = X_dict_array
            self.Y_dict_array = Y_dict_array
            self.Y_dict_array_max = Y_dict_array_max
            self.ds = ds

        self.X_dict_array = X_dict_array
        self.Y_dict_array = Y_dict_array
        self.Y_dict_array_max = Y_dict_array_max
        self.ds = ds

        index_best = np.argmax(results['objective_values'])
        results['best_objective'] = results['objective_values'][index_best]
        results['best_params'] = results['params_tried'][index_best]
        results['best_objective_fid'] = results['objective_fid'][index_best]

        return results
Example #14
    def runBayesianOptimizer(self):
        results = dict()

        # domain space abstraction
        ds = domain_space(self.param_dict, self.config.domain_size)

        # getting first few random values
        random_hyper_parameters = ds.get_random_sample(self.config.initial_random)
        X_list, Y_list = self.runUserObjective(random_hyper_parameters)

        # in case some initial random evaluations failed, retry with different samples
        n_tries = 1
        while len(Y_list) < self.config.initial_random and n_tries < 3:
            random_hps = ds.get_random_sample(self.config.initial_random - len(Y_list))
            X_list2, Y_list2 = self.runUserObjective(random_hps)
            random_hyper_parameters.extend(random_hps)
            X_list = np.append(X_list, X_list2)
            Y_list = np.append(Y_list, Y_list2)
            n_tries += 1

        if len(Y_list) == 0:
            raise ValueError("No valid configuration found to initiate the Bayesian Optimizer")

        # convert the successfully evaluated hyperparameters to the GP space
        X_init = ds.convert_to_gp(X_list)
        Y_init = Y_list.reshape(len(Y_list), 1)

        # setting the initial random hyper parameters tried
        results['random_params'] = X_list
        results['random_params_objective'] = Y_list

        Optimizer = BayesianLearning(surrogate=self.config.surrogate, n_features=X_init.shape[1])
        Optimizer.domain_size = self.config.domain_size

        X_sample = X_init
        Y_sample = Y_init

        hyper_parameters_tried = random_hyper_parameters
        objective_function_values = Y_list
        surrogate_values = Y_list

        x_failed_evaluations = np.array([])

        # running the iterations
        pbar = tqdm(range(self.config.num_iteration))
        for i in pbar:
            # Domain Space
            X_domain_np = ds.sample_gp_space()

            # Black-Box Optimizer
            if self.config.scale_y:
                scale = np.max(Y_sample) - np.min(Y_sample)
                if scale == 0:
                    scale = np.max(np.abs(Y_sample))
                Y_scaled = (Y_sample - np.mean(Y_sample)) / scale
            else:
                Y_scaled = Y_sample

            if self.config.strategy_is_penalty:
                X_next_batch = Optimizer.get_next_batch(X_sample, Y_scaled, X_domain_np,
                                                    batch_size=self.config.batch_size)
            elif self.config.strategy_is_clustering:
                X_next_batch = Optimizer.get_next_batch_clustering(X_sample,Y_scaled, X_domain_np,
                                                                   batch_size=self.config.batch_size)
            else:
                # assume penalty approach
                X_next_batch = Optimizer.get_next_batch(X_sample, Y_scaled, X_domain_np,
                                                        batch_size=self.config.batch_size)

            # Scheduler
            X_next_PS = ds.convert_to_params(X_next_batch)

            # if all the xs have failed before, replace them with a random sample,
            # as we would not get any new information otherwise
            if all(x in x_failed_evaluations for x in X_next_PS):
                X_next_PS = ds.get_random_sample(self.config.batch_size)

            # Evaluate the Objective function
            # Y_next_batch, Y_next_list = self.runUserObjective(X_next_PS)
            X_next_list, Y_next_list = self.runUserObjective(X_next_PS)

            # keep track of all parameters that failed
            x_failed = [x for x in X_next_PS if x not in X_next_list]
            x_failed_evaluations = np.append(x_failed_evaluations, x_failed)

            if len(Y_next_list) == 0:
                # no values returned
                # problematic if the domain is small: the same value may be tried again in the next iteration and the optimizer would be stuck
                continue

            Y_next_batch = Y_next_list.reshape(len(Y_next_list), 1)
            # update X_next_batch to successfully evaluated values
            X_next_batch = ds.convert_to_gp(X_next_list)

            # update the bookkeeping of values tried
            hyper_parameters_tried = np.append(hyper_parameters_tried, X_next_list)
            objective_function_values = np.append(objective_function_values, Y_next_list)
            surrogate_values = np.append(surrogate_values, Optimizer.surrogate.predict(X_next_batch))

            # Appending to the current samples
            X_sample = np.vstack((X_sample, X_next_batch))
            Y_sample = np.vstack((Y_sample, Y_next_batch))
            pbar.set_description("Best score: %s" % np.max(Y_sample))

        results['params_tried'] = hyper_parameters_tried
        results['objective_values'] = objective_function_values
        results['surrogate_values'] = surrogate_values

        results['best_objective'] = np.max(Y_sample)
        results['best_params'] = hyper_parameters_tried[np.argmax(Y_sample)]

        if self.maximize_objective is False:
            results['objective_values'] = -1 * results['objective_values']
            results['best_objective'] = -1 * results['best_objective']

        # save the optimizer and ds on the tuner object so the surrogate function and domain details can be inspected later
        self.Optimizer = Optimizer
        self.ds = ds
        return results