def optimizeOptimization():
    """
    Run Bayesian Optimization several times with different configurations (f.ex. different acquisition functions).
        - save the best results from each optimization iteration and use them to initialize the next optimization
    """
    initDict = {'target': [], 'damping': [], 'friction': []}
    acquisitionFunctions = ['ei', 'ucb', 'ei', 'ucb', 'ei', 'ucb']

    # iterate the acquisition functions directly instead of indexing with range()
    for acq in acquisitionFunctions:
        # fresh optimizer each round, warm-started with the best points found so far
        optimization = BO(runAll40ExperimentsAndGetCost, {'damping': (0, 1), 'friction': (0, 1)})
        try:
            optimization.initialize(initDict)
            optimization.maximize(n_iter=50, acq=acq)
            res = optimization.res['max']

            # only record maxima we have not seen before (avoids duplicate seeds)
            if res['max_val'] not in initDict['target']:
                initDict['target'].append(res['max_val'])
                maxParams = res['max_params']
                initDict['damping'].append(maxParams['damping'])
                initDict['friction'].append(maxParams['friction'])
                print(initDict)

                # persist after every improvement so a crash does not lose progress
                with open('BO_sledge_params/6xBO_all40_60iters_0-8_0-4.txt', 'w') as file:
                    file.write(json.dumps(initDict))  # use `json.loads` to do the reverse

        except Exception as ex:
            # a failed round must not abort the remaining rounds; log and move on
            print(ex)
def BayesianOptimizeArgmax(priorInputs, priorOutputs, ranges, acq='ucb', kappa=6):
    """Fit a GP to prior samples and return (best params, best value, next suggestion)."""
    bo = BayesianOptimization(None, ranges)

    # BO wants a single dict keyed by the parameter names plus 'target' for the
    # outputs, e.g. { 'target': [...], 'x': [...], 'y': [...] }
    inputsOutputs = {}
    for sample in priorInputs:
        for key, value in zip(ranges.keys(), sample):
            inputsOutputs.setdefault(key, []).append(value)
    inputsOutputs['target'] = priorOutputs

    bo.initialize(inputsOutputs)

    # 0 iterations: there is no explicit function to sample, we only fit the
    # surrogate model to the priors
    bo.maximize(init_points=0, n_iter=0, acq=acq, kappa=kappa)

    # best point observed so far
    knownYmax = bo.Y.max()
    knownArgmax = bo.X[bo.Y.argmax()]

    # point the acquisition function suggests to try next (explore vs. exploit)
    nextArgmax = helpers.acq_max(ac=bo.util.utility, gp=bo.gp, y_max=knownYmax, bounds=bo.bounds)

    return (OrderedDict(zip(ranges.keys(), knownArgmax)),
            knownYmax,
            OrderedDict(zip(ranges.keys(), nextArgmax)))
def optimizeSledgeParams():
    """Run Bayesian Optimization with normalized inputs to find the parameters
    that best replicate the free-falling trajectories of the sledge.
    """
    optimization = BO(runAll40ExperimentsAndGetCost, {'damping': (-1, 1), 'friction': (-1, 1)})

    # warm-start the optimizer with the best parameters found so far
    optimization.initialize({
        'target': [-1198.0, -1300.0, -1197.0],
        'damping': [-0.6826810335539988, -0.8658977050860612, -0.6818051907866487],
        'friction': [0.5014436638411311, 0.7335613457613679, 0.49986569844586715]
    })

    optimization.maximize(n_iter=55, acq='ucb')

    print(optimization.res)
    # uncomment to save the results for monitoring
    # with open('BO_sledge_params/BO_all40_45iters_0-8_0-1.txt', 'w') as file:
    #     file.write(json.dumps(optimization.res))  # use `json.loads` to do the reverse

    # filter extreme outliers: keep only costs above 10x the best (costs are negative)
    allValues = np.array(optimization.res['all']['values'])
    allValues = allValues[allValues > (optimization.res['max']['max_val'] * 10)]

    # map the normalized optimum back into physical units
    bestParams = optimization.res['max']['max_params']
    bestDamping, bestFriction = unscaleDampingAndFriction(bestParams['damping'],
                                                          bestParams['friction'])
    print("Best Cost of {} was achieved with damping = {} and friction = {}"
          .format(max(allValues), bestDamping, bestFriction))
    tools.plot(allValues, title="Bayesian Optimization convergence", ylabel="Cost []", xlabel="Optimization step []")
Esempio n. 4
0
    def opti(self):
        """Run a short Bayesian Optimization over the (x, y) hyper-parameter space."""
        search_space = {"x": (10, 50), "y": (0.1, 1.0)}
        optimizer = BayesianOptimization(self.trainAndCompareHit, search_space)

        # candidate points the optimizer should probe before fitting
        optimizer.explore({"x": range(10, 50), "y": [0.1, 0.25, 0.5, 0.75, 1.0]})

        # seed with one already-known evaluation: target -11 at x=20, y=0.5
        optimizer.initialize({-11: {"x": 20, "y": 0.5}})

        optimizer.maximize(n_iter=5, init_points=5, kappa=3.29)

        print(optimizer.res["max"])
Esempio n. 5
0
    def opti(self):
        """Optimize the training hyper-parameters x and y via Bayesian Optimization."""
        bounds = {'x': (10, 50), 'y': (0.1, 1.0)}
        bo = BayesianOptimization(self.trainAndCompareHit, bounds)

        # exploration grid evaluated up front
        bo.explore({'x': range(10, 50), 'y': [0.1, 0.25, 0.5, 0.75, 1.0]})

        # one known prior observation: target -11 at x=20, y=0.5
        bo.initialize({-11: {'x': 20, 'y': 0.5}})

        bo.maximize(n_iter=5, init_points=5, kappa=3.29)

        print(bo.res['max'])
Esempio n. 6
0
def optimizeSolverParams():
    """Run Bayesian Optimization with normalized inputs to find the ground-contact
    solver parameters that best replicate the vertical sledge position trajectories
    after the touchdown.
    """
    optimization = BO(
        runAll40ExperimentsAndGetCost, {
            'solref1': (0, 1),
            'solref2': (0, 1),
            'solimp1': (0, 1),
            'solimp2': (0, 1),
            'solimp3': (0, 1)
        })

    # initialize optimization with so far best found parameters to improve the results
    optimization.initialize({
        'target': [-30668.7, -38917.8],
        'solref1': [0.10667760273187854, 0.7917295903960275],
        'solref2': [0, 0],
        'solimp1': [0.6323304436894982, 0.9999981685561232],
        'solimp2': [0.0, 0.0],
        'solimp3': [0.9029115238081952, 1.0]
    })

    # bug fix: the previous message hard-coded stale values (n_iter=250, UCB)
    # while the actual call used n_iter=75 and 'ei'; derive the message from
    # the values actually used so they can never drift apart again
    n_iter, acq = 75, 'ei'
    print("BO settings: n_iter = {} and acq={}".format(n_iter, acq.upper()))
    optimization.maximize(n_iter=n_iter, acq=acq)

    # bug fix: `optimization.res` is a dict; `"..." + dict` raised TypeError
    print("Optimization Results:\n{}".format(optimization.res))

    # filter extreme outliers: keep only costs above 100x the best (costs are negative)
    results = np.array(optimization.res['all']['values'])
    results = results[results > optimization.res['max']['max_val'] * 100]

    # map the normalized optimum back into physical solver units
    bestParams = optimization.res['max']['max_params']
    solref1, solref2, solimp1, solimp2, solimp3 = (
        bestParams['solref1'], bestParams['solref2'], bestParams['solimp1'],
        bestParams['solimp2'], bestParams['solimp3'])
    solref1, solref2, solimp1, solimp2, solimp3 = unscaleGroundContactParams(
        solref1, solref2, solimp1, solimp2, solimp3)
    print("Best Cost of {} was achieved with {}, {}, {}, {}, {} params".format(
        max(results), solref1, solref2, solimp1, solimp2, solimp3))

    # NOTE(review): `settings` is not defined in this function — presumably a
    # module-level global; confirm it exists before running this plot
    tools.plot(results,
               title="Optimization Results from setting: {}".format(
                   settings.replace(" ", "_")))
Esempio n. 7
0
    'x2': [0, 1],
    'x3': [0, 1],
    'x4': [0, 1],
    'x5': [0, 1],
    'x6': [0, 1],
    'x7': [0, 1],
    'x8': (0, 1),
    'x9': (0, 1)
})

# Additionally, if we have any prior knowledge of the behaviour of
# the target function (even if not totally accurate) we can also
# tell that to the optimizer.
# Here we pass a dictionary with 'target' and parameter names as keys and a
# list of corresponding values
"""
bo.initialize(
    {
        'target': [-1, -1],
        'x': [1, 1],
        'y': [0, 2]
    }
)
"""
# Once we are satisfied with the initialization conditions
# we let the algorithm do its magic by calling the maximize()
# method.

bo.maximize(init_points=5, n_iter=30, kappa=2)

# The output values can be accessed with self.res
Esempio n. 8
0
    'x': (-4, 4),
    'y': (-3, 3)
})

# One of the things we can do with this object is pass points
# which we want the algorithm to probe. A dictionary with the
# parameters names and a list of values to include in the search
# must be given.
bo.explore({'x': [-1, 3], 'y': [-2, 2]})

# Additionally, if we have any prior knowledge of the behaviour of
# the target function (even if not totally accurate) we can also
# tell that to the optimizer.
# Here we pass a dictionary with target values as keys of another
# dictionary with parameters names and their corresponding value.
bo.initialize({-2: {'x': 1, 'y': 0}, -1.251: {'x': 1, 'y': 1.5}})

# Once we are satisfied with the initialization conditions
# we let the algorithm do its magic by calling the maximize()
# method.
bo.maximize(init_points=5, n_iter=15, kappa=3.29)

# The output values can be accessed with self.res
print(bo.res['max'])

# If we are not satisfied with the current results we can pickup from
# where we left, maybe pass some more exploration points to the algorithm
# change any parameters we may choose, and the let it run again.
bo.explore({'x': [0.6], 'y': [-0.23]})

# Making changes to the gaussian process can impact the algorithm
Esempio n. 9
0
def run_bayesian_optimization(name, eval_file, target, var_ranges, init_points, max_iterations, patience, alpha):
    """Run (or resume) a Bayesian Optimization of `target` over `var_ranges`.

    NOTE: Python 2 code (print statements, dict.iteritems).

    If `<out_dir>/<eval_file>` exists, its sections are parsed as previous
    evaluations and used to warm-start the optimizer; otherwise a fresh random
    initialization with `init_points` points is performed. After every
    iteration the current best point is written to `<out_dir>/<name>.txt`.
    The loop ends after `max_iterations` iterations or once more than
    `patience` iterations pass without improvement.

    Returns the parameter dict of the best point found.
    NOTE(review): `curparams` is unbound if `max_iterations` == 0 — the loop
    body is then never entered; verify callers always pass a positive value.
    """
    # module-level evaluation counter, reset per run (shared with `target`,
    # presumably — TODO confirm against the target function's implementation)
    global evalcnt
    evalcnt = 0
    
    print "now optimizing the following variables: " + str(var_ranges)
    print "alpha = " + str(alpha)

    # change the kernel to have a length scale more appropriate to this function
    # alpha ... corresponds to the value added to the diagonal elements of the covariance matrix <-> the approximate noise level in the observations
    gp_params = {'kernel': ConstantKernel(1.0, (1e-8, 1e2)) * Matern(length_scale = 0.01, length_scale_bounds = (1e-5, 1e5), nu = 1.5),
                 'alpha': alpha}

    bo = BayesianOptimization(target, var_ranges)
    
    # check if a file with previous evaluations of this utility function already exists, if so, use it for initialization
    evaluations_path = os.path.join(out_dir, eval_file)
    
    if os.path.exists(evaluations_path):
        confhandler = ConfigFileHandler()
        confhandler.load_configuration(evaluations_path)
        
        # collect per-variable value lists (plus 'target') from all sections
        init_dict = {}
        
        for section_name in confhandler.get_sections():
            cur_section = confhandler.get_section(section_name)
            
            for key, value in cur_section.iteritems():
                # only take those variables that are actually relevant
                if key in var_ranges or key == "target":
                    if key not in init_dict:
                        init_dict[key] = []
                    
                    init_dict[key].append(float(value))
                
        # resume the evaluation counter from the last section name
        # (sections are assumed to be named 'evaluation_<n>' — TODO confirm)
        evalcnt = int(re.sub('evaluation_', '', confhandler.get_sections()[-1])) + 1
        print "resuming " + name + " at evaluation " + str(evalcnt)
        
        init_points_loaded = len(init_dict["target"])
        print "found " + str(init_points_loaded) + " initialization points: " + str(init_dict)
        
        bo.initialize(init_dict)
        # top up with random points only if fewer than `init_points` were loaded
        bo.maximize(init_points = max(0, init_points - init_points_loaded), n_iter = 0, acq = 'poi', kappa = 3, xi = xi_scheduler(0.0, max_iterations), **gp_params)
        print "initialization done"
    else:
        # no previous evaluations: start from scratch with random init points
        bo.maximize(init_points = init_points, n_iter = 0, acq = 'poi', kappa = 3, xi = xi_scheduler(0.0, max_iterations), **gp_params)
    
    cur_iteration = 1
    patience_cnt = 0
    best_cost = -7.0  # initial best; meaning of -7.0 unclear from here — TODO confirm
    
    for it in range(max_iterations): 
        # xi (exploration parameter) is re-scheduled every iteration
        cur_xi = xi_scheduler(cur_iteration, max_iterations)
        print "cur_iteration = " + str(cur_iteration) + ", using xi = " + str(cur_xi)

        cur_iteration += 1
        
        # one optimization step per loop iteration so progress can be saved each time
        bo.maximize(init_points = 0, n_iter = 1, acq = 'poi', kappa = 3, xi = cur_xi, **gp_params)

        # evaluate the current maximum
        curval = bo.res['max']
        cost = curval['max_val']
        curparams = curval['max_params']
    
        # persist the current best point after every iteration
        confhandler = ConfigFileHandler()
        confhandler.config.optionxform = str  # preserve key case when writing
        confhandler.new_section(name)
        confhandler.set_field(name, 'target', str(cost))
        
        for key, val in curparams.iteritems():
            confhandler.set_field(name, key, str(val))
        
        confhandler.save_configuration(os.path.join(out_dir, name + '.txt'))
        
        # check if it is time to stop this optimization
        if(cost > best_cost):
            best_cost = cost
            patience_cnt = 0
            
        # NOTE(review): this increment also runs right after an improvement,
        # so patience_cnt is 1 (not 0) on improving iterations — confirm intended
        patience_cnt += 1
        
        if(patience_cnt > patience):
            break
            
    return curparams
Esempio n. 10
0
# together with the parameters names and their bounds.
bo = BayesianOptimization(lambda x, y: -x**2 - (y - 1)**2 + 1,
                          {'x': (-4, 4), 'y': (-3, 3)})

# Ask the optimizer to probe specific points: a dictionary mapping each
# parameter name to the list of values to include in the search.
bo.explore({'x': [-1, 3], 'y': [-2, 2]})

# Prior knowledge about the target function (even approximate) can be
# supplied as well: a dictionary keyed by the observed target value, each
# entry holding the corresponding parameter assignment.
bo.initialize({-2: {'x': 1, 'y': 0}, -1.251: {'x': 1, 'y': 1.5}})

# With the initial conditions in place, let the algorithm do its work by
# calling maximize().
bo.maximize(n_iter=15, init_points=5, kappa=3.29)

# The collected output values live on bo.res
print(bo.res['max'])

# Not satisfied with the current results? Pick up where we left off: pass
# some more exploration points, change any parameters we choose, and let
# it run again.
bo.explore({'x': [0.6], 'y': [-0.23]})

# Making changes to the gaussian process can impact the algorithm
Esempio n. 11
0
class LearningGUI:
    def __init__(self, learning_client):
        self.learning_client = learning_client

        self.parent = tk.Tk()
        self.parent.resizable(0, 0)
        self.parent.minsize(500, 500)
        self.parent.maxsize(1000, 1000)
        self.parent.title('Parameters Learning GUI')
        self.parent.bind('<Control-c>', lambda e: self.parent.destroy())

        self.task_ctr_frame = tk.LabelFrame(self.parent,
                                            font=title_font,
                                            text="Task Settings")
        self.lrning_frame = tk.LabelFrame(
            self.parent,
            font=title_font,
            text="Bayesian Optimization Settings and Info")
        self.dyn_config_frame = tk.LabelFrame(
            self.parent,
            height=400,
            font=title_font,
            text="Dynamic Reconfigure Settings")

        self.lrning_frame.grid(row=0, column=1, sticky='wens', padx=2, pady=2)
        self.task_ctr_frame.grid(row=0, column=0, sticky='wns', padx=2, pady=2)
        self.dyn_config_frame.grid(row=1,
                                   column=0,
                                   columnspan=2,
                                   sticky='ns',
                                   padx=2,
                                   pady=2)
        self.parent.rowconfigure(1, minsize=400)
        self.parent.columnconfigure(1, weight=1)

        # --------- Task Control Frame --------- #
        self.task_ctr = dict()

        self.task_ctr['start'] = tk.Button(self.task_ctr_frame,
                                           width=10,
                                           font=normal_font,
                                           text='Start Task',
                                           command=self.start_task_once)
        self.task_ctr['fail'] = tk.Button(
            self.task_ctr_frame,
            width=10,
            font=normal_font,
            text='FAILURE',
            command=self.learning_client.cancel_task,
            fg='red')

        task_type_frame = tk.Frame(self.task_ctr_frame)
        v = tk.IntVar(task_type_frame)
        v.set(0)
        self.learning_client.set_action_settings(atype=0)
        self.task_ctr['circle'] = tk.Radiobutton(
            task_type_frame,
            text="Circle",
            variable=v,
            value=0,
            command=lambda: self.learning_client.set_action_settings(atype=0))
        self.task_ctr['square'] = tk.Radiobutton(
            task_type_frame,
            text="Square",
            variable=v,
            value=1,
            command=lambda: self.learning_client.set_action_settings(atype=1))
        self.task_ctr['circle'].pack(side='left', anchor='center')
        self.task_ctr['square'].pack(side='left', anchor='center')

        self.task_ctr['depth'] = tk.Scale(
            self.task_ctr_frame,
            from_=0.0,
            to=1.0,
            resolution=0.01,
            orient='horizontal',
            font=small_font,
            tickinterval=1,
            length=250,
            command=lambda x: self.learning_client.set_action_settings(
                depth=float(x)))
        self.task_ctr['radius'] = tk.Scale(
            self.task_ctr_frame,
            from_=0.1,
            to=1.5,
            resolution=0.1,
            orient='horizontal',
            font=small_font,
            tickinterval=1.4,
            length=250,
            command=lambda x: self.learning_client.set_action_settings(
                radius=float(x)))
        self.task_ctr['speed'] = tk.Scale(
            self.task_ctr_frame,
            from_=0.1,
            to=5.0,
            resolution=0.1,
            orient='horizontal',
            font=small_font,
            tickinterval=4.9,
            length=250,
            command=lambda x: self.learning_client.set_action_settings(
                speed=float(x)))

        self.task_ctr['depth'].set(0.05)
        self.task_ctr['radius'].set(1)
        self.task_ctr['speed'].set(0.2)
        self.task_ctr['start'].grid(columnspan=2, sticky='we', padx=2, pady=2)
        self.task_ctr['fail'].grid(row=0,
                                   column=2,
                                   columnspan=2,
                                   sticky='we',
                                   padx=2,
                                   pady=2)
        self.create_labels(self.task_ctr_frame, 1, 0, 0, 'Type', 'Depth',
                           'Radius', 'Speed')
        task_type_frame.grid(row=1,
                             column=1,
                             columnspan=3,
                             sticky='we',
                             padx=2,
                             pady=2)
        self.task_ctr['depth'].grid(row=2,
                                    column=1,
                                    columnspan=3,
                                    sticky='we',
                                    padx=2,
                                    pady=2)
        self.task_ctr['radius'].grid(row=3,
                                     column=1,
                                     columnspan=3,
                                     sticky='we',
                                     padx=2,
                                     pady=2)
        self.task_ctr['speed'].grid(row=4,
                                    column=1,
                                    columnspan=3,
                                    sticky='we',
                                    padx=2,
                                    pady=2)

        # --------- Bayesian Optimization and Info Frame --------- #
        self.lrning_ctr = dict()
        self.lrning_ctr['init'] = tk.Button(self.lrning_frame,
                                            text='Add Initial Point',
                                            font=normal_font,
                                            command=self.add_last_init_pt)
        self.lrning_ctr['raninit'] = tk.Button(
            self.lrning_frame,
            text='Add Random Initial Points',
            font=normal_font,
            command=self.add_rand_init_pt)
        self.lrning_ctr['points'] = tk.Scale(self.lrning_frame,
                                             from_=1,
                                             to=10,
                                             orient='horizontal',
                                             font=small_font,
                                             tickinterval=9,
                                             length=150)
        self.lrning_ctr['start'] = tk.Button(self.lrning_frame,
                                             text='Start Learning',
                                             font=normal_font,
                                             command=self.learn)
        self.lrning_ctr['iter'] = tk.Scale(self.lrning_frame,
                                           from_=1,
                                           to=40,
                                           orient='horizontal',
                                           font=small_font,
                                           tickinterval=39,
                                           length=150)
        self.lrning_ctr['cancel'] = tk.Button(self.lrning_frame,
                                              text='Cancel Learning',
                                              width=12,
                                              font=normal_font,
                                              command=self.reset_learning,
                                              fg='red')
        self.lrning_ctr['load'] = tk.Button(self.lrning_frame,
                                            text='Automatic Load & Learn',
                                            font=normal_font,
                                            command=self.load_init_pts)
        self.lrning_ctr['save'] = tk.Button(self.lrning_frame,
                                            text='Save Results',
                                            font=normal_font,
                                            command=self.save_results)

        self.lrning_ctr['init'].grid(row=0,
                                     column=0,
                                     columnspan=2,
                                     sticky='we',
                                     padx=2,
                                     pady=2)
        self.lrning_ctr['raninit'].grid(row=1,
                                        column=0,
                                        columnspan=2,
                                        sticky='we',
                                        padx=2,
                                        pady=2)
        tk.Label(self.lrning_frame,
                 font=small_font,
                 anchor='e',
                 width=8,
                 text='Points').grid(row=2, column=0, padx=2, pady=2)
        self.lrning_ctr['points'].grid(row=2,
                                       column=1,
                                       sticky='we',
                                       padx=2,
                                       pady=2)
        self.lrning_ctr['start'].grid(row=3,
                                      column=0,
                                      columnspan=2,
                                      sticky='we',
                                      padx=2,
                                      pady=2)
        tk.Label(self.lrning_frame,
                 font=small_font,
                 anchor='e',
                 width=8,
                 text='Iterations').grid(row=4, column=0, padx=2, pady=2)
        self.lrning_ctr['iter'].grid(row=4,
                                     column=1,
                                     sticky='we',
                                     padx=2,
                                     pady=2)
        self.lrning_ctr['save'].grid(row=0,
                                     column=2,
                                     sticky='we',
                                     padx=2,
                                     pady=2)
        self.lrning_ctr['load'].grid(row=0,
                                     column=3,
                                     sticky='we',
                                     padx=2,
                                     pady=2)
        self.lrning_ctr['cancel'].grid(row=1,
                                       column=2,
                                       columnspan=2,
                                       sticky='we',
                                       padx=2,
                                       pady=2)

        self.params_frame = tk.LabelFrame(self.lrning_frame,
                                          font=title_font,
                                          text="Learning Params.")
        self.params_frame.grid(row=2,
                               rowspan=3,
                               column=2,
                               sticky='ns',
                               padx=2,
                               pady=2)
        self.info_frame = tk.LabelFrame(self.lrning_frame,
                                        font=title_font,
                                        text="Information")
        self.info_frame.grid(row=2,
                             rowspan=3,
                             column=3,
                             sticky='wens',
                             padx=2,
                             pady=2)
        self.lrning_frame.columnconfigure(3, weight=1)

        self.create_labels(self.params_frame,
                           0,
                           0,
                           0,
                           'Alpha',
                           'xi/kappa',
                           'Acq. Fun.',
                           'Loc. Opt.',
                           'd',
                           'logistic',
                           anchor='e',
                           width=8)
        self.lrning_param = dict()
        self.lrning_param['alpha'] = tk.Entry(self.params_frame,
                                              font=small_font,
                                              width=10)
        self.lrning_param['alpha'].insert(0, '1e-10')
        self.lrning_param['const'] = tk.Entry(self.params_frame,
                                              font=small_font,
                                              width=10)
        self.lrning_param['const'].insert(0, '0.001')
        self.lrning_param['var'] = tk.StringVar(self.params_frame)
        self.lrning_param['var'].set('poi')
        self.lrning_param['acq'] = tk.OptionMenu(self.params_frame,
                                                 self.lrning_param['var'],
                                                 'poi', 'ei', 'ucb')
        self.lrning_param['acq'].config(font=small_font)

        self.lrning_param['locvar'] = tk.IntVar()
        self.lrning_param['local'] = tk.Checkbutton(
            self.params_frame, variable=self.lrning_param['locvar'])
        self.lrning_param['dist'] = tk.Entry(self.params_frame,
                                             font=small_font,
                                             width=10)
        self.lrning_param['dist'].insert(0, '0.1')
        self.lrning_param['logvar'] = tk.IntVar()
        self.lrning_param['logis'] = tk.Checkbutton(
            self.params_frame, variable=self.lrning_param['logvar'])

        self.lrning_param['alpha'].grid(row=0, column=1, padx=2, pady=2)
        self.lrning_param['const'].grid(row=1, column=1, padx=2, pady=2)
        self.lrning_param['acq'].grid(row=2,
                                      column=1,
                                      sticky='we',
                                      padx=2,
                                      pady=2)
        self.lrning_param['local'].grid(row=3,
                                        column=1,
                                        sticky='we',
                                        padx=2,
                                        pady=2)
        self.lrning_param['dist'].grid(row=4, column=1, padx=2, pady=2)
        self.lrning_param['logis'].grid(row=5,
                                        column=1,
                                        sticky='we',
                                        padx=2,
                                        pady=2)

        self.create_labels(self.info_frame,
                           0,
                           0,
                           0,
                           'Last Reward',
                           'Max Reward',
                           'Initial Points',
                           'Iterations',
                           'Dimensions',
                           anchor='e',
                           width=11)
        self.lrning_info = dict()
        self.lrning_info['last'] = tk.Label(self.info_frame,
                                            text='---',
                                            font=small_font,
                                            anchor='w',
                                            width=11,
                                            bd=2,
                                            relief='groove')
        self.lrning_info['max'] = tk.Label(self.info_frame,
                                           text='---',
                                           font=small_font,
                                           anchor='w',
                                           width=11,
                                           bd=2,
                                           relief='groove')
        self.lrning_info['init'] = tk.Label(self.info_frame,
                                            text='0',
                                            font=small_font,
                                            anchor='w',
                                            width=11,
                                            bd=2,
                                            relief='groove')
        self.lrning_info['itrs'] = tk.Label(self.info_frame,
                                            text='0',
                                            font=small_font,
                                            anchor='w',
                                            width=11,
                                            bd=2,
                                            relief='groove')
        self.lrning_info['dim'] = tk.Label(self.info_frame,
                                           text='0',
                                           font=small_font,
                                           anchor='w',
                                           width=11,
                                           bd=2,
                                           relief='groove')

        self.lrning_info['last'].grid(row=0, column=1, padx=2, pady=2)
        self.lrning_info['max'].grid(row=1, column=1, padx=2, pady=2)
        self.lrning_info['init'].grid(row=2, column=1, padx=2, pady=2)
        self.lrning_info['itrs'].grid(row=3, column=1, padx=2, pady=2)
        self.lrning_info['dim'].grid(row=4, column=1, padx=2, pady=2)

        # --------- Dynamic Reconfiguration Frame --------- #
        self.ctr_vars_frame = tk.LabelFrame(self.dyn_config_frame,
                                            height=400,
                                            font=title_font,
                                            text="Controlled Variables")
        self.fix_vars_frame = tk.LabelFrame(self.dyn_config_frame,
                                            height=400,
                                            font=title_font,
                                            text="Fixed Variables")
        self.link_vars_frame = tk.LabelFrame(self.dyn_config_frame,
                                             height=400,
                                             font=title_font,
                                             text="Linked Variables")
        self.dyn_ctr_frame = tk.Frame(self.dyn_config_frame)

        self.dyn_ctr_frame.pack(side='top', fill='x')
        self.ctr_vars_frame.pack(side='left', fill='y', padx=2, pady=2)
        self.fix_vars_frame.pack(side='left', fill='y', padx=2, pady=2)
        self.link_vars_frame.pack(side='left', fill='y', padx=2, pady=2)

        # --------- Dynamic Reconfiguration Control Frame --------- #
        self.dyn_update_button = tk.Button(self.dyn_ctr_frame,
                                           text='Send Values',
                                           width=12,
                                           font=normal_font,
                                           command=self.send_dyn_reconfig_vals)
        self.dyn_show_button = tk.Button(
            self.dyn_ctr_frame,
            text='Show Current',
            width=12,
            font=normal_font,
            command=self.show_current_dyn_reconfig)
        self.dyn_default_button = tk.Button(
            self.dyn_ctr_frame,
            text='Restore Default',
            width=12,
            font=normal_font,
            command=self.restore_dyn_reconfig_default)
        self.dyn_setmax_button = tk.Button(self.dyn_ctr_frame,
                                           text='Set on Max',
                                           width=12,
                                           font=normal_font,
                                           command=self.set_dyn_reconfig_max)

        self.dyn_update_button.pack(side='left', fill='y', padx=2, pady=2)
        self.dyn_show_button.pack(side='left', fill='y', padx=2, pady=2)
        self.dyn_default_button.pack(side='left', fill='y', padx=2, pady=2)
        self.dyn_setmax_button.pack(side='left', fill='y', padx=2, pady=2)

        # ---- Controlled Variables Frame ---- #
        self.create_labels(self.ctr_vars_frame, 0, 0, 1, 'Selected Variables',
                           'min.', 'set', 'max.')
        self.selected_ctr_vars = []
        self.dyn_ctr_bottom = dict()
        self.dyn_ctr_bottom['var'] = tk.StringVar(self.ctr_vars_frame)
        self.dyn_ctr_bottom['opt'] = tk.OptionMenu(self.ctr_vars_frame,
                                                   self.dyn_ctr_bottom['var'],
                                                   'a')
        self.dyn_ctr_bottom['min'] = tk.Entry(self.ctr_vars_frame,
                                              font=small_font,
                                              width=5)
        self.dyn_ctr_bottom['val'] = tk.Entry(self.ctr_vars_frame,
                                              font=small_font,
                                              width=5)
        self.dyn_ctr_bottom['max'] = tk.Entry(self.ctr_vars_frame,
                                              font=small_font,
                                              width=5)
        self.dyn_ctr_bottom['add'] = tk.Button(self.ctr_vars_frame,
                                               font=small_font,
                                               text='Add',
                                               command=self.add_dyn_ctr_var)

        # ---- Fixed Variables Frame ---- #
        self.create_labels(self.fix_vars_frame, 0, 0, 1, 'Selected Variables',
                           'set')
        self.selected_fix_vars = []
        self.dyn_fix_bottom = dict()
        self.dyn_fix_bottom['var'] = tk.StringVar(self.fix_vars_frame)
        self.dyn_fix_bottom['opt'] = tk.OptionMenu(self.fix_vars_frame,
                                                   self.dyn_fix_bottom['var'],
                                                   'a')
        self.dyn_fix_bottom['val'] = tk.Entry(self.fix_vars_frame,
                                              font=small_font,
                                              width=5)
        self.dyn_fix_bottom['add'] = tk.Button(self.fix_vars_frame,
                                               font=small_font,
                                               text='Add',
                                               command=self.add_dyn_fix_var)

        # ---- Linked Variables Frame ---- #
        self.create_labels(self.link_vars_frame, 0, 0, 1, 'Selected Variables',
                           'Linked')
        self.selected_link_vars = []
        self.dyn_link_bottom = dict()
        self.dyn_link_bottom['var1'] = tk.StringVar(self.link_vars_frame)
        self.dyn_link_bottom['opt1'] = tk.OptionMenu(
            self.link_vars_frame, self.dyn_link_bottom['var1'], 'a')
        self.dyn_link_bottom['var2'] = tk.StringVar(self.link_vars_frame)
        self.dyn_link_bottom['opt2'] = tk.OptionMenu(
            self.link_vars_frame, self.dyn_link_bottom['var2'], 'a')
        self.dyn_link_bottom['add'] = tk.Button(self.link_vars_frame,
                                                font=small_font,
                                                text='Add',
                                                command=self.add_dyn_link_var)

        self.reset_learning()
        self.update_bottoms()

        tk.mainloop()

    def create_labels(self, parent, row, column, h, *labels, **config):
        """Grid a run of tk.Labels starting at (row, column).

        parent -- container widget the labels are gridded into
        row, column -- grid coordinates of the first label
        h -- truthy: lay the labels out horizontally (same row),
             falsy: vertically (same column)
        labels -- one label text per widget
        config -- extra keyword options forwarded to tk.Label
        """
        # enumerate() instead of range(len(...)): i is the grid offset.
        for i, text in enumerate(labels):
            label = tk.Label(parent, text=text, font=small_font, **config)
            if h:
                label.grid(row=row, column=column + i, padx=2, pady=2)
            else:
                label.grid(row=row + i, column=column, padx=2, pady=2)

    def get_float_entry(self, entry):
        """Read a tk Entry and parse it as float; None when not numeric."""
        text = entry.get()
        try:
            value = float(text)
        except ValueError:
            return None
        return value

    def reset_learning(self):
        """Cancel any pending task and reset all learning state and GUI info.

        Bug fix: `cancel_task` was a bare attribute reference with no call
        parentheses, so the cancellation never actually ran.
        NOTE(review): assumes cancel_task is a plain method, not a property
        with side effects -- confirm against the learning client.
        """
        self.learning_client.cancel_task()
        self.bo = None  # Bayesian Optimization class place holder
        self.learning_started = self.task_running = self.learning_initialized = False
        self.iters_running = 0
        self.init_pts_num = 0
        self.last_reward = None
        self.pos_error = []
        self.rot_error = []
        # Refresh every control group so the widgets reflect the cleared state.
        self.toggle_task_ctr()
        self.toggle_lrning_ctr()
        self.toggle_dyn_reconfig_ctr()
        self.update_info()

    def init_bo(self):
        """Lazily create the Bayesian optimizer on first use.

        Subsequent calls are no-ops once self.bo exists.
        """
        if self.bo:
            return
        self.bo = BO(self.start_task, self.learning_client.controlled_vars)
        self.learning_started = True
        self.toggle_task_ctr()
        self.toggle_dyn_reconfig_ctr()

    def update_info(self):
        """Refresh the learning-info labels: dimensions, init points,
        best observed value, iteration count and last reward."""
        self.lrning_info['dim'].config(text=str(len(self.selected_ctr_vars)))
        if self.bo:
            self.lrning_info['init'].config(text=str(self.init_pts_num))
            # Best target so far: prefer the optimizer's observation array,
            # fall back to the raw init buffer before it is populated.
            if self.bo.space.Y is not None:
                self.lrning_info['max'].config(text=str(max(self.bo.space.Y)))
            else:
                self.lrning_info['max'].config(text=str(max(self.bo.y_init)))
            if self.learning_initialized:
                # iterations = total observations minus initialization points
                self.lrning_info['itrs'].config(
                    text=str(len(self.bo.space.Y) - self.init_pts_num))
            else:
                self.lrning_info['itrs'].config(text='0')
        if self.last_reward is not None:
            self.lrning_info['last'].config(text=str(self.last_reward))

    def toggle_task_ctr(self):
        """Enable/disable the task-control buttons for the current state."""
        busy = self.task_running or self.iters_running
        self.task_ctr['start']['state'] = 'disabled' if busy else 'normal'
        self.task_ctr['fail']['state'] = ('normal' if self.task_running
                                          else 'disabled')
        # Task settings are frozen while a task runs or learning has started.
        settings_state = ('disabled'
                          if self.task_running or self.learning_started
                          else 'normal')
        for key in ['depth', 'radius', 'speed', 'circle', 'square']:
            self.task_ctr[key]['state'] = settings_state

    def toggle_lrning_ctr(self):
        """Enable/disable the learning controls according to the current
        task/learning state."""
        # Initialization controls: only before learning is initialized, with
        # at least one controlled variable selected and no task running.
        for key in ['init', 'raninit', 'points']:
            if (not self.task_running) and len(
                    self.selected_ctr_vars) and not self.learning_initialized:
                self.lrning_ctr[key]['state'] = 'normal'
            else:
                self.lrning_ctr[key]['state'] = 'disabled'
        # Start/iterate: need init points and nothing currently running.
        for key in ['start', 'iter']:
            if not (self.task_running
                    or self.iters_running) and self.init_pts_num:
                self.lrning_ctr[key].config(state='normal')
            else:
                self.lrning_ctr[key].config(state='disabled')
        self.lrning_ctr['cancel'][
            'state'] = 'normal' if self.learning_started else 'disabled'
        # Saving is only allowed once initialized and while idle.
        self.lrning_ctr['save']['state'] = 'normal' if self.learning_initialized and \
                                                       not (self.task_running or self.iters_running) else 'disabled'
        # Learning parameters mirror the start/iter condition.
        for key in ['alpha', 'const', 'acq', 'local', 'dist']:
            if not (self.task_running
                    or self.iters_running) and self.init_pts_num:
                self.lrning_param[key].config(state='normal')
            else:
                self.lrning_param[key].config(state='disabled')
        # Logistic-reward toggle: only in the window after learning started
        # but before it is initialized, while idle.
        self.lrning_param['logis']['state'] = 'normal' if self.learning_started and \
            not (self.task_running or self.iters_running or self.learning_initialized) else 'disabled'

    def toggle_dyn_reconfig_ctr(self):
        """Enable/disable the dynamic-reconfigure widgets for the state."""
        # Row editing (add/delete/fixed-value entries) is locked while a
        # task runs or learning has started.
        locked = self.task_running or self.learning_started
        edit_state = 'disabled' if locked else 'normal'
        self.dyn_ctr_bottom['add']['state'] = edit_state
        self.dyn_fix_bottom['add']['state'] = edit_state
        self.dyn_link_bottom['add']['state'] = edit_state
        all_rows = (self.selected_ctr_vars + self.selected_fix_vars +
                    self.selected_link_vars)
        for _, items in all_rows:
            items['del']['state'] = edit_state
        for _, items in self.selected_fix_vars:
            items['val']['state'] = edit_state
        # Send/show/restore buttons are only locked while something runs.
        busy = self.task_running or self.iters_running
        run_state = 'disabled' if busy else 'normal'
        self.dyn_update_button['state'] = run_state
        self.dyn_show_button['state'] = run_state
        self.dyn_default_button['state'] = run_state
        # 'Set on Max' additionally requires an initialized optimizer.
        if self.learning_initialized and not busy:
            self.dyn_setmax_button['state'] = 'normal'
        else:
            self.dyn_setmax_button['state'] = 'disabled'

    def start_task(self, **ctr_vars):
        """Run one task and return its (possibly logistic-squashed) reward.

        ctr_vars -- optional controlled-variable values to push to the
        dynamic-reconfigure server before the task starts. Also serves as
        the objective function handed to the Bayesian optimizer.
        """
        self.task_running = True
        self.toggle_task_ctr()
        self.toggle_dyn_reconfig_ctr()
        self.toggle_lrning_ctr()
        if ctr_vars:
            for var in ctr_vars:
                self.learning_client.set_dyn_reconfig_var(var, ctr_vars[var])
            # Poll until the task actually starts, then refresh the display.
            self.parent.after(200, self.check_new_dyn_vals)
        # Blocks until the task finishes.
        reward, self.last_pos_error, self.last_rot_error = self.learning_client.start_task(
        )
        self.last_reward = self.convert_reward(reward)
        self.task_running = False
        if self.iters_running:
            self.iters_running -= 1
            if not self.iters_running:
                self.restore_dyn_reconfig_default(
                )  # restore to default after finishing iterations.
            # Before initialization, every finished task counts as an
            # initialization point.
            if not self.learning_initialized:
                self.init_pts_num += 1
            self.pos_error.append(self.last_pos_error)
            self.rot_error.append(self.last_rot_error)
        self.toggle_task_ctr()
        self.toggle_dyn_reconfig_ctr()
        self.toggle_lrning_ctr()
        self.parent.after(
            50, self.update_info
        )  # Needs to be done after a lag so that self.bo is updated after the current start_task return
        return self.last_reward

    def convert_reward(self, reward):
        """Map the raw reward through a logistic centered at sig_mid when
        learning is initialized and the sigmoid option is enabled;
        otherwise pass it through unchanged."""
        if not (self.learning_initialized and self.enable_sig):
            return reward
        # logistic function with slope 0.5, midpoint self.sig_mid
        return 1 / (1 + np.exp(-0.5 * (reward - self.sig_mid)))

    def check_new_dyn_vals(self):
        """Poll until the task has started, then refresh the shown values."""
        if not self.learning_client.task_started:
            # Not started yet: check again in 200 ms.
            self.parent.after(200, self.check_new_dyn_vals)
            return
        self.parent.after(100, self.show_current_dyn_reconfig)

    def start_task_once(self):
        """Run a single task in a background daemon thread."""
        worker = threading.Thread(target=self.start_task)
        worker.daemon = True
        self.task_thread = worker
        worker.start()

    def add_last_init_pt(self):
        """Register the most recent task result as an optimizer init point.

        Builds a {'target': [...], <var>: [...]} dict from the current
        reconfigure values and feeds it to bo.initialize().
        """
        self.init_bo()
        if self.last_reward is not None:
            point = {'target': [self.last_reward]}
            for var in self.learning_client.controlled_vars:
                point[var] = [self.learning_client.current_config[var]]
            try:
                self.bo.initialize(point)
                self.init_pts_num += 1
                self.pos_error.append(self.last_pos_error)
                self.rot_error.append(self.last_rot_error)
                self.toggle_lrning_ctr()
                self.update_info()
            except KeyError:
                # bo.initialize raises KeyError for a duplicate point.
                print "CANNOT add the same point twice"

    def load_init_pts(self):
        def simulate(init, dim, coef, atype, speed, sig, local):
            with open(str(dim) + 'd' + str(init) + '.pkl', 'rb') as f:
                data = pickle.load(f)
            self.learning_client.controlled_vars = data['controlled_vars']
            self.learning_client.fixed_vars = data['fixed_vars']
            self.learning_client.linked_vars = data['linked_vars']
            print data['controlled_vars']
            print data['fixed_vars']
            print data['linked_vars']

            self.reset_learning()
            self.learning_client.set_action_settings(atype=atype)
            self.learning_client.set_action_settings(speed=speed)
            self.enable_sig = sig

            for i in range(data['init_pts_num']):
                vars = dict(zip(data['keys'], data['x'][i]))
                self.start_task(**vars)
                self.add_last_init_pt()

            self.initialize_learning()
            self.iters_running = 40
            self.bo.maximize(init_points=0,
                             n_iter=40,
                             acq='poi',
                             kappa=0.01,
                             xi=0.01,
                             local_acq_opt=local,
                             d=0.05,
                             alpha=1e-4)
            self.save_results(('square' if atype else 'circle') + str(dim) +
                              'd' + str(coef) + '-' + str(init) * (2 - sig) +
                              2 * str(init) * (not local) + '-2' + 's' +
                              str(speed))

        def sims():
            simulate(1, 4, 1.16, 0, 0.5, True, False)

        self.task_thread = threading.Thread(target=sims)
        self.task_thread.daemon = True
        self.task_thread.start()

    def add_rand_init_pt(self):
        """Sample the user-requested number of random init points in a
        background daemon thread."""
        self.init_bo()
        num_pts = int(self.lrning_ctr['points'].get())
        self.iters_running = num_pts
        worker = threading.Thread(target=lambda: self.bo.init(num_pts))
        worker.daemon = True
        self.task_thread = worker
        worker.start()

    def initialize_learning(self):
        """Finalize the initialization phase: flush the init buffer into the
        optimizer, fix the sigmoid midpoint at the current best observation
        and rescale all stored rewards through convert_reward()."""
        if self.init_pts_num:
            if not self.learning_initialized:
                self.learning_initialized = True
                if not self.bo.initialized:
                    self.bo.init(0)
                self.enable_sig = self.lrning_param['logvar'].get()
                # Midpoint must be captured BEFORE rescaling Y in place.
                self.sig_mid = max(self.bo.space.Y)
                for i in range(len(self.bo.space.Y)):
                    self.bo.space.Y[i] = self.convert_reward(
                        self.bo.space.Y[i])
                print self.bo.space.Y

    def learn(self):
        """Kick off a batch of optimization iterations in a daemon thread,
        using the acquisition settings currently entered in the GUI."""
        self.initialize_learning()
        if not self.learning_initialized:
            return
        itrs = int(self.lrning_ctr['iter'].get())
        alpha = self.get_float_entry(self.lrning_param['alpha'])
        const = self.get_float_entry(self.lrning_param['const'])
        d = self.get_float_entry(self.lrning_param['dist'])
        acq = self.lrning_param['var'].get()
        local = self.lrning_param['locvar'].get()
        self.iters_running = itrs

        def run_maximize():
            # `const` feeds both kappa (ucb) and xi (ei/poi).
            self.bo.maximize(init_points=0,
                             n_iter=itrs,
                             acq=acq,
                             kappa=const,
                             xi=const,
                             local_acq_opt=local,
                             d=d,
                             alpha=alpha)

        self.task_thread = threading.Thread(target=run_maximize)
        self.task_thread.daemon = True
        self.task_thread.start()

    def save_results(self, filename='learning_results_'):
        """Pickle the full learning session (observations, errors, settings)
        and move the file into the spc_uav_comm package's practical dir.

        filename -- prefix for the output file; a timestamp is appended to
        avoid overwriting earlier results.
        """
        data = dict()
        data['x'] = self.bo.space.X
        data['y'] = self.bo.space.Y
        data['keys'] = self.bo.space.keys
        data['pos_error'] = np.vstack(self.pos_error)
        data['rot_error'] = np.vstack(self.rot_error)
        data['action_settings'] = self.learning_client.action_settings
        data['init_pts_num'] = self.init_pts_num
        data['learning_settings'] = {
            'acq': self.lrning_param['var'].get(),
            'loc': self.lrning_param['locvar'].get(),
            'd': self.get_float_entry(self.lrning_param['dist']),
            'alpha': self.get_float_entry(self.lrning_param['alpha']),
            'const': self.get_float_entry(self.lrning_param['const'])
        }
        data['controlled_vars'] = self.learning_client.controlled_vars
        data['fixed_vars'] = self.learning_client.fixed_vars
        data['linked_vars'] = self.learning_client.linked_vars
        data['sig_mid'] = self.sig_mid

        # Modified to alleviate file overriding problem (Ramy)
        modFileName = filename + datetime.datetime.now().strftime(
            '%Y-%m-%d-%H-%M-%S') + '.pkl'
        with open(modFileName, 'wb') as f:
            pickle.dump(data, f)

        del data
        # Relocate from the working directory into the ROS package tree.
        shutil.move(
            os.getcwd() + "/" + modFileName,
            rospkg.RosPack().get_path('spc_uav_comm') + "/scripts/practical/" +
            modFileName)
        print "Results Saved !"

    # ---------------- Dynamic Reconfigure Functions --------------- #

    def add_dyn_ctr_var(self):
        """Add the selected variable as a controlled one with [min, max]
        bounds taken from the entry fields."""
        name = self.dyn_ctr_bottom['var'].get()
        try:
            low = float(self.dyn_ctr_bottom['min'].get())
            high = float(self.dyn_ctr_bottom['max'].get())
            if self.learning_client.add_dyn_ctr_var(name, low, high):
                self.learning_client.set_dyn_reconfig_var(
                    name, self.get_float_entry(self.dyn_ctr_bottom['val']))
                self.add_dyn_ctr_row(name, low, high)
                self.update_info()
                self.toggle_lrning_ctr()
        except ValueError:
            return  # TODO: Warn the user for invalid input

    def add_dyn_fix_var(self):
        """Add the selected variable as a fixed one and push its value."""
        name = self.dyn_fix_bottom['var'].get()
        if not self.learning_client.add_dyn_fix_var(name):
            return
        value = self.get_float_entry(self.dyn_fix_bottom['val'])
        self.learning_client.set_dyn_reconfig_var(name, value)
        self.add_dyn_fix_row(name)

    def add_dyn_link_var(self):
        """Link the two selected variables and add a GUI row for them."""
        first = self.dyn_link_bottom['var1'].get()
        second = self.dyn_link_bottom['var2'].get()
        if self.learning_client.add_dyn_link_var(first, second):
            self.add_dyn_link_row(first, second)

    def add_dyn_ctr_row(self, var, min_val, max_val):
        """Append a GUI table row for a newly added controlled variable."""
        widgets = {
            'var': tk.Label(self.ctr_vars_frame,
                            font=small_font,
                            text=var,
                            anchor='e'),
            'min': tk.Label(self.ctr_vars_frame,
                            font=small_font,
                            text=min_val),
            'val': tk.Entry(self.ctr_vars_frame, font=small_font, width=5),
            'max': tk.Label(self.ctr_vars_frame,
                            font=small_font,
                            text=max_val),
            'del': tk.Button(self.ctr_vars_frame,
                             font=small_font,
                             text='x',
                             bd=0,
                             relief='flat',
                             command=lambda: self.delete_dyn_var(var)),
        }
        # Pre-fill the value entry with the current reconfigure value.
        widgets['val'].insert(0, str(self.learning_client.current_config[var]))
        self.selected_ctr_vars.append((var, widgets))
        self.arrange_columns(widgets, 'var', 'min', 'val', 'max', 'del')
        self.place_dyn_row(widgets, len(self.selected_ctr_vars))
        self.update_bottoms()

    def add_dyn_fix_row(self, var):
        """Append a GUI table row for a newly added fixed variable."""
        widgets = {
            'var': tk.Label(self.fix_vars_frame,
                            font=small_font,
                            text=var,
                            anchor='e'),
            'val': tk.Entry(self.fix_vars_frame, font=small_font, width=5),
            'del': tk.Button(self.fix_vars_frame,
                             font=small_font,
                             text='x',
                             bd=0,
                             relief='flat',
                             command=lambda: self.delete_dyn_var(var)),
        }
        # Pre-fill the value entry with the current reconfigure value.
        widgets['val'].insert(0, str(self.learning_client.current_config[var]))
        self.selected_fix_vars.append((var, widgets))
        self.arrange_columns(widgets, 'var', 'val', 'del')
        self.place_dyn_row(widgets, len(self.selected_fix_vars))
        self.update_bottoms()

    def add_dyn_link_row(self, var1, var2):
        """Append a GUI table row showing that var1 is linked to var2."""
        widgets = {
            'var1': tk.Label(self.link_vars_frame,
                             font=small_font,
                             text=var1,
                             anchor='e'),
            'var2': tk.Label(self.link_vars_frame,
                             font=small_font,
                             text='= ' + var2,
                             anchor='e'),
            'del': tk.Button(self.link_vars_frame,
                             font=small_font,
                             text='x',
                             bd=0,
                             relief='flat',
                             command=lambda: self.delete_dyn_var(var1)),
        }
        self.selected_link_vars.append((var1, widgets))
        self.arrange_columns(widgets, 'var1', 'var2', 'del')
        self.place_dyn_row(widgets, len(self.selected_link_vars))
        self.update_bottoms()

    def arrange_columns(self, items, *columns):
        """Re-grid the named widgets into successive columns (0, 1, ...).

        items -- dict of widgets keyed by name
        columns -- widget keys in left-to-right column order
        """
        # enumerate() instead of range(len(...)): the index is the column.
        for col, key in enumerate(columns):
            config = items[key].grid_info()
            config['column'] = col
            # NOTE(review): passes grid_info() back to grid() wholesale;
            # works on the Tk version this targets -- verify on newer Tk,
            # where grid_info() also reports an 'in' key.
            items[key].grid(config)

    def place_dyn_row(self, items, row):
        """Move every widget of a row-dict to the given grid row."""
        for widget in items.values():
            config = widget.grid_info()
            config['row'] = row
            widget.grid(config)

    def delete_dyn_var(self, var):
        """Remove `var` from the reconfigure client and rebuild every
        variable table, then refresh the dependent controls."""
        self.learning_client.delete_dyn_reconfig_var(var)
        for selected in (self.selected_ctr_vars, self.selected_fix_vars,
                         self.selected_link_vars):
            self.update_vars_rows(selected)
        self.update_bottoms()
        self.update_info()
        self.toggle_lrning_ctr()

    def update_vars_rows(self, selected_vars):
        """Drop rows whose variable became selectable again (i.e. was
        deleted from the client) and re-pack the survivors from grid row 1.

        Mutates `selected_vars` in place, since callers hold references.
        """
        keep = []
        for var, items in selected_vars:
            if var in self.learning_client.get_selectable_vars():
                # Row's variable was removed: destroy its widgets.
                for widget in items.values():
                    widget.destroy()
            else:
                keep.append((var, items))
        selected_vars[:] = keep
        for row, (_, items) in enumerate(keep, 1):
            self.place_dyn_row(items, row)

    def update_bottoms(self):
        """Re-place the three 'add variable' input rows below their tables."""
        for place in (self.place_dyn_ctr_bottom, self.place_dyn_fix_bottom,
                      self.place_dyn_link_bottom):
            place()

    def place_dyn_ctr_bottom(self):
        """Grid the controlled-variable input row under the last table row."""
        grid_row = len(self.selected_ctr_vars) + 2
        self.dyn_ctr_bottom['opt'] = self.set_optionmenu(
            self.ctr_vars_frame, self.dyn_ctr_bottom['opt'],
            self.dyn_ctr_bottom['var'],
            self.learning_client.get_selectable_vars())
        self.dyn_ctr_bottom['opt'].grid(row=grid_row,
                                        column=0,
                                        sticky="we",
                                        padx=2,
                                        pady=2)
        self.dyn_ctr_bottom['min'].grid(row=grid_row, column=1)
        self.dyn_ctr_bottom['val'].grid(row=grid_row, column=2)
        self.dyn_ctr_bottom['max'].grid(row=grid_row, column=3)
        self.dyn_ctr_bottom['add'].grid(row=grid_row,
                                        column=4,
                                        sticky="we",
                                        padx=2,
                                        pady=2)

    def place_dyn_fix_bottom(self):
        """Grid the fixed-variable input row under the last table row."""
        grid_row = len(self.selected_fix_vars) + 2
        self.dyn_fix_bottom['opt'] = self.set_optionmenu(
            self.fix_vars_frame, self.dyn_fix_bottom['opt'],
            self.dyn_fix_bottom['var'],
            self.learning_client.get_selectable_vars())
        self.dyn_fix_bottom['opt'].grid(row=grid_row,
                                        column=0,
                                        sticky="we",
                                        padx=2,
                                        pady=2)
        self.dyn_fix_bottom['val'].grid(row=grid_row, column=1)
        self.dyn_fix_bottom['add'].grid(row=grid_row,
                                        column=2,
                                        sticky="we",
                                        padx=2,
                                        pady=2)

    def place_dyn_link_bottom(self):
        """Grid the linked-variable input row under the last table row."""
        grid_row = len(self.selected_link_vars) + 2
        # First menu offers selectable vars, second the linkable targets.
        self.dyn_link_bottom['opt1'] = self.set_optionmenu(
            self.link_vars_frame, self.dyn_link_bottom['opt1'],
            self.dyn_link_bottom['var1'],
            self.learning_client.get_selectable_vars())
        self.dyn_link_bottom['opt2'] = self.set_optionmenu(
            self.link_vars_frame, self.dyn_link_bottom['opt2'],
            self.dyn_link_bottom['var2'],
            self.learning_client.get_linkable_vars())
        self.dyn_link_bottom['opt1'].grid(row=grid_row,
                                          column=0,
                                          sticky="we",
                                          padx=2,
                                          pady=2)
        self.dyn_link_bottom['opt2'].grid(row=grid_row,
                                          column=1,
                                          sticky="we",
                                          padx=2,
                                          pady=2)
        self.dyn_link_bottom['add'].grid(row=grid_row,
                                         column=2,
                                         sticky="we",
                                         padx=2,
                                         pady=2)

    def set_optionmenu(self, frame, optionmenu, var, options):
        """Rebuild an OptionMenu with the given options and return it.

        With a non-empty option list the old widget is destroyed and a new
        one created with the options in sorted order; with an empty list
        the existing widget is kept but disabled.

        Fix: use sorted() instead of list.sort() so the caller's option
        list is no longer mutated as a side effect.
        """
        if options:
            var.set('Select Variable')
            optionmenu.destroy()
            optionmenu = tk.OptionMenu(frame, var, *sorted(options))
            optionmenu.config(width=11, font=small_font)
        else:
            var.set('Not Available')
            optionmenu.config(width=11, font=small_font, state='disabled')
        return optionmenu

    def show_current_dyn_reconfig(self):
        """Refresh every row's value Entry with the live reconfigure value."""
        self.learning_client.refresh_current_config()
        current = self.learning_client.current_config
        for var, items in self.selected_ctr_vars + self.selected_fix_vars:
            items['val'].delete(0, 'end')
            items['val'].insert(0, str(current[var]))

    def restore_dyn_reconfig_default(self):
        """Reset the reconfigure server to defaults and refresh the display
        shortly afterwards."""
        self.learning_client.default_dyn_reconfig()
        self.parent.after(10, self.show_current_dyn_reconfig)

    def set_dyn_reconfig_max(self):
        """Push the best parameter set found so far to the server."""
        if not self.learning_initialized:
            return
        best = self.bo.space.max_point()['max_params']
        self.learning_client.send_controlled_vars(**best)
        self.parent.after(10, self.show_current_dyn_reconfig)

    def send_dyn_reconfig_vals(self):
        """Send every row's entered value to the reconfigure server, then
        refresh the display with what the server actually holds."""
        for var, items in self.selected_ctr_vars + self.selected_fix_vars:
            value = self.get_float_entry(items['val'])
            self.learning_client.set_dyn_reconfig_var(var, value)
        self.parent.after(10, self.show_current_dyn_reconfig)
import scipy.io as sio
import numpy as np
import matplotlib.pyplot as plt
from bayes_opt import BayesianOptimization


def lossfunction(lr, w1, w2):
    """Dummy objective: a random loss in [0, 20), returned negated so the
    maximizing optimizer effectively minimizes it. The hyperparameter
    arguments (lr, w1, w2) are ignored."""
    return -(np.random.uniform() * 20)


# Optimize the dummy loss over three hyperparameters, each in [0.3, 3.0].
bo = BayesianOptimization(lossfunction, {
    'lr': (0.3, 3.0),
    'w1': (0.3, 3.0),
    'w2': (0.3, 3.0)
})

# Seed the optimizer with previously evaluated points so it does not start
# from scratch ('target' holds the outputs observed at those points).
bo.initialize({
    'target': [
        -16.626141084222657, -16.656124820118073, -16.62569318934877,
        -16.62492867148469
    ],
    'lr': [1.6879, 2.1292, 0.3, 0.3],
    'w1': [3.0000, 2.6128, 0.3, 0.3],
    'w2': [0.9983, 2.0528, 0.3, 3.0]
})
# Two purely model-driven UCB iterations (no additional random init points).
bo.maximize(init_points=0, n_iter=2, acq='ucb', kappa=2)
# Esempio n. 13
# 0
def main():
    """Bayesian optimization of four working-point (WP) thresholds.

    Python 2 script. Expects three CLI arguments: reference dir, output
    dir, luminosity. The cost is computed by an external binary; every
    evaluated point is persisted to <out_dir>/evaluations.txt so an
    interrupted run can be resumed with those points as initialization.
    """
    global evalcnt

    # NOTE(review): error message only — there is no sys.exit here, so with
    # a wrong argument count the sys.argv reads below still raise IndexError.
    if len(sys.argv) != 4:
        print "Error: exactly 3 arguments are required"

    ref_dir = sys.argv[1]
    out_dir = sys.argv[2]
    lumi = float(sys.argv[3])

    print ref_dir
    print out_dir
    print lumi

    def punzi_target(WP_VBF2j, WP_VBF1j, WP_WHh, WP_ZHh):
        """Run the external WP evaluator and return its cost (to maximize).

        Side effect: appends the sampled point to evaluations.txt and
        increments the global evaluation counter.
        """
        global evalcnt

        bin_dir = "/home/llr/cms/wind/cmssw/CMSSW_9_4_2/bin/slc6_amd64_gcc630/"
        cost_function_evaluator = "run_WP_evaluator"

        output = check_output([
            bin_dir + cost_function_evaluator, ref_dir, out_dir,
            str(lumi),
            str(WP_VBF2j),
            str(WP_VBF1j),
            str(WP_WHh),
            str(WP_ZHh)
        ])

        costval = 0.0

        # Parse the "cost = <float>" line out of the evaluator's stdout.
        for line in output.split('\n'):
            if "cost = " in line:
                costval = float(line.replace("cost = ", ""))
                break

        # -8.75 is a sentinel penalty for NaN costs — TODO confirm its scale
        # relative to real cost values.
        if math.isnan(costval):
            costval = -8.75

        # save the sampled point such that later they can be used as exploration points (if the need occurs)
        confhandler = ConfigFileHandler()
        evaluations_path = out_dir + 'evaluations.txt'

        if os.path.exists(evaluations_path):
            confhandler.load_configuration(evaluations_path)

        print "saving evaluation for iteration " + str(evalcnt)

        section_name = 'evaluation_' + str(evalcnt)
        confhandler.new_section(section_name)
        confhandler.set_field(section_name, 'cost', str(costval))
        confhandler.set_field(section_name, 'WP_VBF2j', str(WP_VBF2j))
        confhandler.set_field(section_name, 'WP_VBF1j', str(WP_VBF1j))
        confhandler.set_field(section_name, 'WP_WHh', str(WP_WHh))
        confhandler.set_field(section_name, 'WP_ZHh', str(WP_ZHh))

        confhandler.save_configuration(evaluations_path)

        evalcnt += 1

        return costval

    # eps keeps the WPs strictly inside (0, 1).
    # NOTE(review): delta is unused in this function.
    eps = 1e-3
    delta = 0.2
    bo = BayesianOptimization(
        punzi_target, {
            'WP_VBF2j': (eps, 1.0 - eps),
            'WP_VBF1j': (eps, 1.0 - eps),
            'WP_WHh': (eps, 1.0 - eps),
            'WP_ZHh': (eps, 1.0 - eps)
        })

    # check if a file with previously evaluated points exists, if so, use them for initialization
    confhandler = ConfigFileHandler()
    evaluations_path = out_dir + 'evaluations.txt'

    if os.path.exists(evaluations_path):
        confhandler.load_configuration(evaluations_path)

        targets_init = []
        WP_VBF2j_init = []
        WP_VBF1j_init = []
        WP_WHh_init = []
        WP_ZHh_init = []

        for section_name in confhandler.get_sections():
            cur_section = confhandler.get_section(section_name)

            targets_init.append(float(cur_section['cost']))
            WP_VBF2j_init.append(float(cur_section['WP_VBF2j']))
            WP_VBF1j_init.append(float(cur_section['WP_VBF1j']))
            WP_WHh_init.append(float(cur_section['WP_WHh']))
            WP_ZHh_init.append(float(cur_section['WP_ZHh']))

        init_dict = {
            'target': targets_init,
            'WP_VBF2j': WP_VBF2j_init,
            'WP_VBF1j': WP_VBF1j_init,
            'WP_WHh': WP_WHh_init,
            'WP_ZHh': WP_ZHh_init
        }

        # Resume the counter from the last persisted section name
        # ('evaluation_<n>'); assumes sections are in insertion order.
        evalcnt = int(re.sub('evaluation_', '',
                             confhandler.get_sections()[-1])) + 1

        print "resuming at evaluation " + str(evalcnt)

        bo.initialize(init_dict)
        initialized = True
    else:
        initialized = False

    # change the kernel to have a length scale more appropriate to this function
    gp_params = {
        'kernel':
        1.0 *
        Matern(length_scale=0.05, length_scale_bounds=(1e-5, 1e5), nu=1.5),
        'alpha':
        1e-5
    }

    # perform the standard initialization and setup
    # (n_iter=0 here: this call only fits the GP / samples init points.)
    if initialized:
        bo.maximize(init_points=0,
                    n_iter=0,
                    acq='poi',
                    kappa=3,
                    xi=xi_scheduler(0.0),
                    **gp_params)
    else:
        bo.maximize(init_points=6,
                    n_iter=0,
                    acq='poi',
                    kappa=3,
                    xi=xi_scheduler(0.0),
                    **gp_params)

    # Main loop: one BO step per iteration with a scheduled xi
    # (exploration parameter for POI); xi_scheduler is defined elsewhere.
    # NOTE(review): init_points=6 inside the loop re-samples 6 random points
    # every iteration — confirm that is intended.
    cur_iteration = 1
    for it in range(1000):
        cur_xi = xi_scheduler(cur_iteration)
        cur_iteration += 1
        print "using xi = " + str(cur_xi)

        bo.maximize(init_points=6,
                    n_iter=1,
                    acq='poi',
                    kappa=3,
                    xi=cur_xi,
                    **gp_params)

        # evaluate the current maximum
        curval = bo.res['max']
        cost = curval['max_val']
        WPs = curval['max_params']

        # Persist the running best to WPs.txt (overwritten each iteration).
        confhandler = ConfigFileHandler()
        confhandler.config.optionxform = str
        confhandler.new_section('WPs')
        confhandler.set_field('WPs', 'cost', str(cost))

        for key, val in WPs.iteritems():
            confhandler.set_field('WPs', key, str(val))

        confhandler.save_configuration(out_dir + 'WPs.txt')
Esempio n. 14
0
    'x': (-4, 4),
    'y': (-3, 3)
})

# NOTE(review): `bo` is constructed above this fragment (its definition is
# truncated in this excerpt); the calls below assume parameters 'x' and 'y'.
# One of the things we can do with this object is pass points
# which we want the algorithm to probe. A dictionary with the
# parameters names and a list of values to include in the search
# must be given.
bo.explore({'x': [-1, 3], 'y': [-2, 2]})

# Additionally, if we have any prior knowledge of the behaviour of
# the target function (even if not totally accurate) we can also
# tell that to the optimizer.
# Here we pass a dictionary with 'target' and parameter names as keys and a
# list of corresponding values
bo.initialize({'target': [-1, -1], 'x': [1, 1], 'y': [0, 2]})

# Once we are satisfied with the initialization conditions
# we let the algorithm do its magic by calling the maximize()
# method.
bo.maximize(init_points=5, n_iter=15, kappa=2)

# The output values can be accessed with self.res
print(bo.res['max'])

# If we are not satisfied with the current results we can pickup from
# where we left, maybe pass some more exploration points to the algorithm
# change any parameters we may choose, and the let it run again.
bo.explore({'x': [0.6], 'y': [-0.23]})

# Making changes to the gaussian process can impact the algorithm
Esempio n. 15
0
    idx = round(1000 * (np.log(2) - s), scoredp)
    featimp = featimp.append(pd.Series(fscores, name=idx))
    return idx


while True:
    init_points = args.init
    n_iter = args.iter
    scaledrange = {k: (0, 1) for k in p_range.keys()}
    bo = BayesianOptimization(score, scaledrange)
    if p:
        bo.initialize({
            k: {
                pk: (pv - p_range[pk][0]) / (p_range[pk][1] - p_range[pk][0])
                for pk, pv in param.iteritems()
            }
            for k, param in p.iteritems()
        })
    else:
        init_points, n_iter = 5, 0
    if not args.trunc:
        bo.maximize(init_points=init_points, n_iter=n_iter, acq=args.acq)
        featimp_cur = featimp
        p_new = {}
        for i in xrange(len(bo.Y)):
            if bo.Y[i] not in bo.y_init:
                p_new[bo.Y[i].round(scoredp)] = {
                    bo.keys[j]: p_range[bo.keys[j]][0] * (1 - bo.X[i, j]) +
                    p_range[bo.keys[j]][1] * bo.X[i, j]
                    for j in xrange(len(bo.keys))
Esempio n. 16
0
# NOTE(review): near-duplicate of the previous demo fragment; `bo` is
# constructed above this excerpt (definition truncated).
# One of the things we can do with this object is pass points
# which we want the algorithm to probe. A dictionary with the
# parameters names and a list of values to include in the search
# must be given.
bo.explore({'x': [-1, 3], 'y': [-2, 2]})

# Additionally, if we have any prior knowledge of the behaviour of
# the target function (even if not totally accurate) we can also
# tell that to the optimizer.
# Here we pass a dictionary with 'target' and parameter names as keys and a
# list of corresponding values
bo.initialize(
    {
        'target': [-1, -1],
        'x': [1, 1],
        'y': [0, 2]
    }
)

# Once we are satisfied with the initialization conditions
# we let the algorithm do its magic by calling the maximize()
# method.
bo.maximize(init_points=5, n_iter=15, kappa=2)

# The output values can be accessed with self.res
print(bo.res['max'])

# If we are not satisfied with the current results we can pickup from
# where we left, maybe pass some more exploration points to the algorithm
# change any parameters we may choose, and the let it run again.
Esempio n. 17
0
def main():
    """Bayesian optimization of eight category priors with regularization.

    Python 2 script. Expects three CLI arguments: run dir, output dir,
    engine. An external binary computes the cost; a quadratic penalty pulls
    priors towards 1.0. Evaluations are persisted to
    <out_dir>/evaluations.txt so interrupted runs can resume.
    """
    global evalcnt

    # NOTE(review): no sys.exit after the message — the sys.argv reads below
    # will still raise IndexError on a wrong argument count.
    if len(sys.argv) != 4:
        print "Error: exactly 3 arguments are required"

    run_dir = sys.argv[1]
    out_dir = sys.argv[2]
    engine = sys.argv[3]

    print run_dir
    print out_dir
    print engine

    # punzi_target_2d = lambda WHlept_prior, ZHlept_prior: punzi_target(ggH_prior_default, WHhadr_prior_default, ZHhadr_prior_default,
    #                                                                       WHlept_prior, ZHlept_prior, ZHMET_prior_default,
    #                                                                       ttHhadr_prior_default, ttHlept_prior_default)

    def punzi_target(ggH_prior, WHhadr_prior, ZHhadr_prior, WHlept_prior,
                     ZHlept_prior, ZHMET_prior, ttHhadr_prior, ttHlept_prior):
        """Run the external prior evaluator; return regularized cost.

        Side effect: appends the sampled point to evaluations.txt and
        increments the global evaluation counter.
        """
        global evalcnt

        bin_dir = "/home/llr/cms/wind/cmssw/CMSSW_9_4_2/bin/slc6_amd64_gcc630/"
        cost_function_evaluator = "run_prior_evaluator"

        output = check_output([
            bin_dir + cost_function_evaluator, run_dir, out_dir, engine,
            str(ggH_prior),
            str(WHhadr_prior),
            str(ZHhadr_prior),
            str(WHlept_prior),
            str(ZHlept_prior),
            str(ZHMET_prior),
            str(ttHhadr_prior),
            str(ttHlept_prior)
        ])

        costval = 0.0

        # Parse the "cost = <float>" line out of the evaluator's stdout.
        for line in output.split('\n'):
            if "cost = " in line:
                costval = float(line.replace("cost = ", ""))
                break

        # -8.75 is a sentinel penalty for NaN costs — TODO confirm its scale.
        if math.isnan(costval):
            costval = -8.75

        # add a regularization term that prefers default priors (i.e. close to 1.0)
        # lambda_reg is the regularization strength, defined elsewhere.
        reg_term = 1.0 / 8.0 * (
            (ggH_prior - 1.0)**2.0 + (WHhadr_prior - 1.0)**2.0 +
            (ZHhadr_prior - 1.0)**2.0 + (WHlept_prior - 1.0)**2.0 +
            (ZHlept_prior - 1.0)**2.0 + (ZHMET_prior - 1.0)**2.0 +
            (ttHhadr_prior - 1.0)**2.0 + (ttHlept_prior - 1.0)**2.0)
        costval -= reg_term * lambda_reg

        # save the sampled point such that later they can be used as exploration points (if the need occurs)
        confhandler = ConfigFileHandler()
        evaluations_path = out_dir + 'evaluations.txt'

        if os.path.exists(evaluations_path):
            confhandler.load_configuration(evaluations_path)

        print "saving evaluation for iteration " + str(evalcnt)

        section_name = 'evaluation_' + str(evalcnt)
        confhandler.new_section(section_name)
        confhandler.set_field(section_name, 'cost', str(costval))
        confhandler.set_field(section_name, 'ggH_prior', str(ggH_prior))
        confhandler.set_field(section_name, 'WHhadr_prior', str(WHhadr_prior))
        confhandler.set_field(section_name, 'ZHhadr_prior', str(ZHhadr_prior))
        confhandler.set_field(section_name, 'WHlept_prior', str(WHlept_prior))
        confhandler.set_field(section_name, 'ZHlept_prior', str(ZHlept_prior))
        confhandler.set_field(section_name, 'ZHMET_prior', str(ZHMET_prior))
        confhandler.set_field(section_name, 'ttHhadr_prior',
                              str(ttHhadr_prior))
        confhandler.set_field(section_name, 'ttHlept_prior',
                              str(ttHlept_prior))

        confhandler.save_configuration(evaluations_path)

        evalcnt += 1

        return costval

    # eps is the lower bound of most priors; ggH gets a symmetric band
    # of width delta around 1.0.
    eps = 1e-1
    delta = 0.2
    bo = BayesianOptimization(
        punzi_target, {
            'ggH_prior': (1.0 - delta, 1.0 + delta),
            'WHhadr_prior': (eps, 1.0),
            'ZHhadr_prior': (eps, 1.0),
            'WHlept_prior': (eps, 1.0),
            'ZHlept_prior': (eps, 1.0),
            'ZHMET_prior': (eps, 1.0),
            'ttHhadr_prior': (eps, 1.0),
            'ttHlept_prior': (eps, 1.0)
        })

    # bo = BayesianOptimization(punzi_target_2d, {'WHlept_prior': (eps, WHlept_prior_default + delta),
    #                                                  'ZHlept_prior': (eps, ZHlept_prior_default + delta)})

    # check if a file with previously evaluated points exists, if so, use them for initialization
    confhandler = ConfigFileHandler()
    evaluations_path = out_dir + 'evaluations.txt'

    if os.path.exists(evaluations_path):
        confhandler.load_configuration(evaluations_path)

        ggH_priors_init = []
        WHhadr_priors_init = []
        ZHhadr_priors_init = []
        WHlept_priors_init = []
        ZHlept_priors_init = []
        ZHMET_priors_init = []
        ttHhadr_priors_init = []
        ttHlept_priors_init = []
        targets_init = []

        for section_name in confhandler.get_sections():
            cur_section = confhandler.get_section(section_name)

            targets_init.append(float(cur_section['cost']))
            ggH_priors_init.append(float(cur_section['ggH_prior']))
            WHhadr_priors_init.append(float(cur_section['WHhadr_prior']))
            ZHhadr_priors_init.append(float(cur_section['ZHhadr_prior']))
            WHlept_priors_init.append(float(cur_section['WHlept_prior']))
            ZHlept_priors_init.append(float(cur_section['ZHlept_prior']))
            ZHMET_priors_init.append(float(cur_section['ZHMET_prior']))
            ttHhadr_priors_init.append(float(cur_section['ttHhadr_prior']))
            ttHlept_priors_init.append(float(cur_section['ttHlept_prior']))

        init_dict = {
            'target': targets_init,
            'ggH_prior': ggH_priors_init,
            'WHhadr_prior': WHhadr_priors_init,
            'ZHhadr_prior': ZHhadr_priors_init,
            'WHlept_prior': WHlept_priors_init,
            'ZHlept_prior': ZHlept_priors_init,
            'ZHMET_prior': ZHMET_priors_init,
            'ttHhadr_prior': ttHhadr_priors_init,
            'ttHlept_prior': ttHlept_priors_init
        }

        # Resume the counter from the last persisted 'evaluation_<n>' section;
        # assumes sections are in insertion order.
        evalcnt = int(re.sub('evaluation_', '',
                             confhandler.get_sections()[-1])) + 1

        print "resuming at evaluation " + str(evalcnt)

        bo.initialize(init_dict)
        initialized = True
    else:
        initialized = False

    # change the kernel to have a length scale more appropriate to this function
    # alpha ... corresponds to the value added to the diagonal elements of the covariance matrix <-> the approximate noise level in the observations
    gp_params = {
        'kernel':
        1.0 *
        Matern(length_scale=0.05, length_scale_bounds=(1e-5, 1e5), nu=1.5),
        'alpha':
        1e-1
    }

    # perform the standard initialization and setup
    # (n_iter=0: this call only fits the GP / samples init points.)
    if initialized:
        bo.maximize(init_points=0,
                    n_iter=0,
                    acq='poi',
                    kappa=3,
                    xi=xi_scheduler(0.0),
                    **gp_params)
    else:
        bo.maximize(init_points=6,
                    n_iter=0,
                    acq='poi',
                    kappa=3,
                    xi=xi_scheduler(0.0),
                    **gp_params)

    # Main loop: one BO step per iteration with a scheduled xi.
    # NOTE(review): init_points=6 inside the loop re-samples 6 random points
    # every iteration — confirm that is intended.
    cur_iteration = 1
    for it in range(1000):
        cur_iteration += 1

        cur_xi = xi_scheduler(cur_iteration)
        print "using xi = " + str(cur_xi)

        bo.maximize(init_points=6,
                    n_iter=1,
                    acq='poi',
                    kappa=3,
                    xi=cur_xi,
                    **gp_params)

        # evaluate the current maximum
        curval = bo.res['max']
        cost = curval['max_val']
        priors = curval['max_params']

        # Persist the running best to priors.txt (overwritten each iteration);
        # VBF_prior is fixed at 1.0 and written explicitly.
        confhandler = ConfigFileHandler()
        confhandler.config.optionxform = str
        confhandler.new_section('Priors')
        confhandler.set_field('Priors', 'cost', str(cost))
        confhandler.set_field('Priors', 'VBF_prior', str(1.0))

        for key, val in priors.iteritems():
            confhandler.set_field('Priors', key, str(val))

        confhandler.save_configuration(out_dir + 'priors.txt')
Esempio n. 18
0
def start_naive_bayes(automated_run, session, path):
    """Run a Bayesian-optimization automated run over a base learner.

    Loads the run's source as a module, warm-starts the optimizer with any
    already-finished base learners whose non-searchable hyperparameters
    match, then maximizes the configured metric and marks the run finished.

    Args:
        automated_run (xcessiv.models.AutomatedRun): Automated run object

        session: Valid SQLAlchemy session

        path (str, unicode): Path to project folder
    """
    module = functions.import_string_code_as_module(automated_run.source)
    random_state = getattr(module, 'random_state', 8)
    assert module.metric_to_optimize in automated_run.base_learner_origin.metric_generators

    # Parameters not in the search space must match exactly for a previous
    # learner to count as a prior observation.
    estimator = automated_run.base_learner_origin.return_estimator()
    estimator.set_params(**module.default_params)
    serializable_params = functions.make_serializable(estimator.get_params())
    fixed_params = {key: val
                    for key, val in iteritems(serializable_params)
                    if key not in module.pbounds}

    # Finished base learners lying inside the current search space.
    matching_learners = [
        learner for learner in automated_run.base_learner_origin.base_learners
        if learner.job_status == 'finished'
        and all(learner.hyperparameters[key] == val
                for key, val in iteritems(fixed_params))
    ]

    # Build the optimizer's initialization dictionary from prior learners,
    # skipping any whose searchable hyperparameters are not all numerical.
    target = []
    initialization_dict = {key: [] for key in module.pbounds.keys()}
    for learner in matching_learners:
        if not all(isinstance(learner.hyperparameters[key], numbers.Number)
                   for key in module.pbounds.keys()):
            continue
        for key in module.pbounds.keys():
            initialization_dict[key].append(learner.hyperparameters[key])
        target.append(learner.individual_score[module.metric_to_optimize])
    # Negate targets when the metric is minimized (BO always maximizes).
    initialization_dict['target'] = [-score for score in target] \
        if module.invert_metric else target
    print('{} existing in initialization dictionary'.format(
        len(initialization_dict['target'])))

    # Objective closure that trains/scores a base learner per sample.
    func_to_optimize = return_func_to_optimize(
        path, session, automated_run.base_learner_origin,
        module.default_params, module.metric_to_optimize, module.invert_metric,
        set(module.integers))

    optimizer = BayesianOptimization(func_to_optimize, module.pbounds)
    optimizer.initialize(initialization_dict)

    np.random.seed(random_state)

    optimizer.maximize(**module.maximize_config)

    automated_run.job_status = 'finished'
    session.add(automated_run)
    session.commit()
Esempio n. 19
0
    def run(self, num_iterations, kappa=10):
        """
        Fetches latest t-SNE model from DB. Collects pickled BO status object, if existent.
        Intermediate t-SNE models are persisted.
        :param num_iterations: Number of iterations BO should run.
        :param kappa: UCB exploration/exploitation trade-off passed to BO.
        :return:
        """

        # 1. Load all previous t-SNE parameters and results (read_metadata_for_run). Consider which params are
        #    fixed though! Put only dynamic ones in dict. for BO, static ones should be made available via class
        #    attribute.
        self.run_metadata = self.db_connector.read_metadata_for_run(
            self.run_name)

        # Set fixed parameters.
        self.fixed_parameters = self._update_parameter_dictionary(
            run_iter_metadata=self.run_metadata[0], is_fixed=True)
        self.variable_parameters = self._update_parameter_dictionary(
            run_iter_metadata=self.run_metadata[0], is_fixed=False)

        # 2. Generate dict object for BO.initialize from t-SNE metadata.
        initialization_dataframe = pandas.DataFrame.from_dict(
            self.run_metadata)
        # Drop non-hyperparameter columns.
        initialization_dataframe.drop(TSNEModel.ISFIXED_COLUMN_NAMES,
                                      inplace=True,
                                      axis=1)

        # Create initialization dictionary.
        # column_name[3:-6] strips the 'is_' prefix and '_fixed' suffix from
        # the ISFIXED column names to recover the parameter name — TODO
        # confirm against TSNEModel.ISFIXED_COLUMN_NAMES format.
        initialization_dict = {
            column_name[3:-6]:
            initialization_dataframe[column_name[3:-6]].values.tolist()
            for column_name in TSNEModel.ISFIXED_COLUMN_NAMES
        }
        # Add target values (model quality) to initialization dictionary.
        initialization_dict["target"] = initialization_dataframe[
            "measure_user_quality"].values.tolist()
        # Replace categorical values (strings) with integer representations.
        initialization_dict["metric"] = [
            TSNEModel.CATEGORICAL_VALUES["metric"].index(metric)
            for metric in initialization_dict["metric"]
        ]
        initialization_dict["init_method"] = [
            TSNEModel.CATEGORICAL_VALUES["init_method"].index(metric)
            for metric in initialization_dict["init_method"]
        ]

        # 3. Create BO object.
        parameter_ranges = copy.deepcopy(TSNEModel.PARAMETER_RANGES)
        # Update key for min. gradient norm, since for whatever reason BO optimizer wrecks this number.
        parameter_ranges["min_grad_norm"] = (-10, -7)

        # Drop all fixed parameters' ranges and entries in initialization dictionary.
        for key in self.fixed_parameters:
            if self.fixed_parameters[key] is not None:
                del parameter_ranges[key]
                del initialization_dict[key]

        # Create optimization object.
        bo = BayesianOptimization(self._calculate_tsne_quality,
                                  parameter_ranges)

        # Pass previous results to BO instance.
        bo.initialize(initialization_dict)

        # 4. Execute optimization.
        # A quarter of the budget (at least 1) goes to random init points.
        num_init_points = max(int(num_iterations / 4), 1)
        bo.maximize(init_points=num_init_points,
                    n_iter=(num_iterations - num_init_points),
                    kappa=kappa,
                    acq='ucb')
        # NOTE(review): everything below (from `def target` on) looks like a
        # pasted standalone bayes_opt demo; it executes at the end of every
        # run() call on an unrelated 1-D toy function — verify intent.
        def target(x):
            return np.exp(-(x - 2)**2) + np.exp(-(x - 6)**2 / 10) + 1 / (x**2 + 1)


        x = np.linspace(-2, 10, 10000).reshape(-1, 1)
        y = target(x)

        bo = BayesianOptimization(target, {'x': (-2, 10)})

        # Additionally, if we have any prior knowledge of the behaviour of
        # the target function (even if not totally accurate) we can also
        # tell that to the optimizer.
        # Here we pass a dictionary with 'target' and parameter names as keys and a
        # list of corresponding values
        bo.initialize({
            'target': [0.20166, 1.08328, 1.30455, 0.21180],
            'x': [-2, 2.6812, 1.6509, 10]
        })

        # utility = Upper Confidence Bound
        # alternative acq = 'ei' (Expected Improvement)
        # kappa = exploration vs exploitation. 10 -> much exploration, 1 -> only tight exploitation
        bo.maximize(init_points=5, n_iter=5, acq='ucb', kappa=5)

        # The output values can be accessed with self.res
        print('Best param/output so far:', bo.res['max'])

        utility = bo.util.utility(x, bo.gp, 0)
        print('Best param to test next:', x[np.argmax(utility)])

        y_max = bo.Y.max()
        x_max = helpers.acq_max(ac=bo.util.utility, gp=bo.gp, y_max=y_max, bounds=bo.bounds)
Esempio n. 21
0
        fscore = fi['Expected Gain'].to_dict() #Gain, FScore, wFScore, Average wFScore, Average Gain, Expected Gain
        meanscore = np.average(fscore.values())
        for k in fscore.keys(): fscore[k]/=meanscore*len(fscore)
        featimpmean=featimpmean.fillna(1./featimp.shape[1])
        normalization = featimpmean[chosen_feat].sum()/featimpmean.sum()/np.sum(fscore.values())/kftune.get_n_splits()
        for k,v in fscore.iteritems():
          fscores[k]+=normalization*v
      curscore = -mean_absolute_error(yback(y_pred,params),yback(train_y,params))
      featimp = featimp.append(pd.Series(fscores,name=round(curscore,4)))
      return curscore

# Python 2 tuning loop: repeatedly run BO over `p_range`, harvesting newly
# sampled points into p_new, until the coordination pickle file disappears.
while True:
  init_points=args.init
  n_iter=args.iter
  bo = BayesianOptimization(score, p_range)
  # Warm-start from prior observations p if present; otherwise do a single
  # random init point and no BO iterations.
  if p: bo.initialize(p)
  else: init_points,n_iter=1,0
  if args.trunc: init_points,n_iter=0,0
  bo.maximize(init_points=init_points, n_iter=n_iter, acq=args.acq)
  featimp_cur=featimp
  # Collect points sampled this round (not part of the initialization set),
  # keyed by rounded score -> {param: rounded value}.
  p_new = {}
  for i in xrange(len(bo.Y)):
    if bo.Y[i] not in bo.y_init:
      p_new[bo.Y[i].round(4)]={bo.keys[j]:bo.X[i,j].round(4) for j in xrange(len(bo.keys))}

  # The pickle file acts as a coordination/stop flag between workers.
  if not os.path.isfile(picklefile): break
  with open(picklefile,'rb') as infile:
      try:
        (featimp,p_now,n_rounds_without_improve,n_wait)=pickle.load(infile)
        p.update(p_now)
      # NOTE(review): bare except silently falls back to this round's feature
      # importances on any unpickling error — consider narrowing.
      except: featimp=featimp_cur
Esempio n. 22
0
def start_automated_run(path, automated_run_id):
    """Starts automated run. This will automatically create
    base learners until the run finishes or errors out.

    Args:
        path (str): Path to Xcessiv notebook

        automated_run_id (str): Automated Run ID
    """
    with functions.DBContextManager(path) as session:
        automated_run = session.query(models.AutomatedRun).filter_by(id=automated_run_id).first()
        if not automated_run:
            raise exceptions.UserError('Automated run {} '
                                       'does not exist'.format(automated_run_id))
        # Record which job owns this run and mark it started.
        automated_run.job_id = get_current_job().id
        automated_run.job_status = 'started'

        session.add(automated_run)
        session.commit()

        try:
            module = functions.import_string_code_as_module(automated_run.source)
            random_state = 8 if not hasattr(module, 'random_state') else module.random_state
            assert module.metric_to_optimize in automated_run.base_learner_origin.metric_generators

            # get non-searchable parameters
            base_estimator = automated_run.base_learner_origin.return_estimator()
            base_estimator.set_params(**module.default_params)
            default_params = functions.make_serializable(base_estimator.get_params())
            non_searchable_params = dict((key, val) for key, val in iteritems(default_params)
                                         if key not in module.pbounds)

            # get already calculated base learners in search space
            # (finished learners whose non-searchable params match exactly)
            existing_base_learners = []
            for base_learner in automated_run.base_learner_origin.base_learners:
                if not base_learner.job_status == 'finished':
                    continue
                in_search_space = True
                for key, val in iteritems(non_searchable_params):
                    if base_learner.hyperparameters[key] != val:
                        in_search_space = False
                        break  # If no match, move on to the next base learner
                if in_search_space:
                    existing_base_learners.append(base_learner)

            # build initialize dictionary
            target = []
            initialization_dict = dict((key, list()) for key in module.pbounds.keys())
            for base_learner in existing_base_learners:
                # check if base learner's searchable hyperparameters are all numerical
                all_numerical = True
                for key in module.pbounds.keys():
                    if not isinstance(base_learner.hyperparameters[key], numbers.Number):
                        all_numerical = False
                        break
                if not all_numerical:
                    continue  # if there is a non-numerical hyperparameter, skip this.

                for key in module.pbounds.keys():
                    initialization_dict[key].append(base_learner.hyperparameters[key])
                target.append(base_learner.individual_score[module.metric_to_optimize])
            # Negate targets when the metric is minimized (BO maximizes).
            initialization_dict['target'] = target if not module.invert_metric \
                else list(map(lambda x: -x, target))
            print('{} existing in initialization dictionary'.
                  format(len(initialization_dict['target'])))

            # Create function to be optimized
            func_to_optimize = return_func_to_optimize(
                path, session, automated_run.base_learner_origin, module.default_params,
                module.metric_to_optimize, module.invert_metric, set(module.integers)
            )

            # Create Bayes object
            bo = BayesianOptimization(func_to_optimize, module.pbounds)

            bo.initialize(initialization_dict)

            np.random.seed(random_state)

            bo.maximize(**module.maximize_config)

            automated_run.job_status = 'finished'
            session.add(automated_run)
            session.commit()

        # NOTE(review): bare except also catches SystemExit/KeyboardInterrupt;
        # it re-raises after recording the error, so behavior is preserved,
        # but `except Exception:` would be the conventional narrowing.
        except:
            session.rollback()
            automated_run.job_status = 'errored'
            automated_run.description['error_type'] = repr(sys.exc_info()[0])
            automated_run.description['error_value'] = repr(sys.exc_info()[1])
            automated_run.description['error_traceback'] = \
                traceback.format_exception(*sys.exc_info())
            session.add(automated_run)
            session.commit()
            raise
Esempio n. 23
0
            np.savez(path,
                     mu0=X[keys.index('mu0')],
                     gamma0=X[keys.index('gamma0')],
                     beta0=X[keys.index('beta0')],
                     eta0=X[keys.index('eta0')],
                     kappa0=X[keys.index('kappa0')],
                     kappa1=X[keys.index('kappa1')],
                     lambda0=X[keys.index('lambda0')],
                     target=Y)

        name_result_file = 'resultPG5PC1PC2.txt'
        label = 'PG5PC1PC2'
    else:
        print('Warning: set model name')
        sys.exit()

    # training
    BO = BayesianOptimization(evaluate_BO, pbounds)
    if args.init_npz:
        npzfile = np.load(args.init_npz)
        BO.initialize(dict(npzfile))
    BO.maximize(init_points=bo_init_points, n_iter=bo_num_iter)
    saveBO(BO, 'full')
    '''
    with open(os.path.join(snapshot_path, name_result_file),'a') as f:
        f.write('##{},full##\n'.format(label))
        f.write('keys: {}\n'.format(BO.keys))
        f.write('opt X: {}\n'.format(BO.X[BO.Y.argmax()]))
        f.write('max cor: {}\n'.format(BO.Y.max()))
    '''