    def run(self, lambdas1, lambdas2=None, log_file=None):
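        # Grid search: solve at every candidate lambda value (or every
        # (lambda1, lambda2) pair) and keep the model parameters with the
        # lowest validation cost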
        self.log_file = log_file
        start = time.time()

        best_cost = self.MAX_COST
        # if only one lambda to tune
        if lambdas2 is None:
            self.fmodel = Fitted_Model(num_lambdas=1)
            self.log("%s lambda values: %s" % (self.method_label, lambdas1))
            for l1 in lambdas1:
                curr_lambdas = np.array([l1])
                model_params = self.problem_wrapper.solve(curr_lambdas,
                                                          quick_run=True)
                cost = self.get_validation_cost(model_params)

                self.log("%s: cost %f, lambda: %f, %f" %
                         (self.method_label, cost, l1))
                if best_cost > cost:
                    best_cost = cost
                    self.log("%s: best_validation_error %f, lambda: %f" %
                             (self.method_label, best_cost, l1))
                    self.fmodel.update(curr_lambdas, model_params, cost)
                    self.log(
                        "%s: best_model_params %s" %
                        (self.method_label, self.fmodel.best_model_params))

            self.fmodel.set_num_solves(len(lambdas1))
        else:
            # if two lambdas to tune
            self.fmodel = Fitted_Model(num_lambdas=2)
            self.log("%s lambda1 values: %s" % (self.method_label, lambdas1))
            self.log("%s lambda2 values: %s" % (self.method_label, lambdas2))
            for l1 in lambdas1:
                for l2 in lambdas2:
                    curr_lambdas = np.array([l1, l2])
                    model_params = self.problem_wrapper.solve(curr_lambdas,
                                                              quick_run=True)
                    cost = self.get_validation_cost(model_params)

                    self.log("%s: cost %f, lambdas: %f, %f" %
                             (self.method_label, cost, l1, l2))
                    if best_cost > cost:
                        best_cost = cost
                        self.log(
                            "%s: best_validation_error %f, lambdas: %f, %f" %
                            (self.method_label, best_cost, l1, l2))
                        self.fmodel.update(curr_lambdas, model_params, cost)
                        self.log(
                            "%s: best_model_params %s" %
                            (self.method_label, self.fmodel.best_model_params))

            self.fmodel.set_num_solves(len(lambdas1) * len(lambdas2))

        runtime = time.time() - start
        self.fmodel.set_runtime(runtime)
        self.log("%s: best cost %f, lambda %s" %
                 (self.method_label, best_cost, self.fmodel.current_lambdas))
        self.log("%s: best_model_params %s" %
                 (self.method_label, self.fmodel.best_model_params))

    def run(self, initial_lambdas_set, num_iters=10, log_file=None):
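        # Nelder-Mead search: minimize the validation cost over the lambdas,
        # restarting once from each vector in initial_lambdas_set and capping
        # each search at num_iters iterations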
        self.log_file = log_file
        start = time.time()
        total_calls = 0
        self.fmodel = Fitted_Model(initial_lambdas_set[0].size)
        for initial_lambdas in initial_lambdas_set:
            self.log("initial_lambdas %s" % initial_lambdas)
            res = minimize(self.get_validation_cost,
                           initial_lambdas,
                           method='nelder-mead',
                           options={"maxiter": num_iters})
            model_params = self.problem_wrapper.solve(res.x, quick_run=True)
            total_calls += res.nfev
            self.fmodel.update(res.x, model_params, res.fun)
            self.log("fmodel %s" % self.fmodel)
        runtime = time.time() - start

        self.log("%s: best cost %f, lambda %s, total calls %d" %
                 (self.method_label, self.fmodel.best_cost,
                  self.fmodel.best_lambdas, total_calls))
        self.log("%s: best_model_params %s" %
                 (self.method_label, self.fmodel.best_model_params))

        self.fmodel.set_runtime(runtime)
        self.fmodel.set_num_solves(total_calls)

    def run(self, initial_lambda_set, debug=True, log_file=None):
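        # Multi-start search: run _run_lambdas from each initialization and
        # track which starting lambdas produced the lowest validation cost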
        self.log_file = log_file
        start_time = time.time()

        self.fmodel = Fitted_Model(initial_lambda_set[0].size)
        best_cost = None
        best_initial_lambdas = None
        for initial_lambdas in initial_lambda_set:
            self.log("%s: initial_lambdas %s" %
                     (self.method_label, initial_lambdas))
            self._run_lambdas(initial_lambdas, debug=debug)
            if best_cost is None or best_cost > self.fmodel.best_cost:
                best_cost = self.fmodel.best_cost
                best_initial_lambdas = initial_lambdas
            self.log("%s: best start lambda %s" %
                     (self.method_label, best_initial_lambdas))

        runtime = time.time() - start_time
        self.log("%s: runtime %s" % (self.method_label, runtime))
        self.fmodel.set_runtime(runtime)

    def run(self, num_runs, log_file=None):
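        # Spearmint loop: each iteration asks Spearmint to propose lambda
        # settings, solves any pending experiments, scores them on the
        # validation set, and writes the costs back for the next proposal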
        self.log_file = log_file
        self.fmodel = Fitted_Model(self.num_lambdas)

        # Propose and evaluate one new experiment per iteration
        best_cost = None
        runtime = 0
        for i in range(num_runs):
            start_time = time.time()
            self.log("%s: iter %d" % (self.method_label, i))
            self.run_spearmint_command(self.result_folder)
            self.log("Spearmint command completed")

            # Each result line is "<value> <duration> <log10 lambda values...>"
            with open(self.result_file, 'r') as resfile:
                newlines = []
                for line in resfile.readlines():
                    values = line.split()
                    if len(values) < 3:
                        continue
                    val = values.pop(0)
                    values.pop(0)  # the duration field is unused here
                    lambdas = np.array(
                        [10**float(v) for v in values[:self.num_lambdas]])
                    if val == 'P':
                        # 'P' marks a pending experiment: run it now
                        self.log("lambdas %s" % lambdas)
                        model_params = self.problem_wrapper.solve(
                            lambdas, quick_run=True)
                        if model_params is None:
                            current_cost = self.MAX_COST
                        else:
                            current_cost = self.get_validation_cost(
                                model_params)

                        if best_cost is None or best_cost > current_cost:
                            best_cost = current_cost
                            self.fmodel.update(lambdas, model_params,
                                               current_cost)
                            self.log("fmodel: %s" % self.fmodel)

                        # Write the cost back (duration "0") so Spearmint
                        # treats this experiment as finished
                        newlines.append(
                            str(current_cost) + " 0 " + " ".join(values) +
                            "\n")
                    else:
                        # Otherwise these are previous experiment results
                        newlines.append(line)

            runtime += time.time() - start_time
            # Time spent rewriting the result file is not counted in runtime
            with open(self.result_file, 'w') as resfile:
                resfile.writelines(newlines)

            sys.stdout.flush()

        self.fmodel.set_runtime(runtime)
        self.fmodel.set_num_solves(num_runs)
        self.log("%s: runtime %s" % (self.method_label, runtime))
        self.log("fmodel: %s" % self.fmodel)
        self.log("fmodel best_model_params: %s" %
                 self.fmodel.best_model_params)

        # Very important: clean up Spearmint's results so stale experiments
        # do not carry over into later runs
        self.run_spearmint_clean(self.result_folder)
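
    # A minimal usage sketch (hypothetical, not from this repo): assuming one
    # of the tuner classes above has been constructed with its problem wrapper
    # and validation data, tuning a single lambda over a grid might look like:
    #
    #   lambdas1 = np.power(10.0, np.arange(-5, 3, dtype=float))
    #   tuner.run(lambdas1, log_file="tuning.log")
    #   print(tuner.fmodel.best_cost, tuner.fmodel.best_model_params)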