Example #1
 def test_reproduce(self):
     seed = 1234
     res1 = dual_annealing(self.func, self.ld_bounds, seed=seed)
     res2 = dual_annealing(self.func, self.ld_bounds, seed=seed)
     res3 = dual_annealing(self.func, self.ld_bounds, seed=seed)
     # If we have reproducible results, the x components found have to
     # be exactly the same, which is not the case without seeding
     assert_equal(res1.x, res2.x)
     assert_equal(res1.x, res3.x)
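
A self-contained version of this reproducibility check, with a simple quadratic standing in for self.func and two-dimensional bounds standing in for self.ld_bounds (both stand-ins are assumptions, not from the original test class):

import numpy as np
from scipy.optimize import dual_annealing

def func(x):
    # stand-in objective (assumed for illustration)
    return np.sum(x ** 2)

bounds = [(-5.0, 5.0)] * 2
res1 = dual_annealing(func, bounds, seed=1234)
res2 = dual_annealing(func, bounds, seed=1234)
assert np.array_equal(res1.x, res2.x)  # same seed, identical x components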
Example #2
 def test_neldermead_ls_minimizer(self):
     minimizer_opts = {
         'method': 'Nelder-Mead',
     }
     ret = dual_annealing(self.func, self.ld_bounds,
                          local_search_options=minimizer_opts)
     assert_allclose(ret.fun, 0., atol=1e-6)
Example #3
 def test_callback_stop(self):
     # Testing that the callback makes the algorithm stop when
     # fun value <= 1.0 (see callback method)
     ret = dual_annealing(self.func, self.ld_bounds,
                          callback=self.callback)
     assert ret.fun <= 1.0
     assert 'stop early' in ret.message[0]
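
The self.callback used here is not shown; below is a minimal sketch of such a stopping callback, following the SciPy-documented signature callback(x, f, context), where returning True halts the search (the objective is a stand-in):

import numpy as np
from scipy.optimize import dual_annealing

def callback(x, f, context):
    # request an early stop once the running best value reaches 1.0 or less
    return f <= 1.0

ret = dual_annealing(lambda x: np.sum(x ** 2), bounds=[(-5.0, 5.0)] * 2,
                     callback=callback)
print(ret.message)  # should mention the early stop requested by the callback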
Example #4
 def test_cobyla_ls_minimizer(self):
     minimizer_opts = {
         'method': 'COBYLA',
     }
     ret = dual_annealing(self.func, self.ld_bounds,
                          local_search_options=minimizer_opts)
     assert_allclose(ret.fun, 0., atol=1e-5)
Example #5
 def test_slsqp_ls_minimizer(self):
     minimizer_opts = {
         'method': 'SLSQP',
     }
     ret = dual_annealing(self.func, self.ld_bounds,
                          local_search_options=minimizer_opts)
     assert_allclose(ret.fun, 0., atol=1e-7)
Example #6
 def test_tnc_ls_minimizer(self):
     minimizer_opts = {
         'method': 'TNC',
     }
     ret = dual_annealing(self.func, self.ld_bounds,
                          local_search_options=minimizer_opts)
     assert_allclose(ret.fun, 0., atol=1e-8)
Example #7
 def test_gradient_gnev(self):
     minimizer_opts = {
         'jac': self.rosen_der_wrapper,
     }
     ret = dual_annealing(rosen, self.ld_bounds,
                          local_search_options=minimizer_opts)
     assert ret.njev == self.ngev
Example #8
    def test_max_fun_ls(self):
        ret = dual_annealing(self.func, self.ld_bounds, maxfun=100)

        ls_max_iter = min(max(
            len(self.ld_bounds) * LocalSearchWrapper.LS_MAXITER_RATIO,
            LocalSearchWrapper.LS_MAXITER_MIN),
            LocalSearchWrapper.LS_MAXITER_MAX)
        assert ret.nfev <= 100 + ls_max_iter
Example #9
    def run_dualannealing(self):
        """
        Do an optimization run for dual_annealing
        """
        self.function.nfev = 0

        t0 = time.time()

        res = dual_annealing(self.fun,
                             self.bounds)

        t1 = time.time()
        res.success = self.function.success(res.x)
        res.nfev = self.function.nfev
        self.add_result(res, t1 - t0, 'DA')
Example #10
 def test_low_dim(self):
     ret = dual_annealing(
         self.func, self.ld_bounds, seed=self.seed)
     assert_allclose(ret.fun, 0., atol=1e-12)
Example #11
 def test_maxiter(self):
     ret = dual_annealing(self.func, self.ld_bounds, maxiter=700)
     assert ret.nit <= 700
Example #12
 def test_fun_args_ls(self):
     ret = dual_annealing(self.func,
                          self.ld_bounds,
                           args=(3.14159,),
                          seed=self.seed)
     assert_allclose(ret.fun, 3.14159, atol=1e-6)

Example #13

import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize

def eggholder(x):
    return (-(x[1] + 47) * np.sin(np.sqrt(abs(x[0]/2 + (x[1] + 47))))
            - x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47)))))

bounds = [(-512, 512), (-512, 512)]

x = np.arange(-512, 513)
y = np.arange(-512, 513)
xgrid, ygrid = np.meshgrid(x, y)
xy = np.stack([xgrid, ygrid])

results = dict()
results['shgo'] = optimize.shgo(eggholder, bounds)
results['DA'] = optimize.dual_annealing(eggholder, bounds)
results['DE'] = optimize.differential_evolution(eggholder, bounds)
results['BH'] = optimize.basinhopping(eggholder, bounds)
results['shgo_sobol'] = optimize.shgo(eggholder, bounds, n=200, iters=5,
                                      sampling_method='sobol')

fig = plt.figure(figsize=(4.5, 4.5))
ax = fig.add_subplot(111)
im = ax.imshow(eggholder(xy), interpolation='bilinear', origin='lower',
               cmap='gray')
ax.set_xlabel('x')
ax.set_ylabel('y')

def plot_point(res, marker='o', color=None):
    ax.plot(512+res.x[0], 512+res.x[1], marker=marker, color=color, ms=10)
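
The snippet defines plot_point but stops before using it; a short continuation in the spirit of the SciPy global-optimization tutorial this example appears to follow would mark each solver's result on the heat map and display the figure (the color choices are illustrative):

# mark each solver's best point on the eggholder heat map, then show it
plot_point(results['BH'], color='y')   # basinhopping
plot_point(results['DE'], color='c')   # differential_evolution
plot_point(results['DA'], color='w')   # dual_annealing
plot_point(results['shgo'], color='r', marker='+')
plot_point(results['shgo_sobol'], color='r', marker='x')
plt.show()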
Example #14
    def _parameter_value_selection(self, trajectories, _maxiter=100):
        '''
        Given a set of trajectories (it is assumed that the raw trajectories have
        been processed through _approximate_trajectory_partitioning() before
        calling this), break them up into line segments and wrap them in
        line_segment objects, then use simulated annealing to find an optimal
        value for epsilon, and from it a range for minlns. Dual annealing is used
        here as it should be superior to traditional simulated annealing for this
        task. The optimal epsilon and the minlns range are returned.

        _maxiter, the maximum number of iterations for the simulated annealing process
        '''
        
        assert len(trajectories) > 0

        lines = list()
        for t in trajectories:
            _lss = self._convert_trajectory_to_line_segments(t)

            lines.extend(_lss)

        #first, finding the optimal epsilon
        #with these functions and via simulated annealing
        def H(line_segments, epsilon):
            '''
            line_segments, the set of line segments 
            epsilon, the epsilon distance used to compute the epsilon neighborhood
            '''

            def p(x, line_segments, epsilon):
                '''
                x, a single line segment
                    to find the epsilon neighborhood around
                line_segments, the set of line segments where neighbors are found
                epsilon, the epsilon distance used to compute the epsilon neighborhood
                '''

                Ne_xi = len( x.get_epsilon_neighborhood(line_segments, epsilon) )

                Ne_xj_sum = 0
                for ls in line_segments:
                    Ne_xj = len( ls.get_epsilon_neighborhood(line_segments, epsilon) )

                    Ne_xj_sum += Ne_xj

                return float(Ne_xi/Ne_xj_sum)

            import numpy as np  # hoisted out of the loop body

            ret = 0
            for ls in line_segments:
                p_ls = p(ls, line_segments, epsilon)  # evaluate p once per segment
                ret += -p_ls * np.log2(p_ls)

            return ret

        def func(x):
            '''
            a wrapper function to use with scipy.optimize.dual_annealing
            '''

            return H(lines, epsilon=x)

        #find the largest and smallest values for the distance between line segments, to use with simulated annealing
        ds = list()
        prev = None
        for l in lines:
            if prev is None:
                prev = l
                continue

            ds.append( self._distance( prev.get_line_segment(), l.get_line_segment() ) )

            prev = l

        _max = max(ds)
        _min = min(ds)

        #then find the optimal epsilon with simulated annealing
        optimal_epsilon = None

        #approach = 'scipy'
        approach = 'pygmo'

        if approach == 'scipy':
            from scipy.optimize import dual_annealing
            result = dual_annealing(func, [[_min, _max]], maxiter=_maxiter)
            optimal_epsilon = result.x[0]

        if approach == 'pygmo':
            import pygmo as pg
            algo = pg.algorithm( pg.simulated_annealing() )

            class epsilon_problem:
                def fitness(self, x):
                    return [func(x)]

                def get_bounds(self):
                    return ([_min], [_max])

            prob = pg.problem( epsilon_problem() )

            pop = pg.population(prob, 1300)
            pop = algo.evolve(pop)  # run the annealing; without this the champion is just the best random sample

            optimal_epsilon = pop.champion_x[0]

        #then with the optimal epsilon in hand, we can calculate the average size of the epsilon neighborhoods
        Nes = list()
        for l in lines:
            Nes.append( len( l.get_epsilon_neighborhood(lines, optimal_epsilon) ) )
        
        avg_Ne = float(sum(Nes) / len(Nes))

        #given this average, a range for minlns can be formulated
        minlns_range = [avg_Ne+1, avg_Ne+3]

        return [optimal_epsilon, minlns_range]

#end TRACLUS
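
For reference, here is the pygmo pattern used in the branch above in minimal, self-contained form; the one-dimensional quadratic problem and its bounds are stand-ins, not from the original code. Note the algo.evolve(pop) call, which the original omitted:

import pygmo as pg

class toy_problem:
    # stand-in 1-D problem (assumed for illustration)
    def fitness(self, x):
        return [(x[0] - 0.5) ** 2]

    def get_bounds(self):
        return ([0.0], [1.0])

prob = pg.problem(toy_problem())
algo = pg.algorithm(pg.simulated_annealing())
pop = pg.population(prob, 20)
pop = algo.evolve(pop)  # run the annealing on the population
print(pop.champion_x[0], pop.champion_f[0])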
Example #15
def _optim(y_obs,
           MT,
           lb,
           ub,
           S=None,
           LossFunc='Mfree',
           DeviationType='mse',
           n_lig=1,
           AL=None,
           KAL=None,
           optimizerKW={}):
    """ Single-point optimization based on a variety of loss functions (vdB/Ruzic, Scatchard)  """
    if 'maxiter' in optimizerKW:
        maxiter = optimizerKW['maxiter']

    lb = np.array(lb)
    ub = np.array(ub)

    if (S is None) and (len(lb) != 2 * n_lig + 1 or len(ub) != 2 * n_lig + 1):
        raise ValueError(
            'This model requires {:d} value(s) for lower bound and upper bound!'
            .format(2 * n_lig + 1))

    if (S is not None) and (len(lb) != 2 * n_lig or len(ub) != 2 * n_lig):
        raise ValueError(
            'This model requires {:d} value(s) for lower bound and upper bound!'
            .format(2 * n_lig))

    if DeviationType not in ['mse', 'mae']:
        raise ValueError(
            'Choose Deviation Type to be one of \"mse\" or \"mae\"!')

    if LossFunc not in ['Mfree', 'Scatchard', 'vdB', 'Gerringa']:
        raise ValueError(
            'Choose Loss Function to be one of "Mfree", "Scatchard", "vdB", or "Gerringa"!'
        )

    # defining different loss functions

    if (S is None) and (AL is None) and (KAL is None):
        #ASV
        # we need to find S as well as ligand concentration and constant
        def objFunc(X):
            if n_lig == 1:
                LT = X[:1]
                K = 10**X[1:2]
                S = X[2:]
            else:
                LT = X[0:2]
                K = 10**X[2:4]
                S = X[4:]

            tmp = _titr_simulate(MT, LT, K, n_lig, AL, KAL)

            if LossFunc == 'Mfree':
                Mfree_obs = y_obs / S
                Mfree_pred = tmp[:, 0]
                if DeviationType == 'mse':
                    floss = np.sqrt(np.mean((Mfree_pred - Mfree_obs)**2))
                else:
                    floss = np.mean(np.abs(Mfree_pred - Mfree_obs))

            if LossFunc == 'Scatchard':
                Mfree_obs = y_obs / S
                all_M_L_obs = MT - Mfree_obs
                allML_Mfree_obs = all_M_L_obs / Mfree_obs
                allML_Mfree_pred = tmp[:, 1] / tmp[:, 0]
                if DeviationType == 'mse':
                    floss = np.sqrt(
                        np.mean((allML_Mfree_pred - allML_Mfree_obs)**2))
                else:
                    floss = np.mean(np.abs(allML_Mfree_pred - allML_Mfree_obs))

            if LossFunc == 'vdB':
                Mfree_obs = y_obs / S
                all_M_L_obs = MT - Mfree_obs
                Mfree_allML_obs = Mfree_obs / all_M_L_obs
                Mfree_allML_pred = tmp[:, 0] / tmp[:, 1]
                if DeviationType == 'mse':
                    floss = np.sqrt(
                        np.mean((Mfree_allML_obs - Mfree_allML_pred)**2))
                else:
                    floss = np.mean(np.abs(Mfree_allML_obs - Mfree_allML_pred))

            if LossFunc == 'Gerringa':
                Mfree_obs = y_obs / S
                all_M_L_obs = MT - Mfree_obs
                all_M_L_pred = tmp[:, 1]
                if DeviationType == 'mse':
                    floss = np.sqrt(np.mean((all_M_L_obs - all_M_L_pred)**2))
                else:
                    floss = np.mean(np.abs(all_M_L_obs - all_M_L_pred))

            return floss

        ret = dual_annealing(func=objFunc,
                             bounds=list(zip(lb, ub)),
                             maxiter=maxiter,
                             seed=2442)
        return ret.x  # optimized LT, K, and S

    if (S is not None) and (AL is None) and (KAL is None):
        #ASV
        def objFunc(X):
            if n_lig == 1:
                LT = X[0:1]
                K = 10**X[1:2]

            else:
                LT = X[0:2]
                K = 10**X[2:4]

            tmp = _titr_simulate(MT, LT, K, n_lig, AL, KAL)

            if LossFunc == 'Mfree':
                Mfree_obs = y_obs / S
                Mfree_pred = tmp[:, 0]
                if DeviationType == 'mse':
                    floss = np.sqrt(np.mean((Mfree_pred - Mfree_obs)**2))
                else:
                    floss = np.mean(np.abs(Mfree_pred - Mfree_obs))

            if LossFunc == 'Scatchard':
                Mfree_obs = y_obs / S
                all_M_L_obs = MT - Mfree_obs
                allML_Mfree_obs = all_M_L_obs / Mfree_obs
                allML_Mfree_pred = tmp[:, 1] / tmp[:, 0]
                if DeviationType == 'mse':
                    floss = np.sqrt(
                        np.mean((allML_Mfree_pred - allML_Mfree_obs)**2))
                else:
                    floss = np.mean(np.abs(allML_Mfree_pred - allML_Mfree_obs))

            if LossFunc == 'vdB':
                Mfree_obs = y_obs / S
                all_M_L_obs = MT - Mfree_obs
                Mfree_allML_obs = Mfree_obs / all_M_L_obs
                Mfree_allML_pred = tmp[:, 0] / tmp[:, 1]
                if DeviationType == 'mse':
                    floss = np.sqrt(
                        np.mean((Mfree_allML_obs - Mfree_allML_pred)**2))
                else:
                    floss = np.mean(np.abs(Mfree_allML_obs - Mfree_allML_pred))

            if LossFunc == 'Gerringa':
                Mfree_obs = y_obs / S
                all_M_L_obs = MT - Mfree_obs
                all_M_L_pred = tmp[:, 1]
                if DeviationType == 'mse':
                    floss = np.sqrt(np.mean((all_M_L_obs - all_M_L_pred)**2))
                else:
                    floss = np.mean(np.abs(all_M_L_obs - all_M_L_pred))

            return floss

        ret = dual_annealing(func=objFunc,
                             bounds=list(zip(lb, ub)),
                             maxiter=maxiter,
                             seed=2442)
        return ret.x  # optimized LT and K (S is known in this branch)

    if (S is None) and (AL is not None) and (KAL is not None):
        # ACSV method, so y_obs is the current signal equal to [MAL] * S
        def objFunc(X):
            if n_lig == 1:
                LT = X[0:1]
                K = 10**X[1:2]
                S = X[2]
            else:
                LT = X[0:2]
                K = 10**X[2:4]
                S = X[4]

            tmp = _titr_simulate(MT, LT, K, n_lig, AL, KAL)

            if LossFunc == 'Mfree':
                MAL_obs = y_obs / S  # nM
                Mfree_obs = MAL_obs / 1e9 / KAL / (AL / 1e9 -
                                                   MAL_obs / 1e9) * 1e9  # nM
                Mfree_pred = tmp[:, 0]
                if DeviationType == 'mse':
                    floss = np.sqrt(np.mean((Mfree_pred - Mfree_obs)**2))
                else:
                    floss = np.mean(np.abs(Mfree_pred - Mfree_obs))

            if LossFunc == 'Scatchard':
                MAL_obs = y_obs / S  # nM
                Mfree_obs = MAL_obs / 1e9 / KAL / (AL / 1e9 -
                                                   MAL_obs / 1e9) * 1e9  # nM
                all_M_L_obs = MT - Mfree_obs - MAL_obs
                allML_Mfree_obs = all_M_L_obs / Mfree_obs
                allML_Mfree_pred = tmp[:, 1] / tmp[:, 0]
                if DeviationType == 'mse':
                    floss = np.sqrt(
                        np.mean((allML_Mfree_pred - allML_Mfree_obs)**2))
                else:
                    floss = np.mean(np.abs(allML_Mfree_pred - allML_Mfree_obs))

            if LossFunc == 'vdB':
                MAL_obs = y_obs / S
                Mfree_obs = MAL_obs / 1e9 / KAL / (AL / 1e9 -
                                                   MAL_obs / 1e9) * 1e9  # nM
                all_M_L_obs = MT - Mfree_obs - MAL_obs
                Mfree_allML_obs = Mfree_obs / all_M_L_obs
                Mfree_allML_pred = tmp[:, 0] / tmp[:, 1]
                if DeviationType == 'mse':
                    floss = np.sqrt(
                        np.mean((Mfree_allML_obs - Mfree_allML_pred)**2))
                else:
                    floss = np.mean(np.abs(Mfree_allML_obs - Mfree_allML_pred))

            if LossFunc == 'Gerringa':
                MAL_obs = y_obs / S
                Mfree_obs = MAL_obs / 1e9 / KAL / (AL / 1e9 -
                                                   MAL_obs / 1e9) * 1e9  # nM
                all_M_L_obs = MT - Mfree_obs - MAL_obs
                all_M_L_pred = tmp[:, 1]
                if DeviationType == 'mse':
                    floss = np.sqrt(np.mean((all_M_L_obs - all_M_L_pred)**2))
                else:
                    floss = np.mean(np.abs(all_M_L_obs - all_M_L_pred))

            return floss

        ret = dual_annealing(func=objFunc,
                             bounds=list(zip(lb, ub)),
                             maxiter=maxiter,
                             seed=2442)
        return ret.x  # optimized LT, K, and S

    if (S is not None) and (AL is not None) and (KAL is not None):

        def objFunc(X):

            if n_lig == 1:
                LT = X[0:1]
                K = 10**X[1:2]

            else:
                LT = X[0:2]
                K = 10**X[2:4]

            tmp = _titr_simulate(MT, LT, K, n_lig, AL, KAL)

            if LossFunc == 'Mfree':
                MAL_obs = y_obs / S  # nM
                Mfree_obs = MAL_obs / 1e9 / KAL / (AL / 1e9 -
                                                   MAL_obs / 1e9) * 1e9  # nM
                Mfree_pred = tmp[:, 0]
                if DeviationType == 'mse':
                    floss = np.sqrt(np.mean((Mfree_pred - Mfree_obs)**2))
                else:
                    floss = np.mean(np.abs(Mfree_pred - Mfree_obs))

            if LossFunc == 'Scatchard':
                MAL_obs = y_obs / S  # nM
                Mfree_obs = MAL_obs / 1e9 / KAL / (AL / 1e9 -
                                                   MAL_obs / 1e9) * 1e9  # nM
                all_M_L_obs = MT - Mfree_obs - MAL_obs
                allML_Mfree_obs = all_M_L_obs / Mfree_obs
                allML_Mfree_pred = tmp[:, 1] / tmp[:, 0]
                if DeviationType == 'mse':
                    floss = np.sqrt(
                        np.mean((allML_Mfree_pred - allML_Mfree_obs)**2))
                else:
                    floss = np.mean(np.abs(allML_Mfree_pred - allML_Mfree_obs))

            if LossFunc == 'vdB':
                MAL_obs = y_obs / S
                Mfree_obs = MAL_obs / 1e9 / KAL / (AL / 1e9 -
                                                   MAL_obs / 1e9) * 1e9  # nM
                all_M_L_obs = MT - Mfree_obs - MAL_obs
                Mfree_allML_obs = Mfree_obs / all_M_L_obs
                Mfree_allML_pred = tmp[:, 0] / tmp[:, 1]
                if DeviationType == 'mse':
                    floss = np.sqrt(
                        np.mean((Mfree_allML_obs - Mfree_allML_pred)**2))
                else:
                    floss = np.mean(np.abs(Mfree_allML_obs - Mfree_allML_pred))

            if LossFunc == 'Gerringa':
                MAL_obs = y_obs / S
                Mfree_obs = MAL_obs / 1e9 / KAL / (AL / 1e9 -
                                                   MAL_obs / 1e9) * 1e9  # nM
                all_M_L_obs = MT - Mfree_obs - MAL_obs
                all_M_L_pred = tmp[:, 1]
                if DeviationType == 'mse':
                    floss = np.sqrt(np.mean((all_M_L_obs - all_M_L_pred)**2))
                else:
                    floss = np.mean(np.abs(all_M_L_obs - all_M_L_pred))

            return floss

        ret = dual_annealing(func=objFunc,
                             bounds=list(zip(lb, ub)),
                             maxiter=maxiter,
                             seed=2442)
        return ret.x  # optimized LT and K
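
The list(zip(lb, ub)) bounds pattern repeated in every branch above, shown in isolation with a stand-in objective (the quadratic and the bound values are assumptions for illustration):

import numpy as np
from scipy.optimize import dual_annealing

lb = np.array([0.0, 0.0])
ub = np.array([10.0, 10.0])

def obj(x):
    # stand-in objective (assumed for illustration)
    return np.sum((x - 3.0) ** 2)

# zip pairs each lower bound with its upper bound: [(0.0, 10.0), (0.0, 10.0)]
ret = dual_annealing(func=obj, bounds=list(zip(lb, ub)), maxiter=200, seed=2442)
print(ret.x)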
Example #16
 def test_max_fun_no_ls(self):
     ret = dual_annealing(self.func, self.ld_bounds,
                          no_local_search=True, maxfun=500, seed=self.seed)
     assert ret.nfev <= 500
     assert not ret.success
Example #17
 def test_high_dim(self):
     ret = dual_annealing(self.func, self.hd_bounds)
     assert_allclose(ret.fun, 0., atol=1e-12)
Example #18
    def train(
        self,
        logger_name=None,
        custom_bounds=None,
        grad_tol: float = 1e-4,
        x_tol: float = 1e-5,
        line_steps: int = 20,
        print_progress: Union[bool, str] = False,
        **kwargs,
    ):
        """Train RBCM model on training data. Tunes the
        hyperparameters to maximize the likelihood, then computes L and alpha
        (related to the covariance matrix of the training set).
        Args:
            logger_name (str): name of the logger specifying where to write the
                progress of the optimization.
            custom_bounds (np.ndarray): Custom bounds on the hyperparameters.
            grad_tol (float): Tolerance of the hyperparameter gradient that
                determines when hyperparameter optimization is terminated.
            x_tol (float): Tolerance on the x values used to decide when
                Nelder-Mead hyperparameter optimization is terminated.
            line_steps (int): Maximum number of line steps for L-BFGS
                hyperparameter optimization.
        """

        verbose = "warning"
        if print_progress:
            verbose = "info"
        if isinstance(print_progress,
                      str) and print_progress.lower() == "debug":
            verbose = "debug"
        if logger_name is None:
            set_logger(
                "gp_algebra",
                stream=False,
                fileout_name="log.gp_algebra",
                verbose=verbose,
            )
            logger_name = "gp_algebra"

        supported_algorithms = [
            "differential evolution",
            "dual annealing",
            "L-BFGS-B",
            "basin hopping",
            "BFGS",
        ]
        if self.opt_algorithm not in supported_algorithms:
            raise ValueError(
                f"Optimization Algorithm {self.opt_algorithm} not "
                f"supported. Please "
                f"choose from {supported_algorithms}")

        disp = False  # print_progress

        if len(self.training_data) == 0 or len(self.training_labels) == 0:
            raise Warning("You are attempting to train a GP with no "
                          "training data. Add environments and forces "
                          "to the GP and try again.")

        self.sync_data()

        x_0 = self.hyps

        opt_algorithm = f"{self.opt_algorithm}"

        args = (
            self.n_experts,
            self.name,
            self.kernel_grad,
            logger_name,
            self.cutoffs,
            self.hyps_mask,
            self.n_cpus,
            self.n_sample,
            self.per_expert_parallel,
        )
        func = rbcm_get_neg_like_grad

        if opt_algorithm in ["differential evolution", "dual annealing"]:
            args0 = (
                self.n_experts,
                self.name,
                self.kernel,
                logger_name,
                self.cutoffs,
                self.hyps_mask,
                self.n_cpus,
                self.n_sample,
                self.per_expert_parallel,
            )
            func0 = rbcm_get_neg_like

        res = None

        # Custom bounds take precedence when provided
        if custom_bounds is not None:
            bounds = custom_bounds
        elif self.bounds is None:
            bounds = np.array([(1e-6, 100)] * len(x_0))
            if self.hyps_mask.get("train_noise", True):
                bounds[-1, 0] = 1e-3
                bounds[-1, 1] = self.prior_variance
        else:
            bounds = self.bounds

        if opt_algorithm == "basin hopping":
            minimizer_kwargs = {
                "method": "L-BFGS-B",
                "jac": True,
                "args": args,
                "maxiter": 200,
            }
            res = basinhopping(func,
                               x_0,
                               minimizer_kwargs=minimizer_kwargs,
                               niter=self.maxiter)

        if opt_algorithm == "differential evolution":
            res = differential_evolution(func0,
                                         bounds,
                                         args=args0,
                                         maxiter=self.maxiter,
                                         polish=False,
                                         **kwargs)
            opt_algorithm = "L-BFGS-B"

        if opt_algorithm == "dual annealining":

            res = dual_annealing(func0,
                                 bounds,
                                 args=args0,
                                 maxiter=self.maxiter,
                                 x0=x_0,
                                 **kwargs)

        if opt_algorithm == "L-BFGS-B":

            # bound signal noise below to avoid overfitting
            # Catch linear algebra errors and switch to BFGS if necessary
            try:
                res = minimize(
                    func,
                    x_0,
                    args,
                    method="L-BFGS-B",
                    jac=True,
                    bounds=bounds,
                    options={
                        "disp": disp,
                        "gtol": grad_tol,
                        "maxls": line_steps,
                        "maxiter": self.maxiter,
                    },
                )
            except np.linalg.LinAlgError:
                logger = logging.getLogger(self.logger_name)
                logger.warning("Algorithm for L-BFGS-B failed. Changing to "
                               "BFGS for remainder of run.")
                opt_algorithm = "BFGS"

        if opt_algorithm == "BFGS":
            res = minimize(
                func,
                x_0,
                args,
                method="BFGS",
                jac=True,
                options={
                    "disp": disp,
                    "gtol": grad_tol,
                    "maxiter": self.maxiter
                },
            )

        if res is None:
            raise RuntimeError("Optimization failed for some reason.")
        self.hyps = res.x
        self.set_L_alpha()
        self.total_likelihood = -res.fun
        self.total_likelihood_gradient = -res.jac

        return res
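
The dual-annealing branch above seeds the search with x0; a minimal standalone illustration of that keyword follows (the objective, bounds, and starting point are stand-ins, not from this class):

import numpy as np
from scipy.optimize import dual_annealing

def neg_like(x):
    # stand-in objective (assumed for illustration)
    return np.sum((x - 1.0) ** 2)

x_start = np.full(3, 0.5)  # x0 must lie inside the bounds
res = dual_annealing(neg_like, bounds=[(1e-6, 100)] * 3, x0=x_start,
                     maxiter=50)
print(res.x)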
Example #19
 def test_nb_fun_call(self):
     ret = dual_annealing(self.func, self.ld_bounds)
     assert_equal(self.nb_fun_call, ret.nfev)
Example #20
 def test_high_dim_no_ls(self):
     ret = dual_annealing(self.func, self.hd_bounds,
                          no_local_search=True)
     assert_allclose(ret.fun, 0., atol=1e-4)
Example #21
 def test_nb_fun_call_no_ls(self):
     ret = dual_annealing(self.func, self.ld_bounds,
                          no_local_search=True)
     assert_equal(self.nb_fun_call, ret.nfev)
Example #22
 def test_fun_args_no_ls(self):
     ret = dual_annealing(self.func, self.ld_bounds,
                          args=(3.14159,), no_local_search=True)
     assert_allclose(ret.fun, 3.14159, atol=1e-4)
Example #23
 def test_fun_args_ls(self):
     ret = dual_annealing(self.func, self.ld_bounds,
                          args=(3.14159,))
     assert_allclose(ret.fun, 3.14159, atol=1e-6)
Example #24
import numpy as np
import numpy.random as rn

def clip(x):
    """ Force x to be in the interval. """
    a, b = interval
    return max(min(x, b), a)

def random_start():
    a, b = interval
    return a + (b - a) * rn.random_sample()

def cost_function(x):
    return f(x)

def random_neighbor(x, fraction=1):
    amplitude = (max(interval) - min(interval)) * fraction / 10
    delta = (-amplitude/2.0) + amplitude * rn.random_sample()
    return clip(x + delta)

def acceptance_probability(cost, new_cost, temperature):
    if new_cost < cost:
        return 1
    else:
        p = np.exp(- (new_cost - cost) / temperature)
        return p

def temperature(fraction):
    return max(0.01, min(1, 1 - fraction))

interval = (-10, 10)

state, cost, states, costs = annealing(random_start, cost_function, random_neighbor, acceptance_probability, temperature, maxsteps=30, debug=True)

### using scipy
from scipy.optimize import dual_annealing
res = dual_annealing(f, bounds=[(-10, 10)], seed=1234)
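
The snippet assumes an objective f defined elsewhere; a self-contained version with a stand-in objective follows (the function and the printout are illustrative, not from the original):

import numpy as np
from scipy.optimize import dual_annealing

def f(x):
    # stand-in 1-D objective (assumed for illustration)
    return x[0] ** 2 + 10 * np.sin(x[0])

res = dual_annealing(f, bounds=[(-10, 10)], seed=1234)
print(res.x, res.fun)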
Example #25
        I0, R0 = state_train_data["Active"].iloc[0], state_train_data[
            "Recovered"].iloc[0]
        if pd.isnull(R0):
            R0 = 0
        S0 = population - I0 - R0
        # A grid of time points (in days)
        t = np.linspace(0, train_days, train_days)
        # Initial conditions vector
        y0 = S0, I0, R0

        # Reasoning for bounds:
        # Contact rate: based on previous runs, the value is never over 0.2, so I bounded at 0.3
        # Recovery Rate: it usually takes at least 1-2 weeks to get over covid. 1/7 = 0.14
        x_min = optimize.dual_annealing(Get_MAPE,
                                        bounds=[[0, 0.3], [0, 0.15]],
                                        seed=1234,
                                        x0=[0, 0],
                                        maxiter=5000)
        #print(x_min.fun)
        #print(x_min.x)
        sum_mape = sum_mape + x_min.fun
        april_confirmed_data = Predict_April(x_min.x)

        for index, num_confirmed_case in enumerate(april_confirmed_data):
            submission_confirmed_df.loc[(index * 50) +
                                        state_id] = [num_confirmed_case]

        state_id = state_id + 1

        mean_test_mape = sum_mape / num_states
Example #26
 def fit(self, dataset, t,
     search_pop=True,
     Ro_bounds=None,
     pop_sens=[1e-3,1e-4],
     Ro_sens=[0.8,15],
     D_sens=[5,50],
     sigma_sens=None,
     notified_sens=None,
     sample_ponder=None,
     optim_verbose=False,
     **kwargs):
   """
     The method responsible for estimating a set of beta and r 
     parameters for the provided data set. It assumes that in 
     the data there is only one epidemic period.
     
     :param array dataset: list with the respective arrays of Suceptible, Infected, Recovered and Deaths.
     :param array t: The time respective to each set of samples.
     :param bool search_pop: Flag to set the exposed population search, for better Suceptible extimation values. Default is :code:`True`.
     :param list Ro_bounds: The bounds to build the constraints for :code:`Ro = Beta / r`. With minimun and maximun values, respectivelly.
     :param list pop_sens: The sensibility (boudaries) for the proportion of :code:`N` to be found by the pop parameters.
     :param list beta_sens: The beta parameter sensibility minimun and maximun boundaries, respectivelly. Default is :code:`[100,100]`.
     :param list r_sens: The r parameter sensibility minimun and maximun boundaries, respectivelly. Default is :code:`[100,1000]`.
     :param bool sample_ponder: The flag to set the pondering of the non informative recovered data.
     :param bool optim_verbose: If :code:`True`, after fitting will show the optimization summary.
     :param dict **kwargs: The optimization search algorithms options.
   """
   # Create the data values including
   # the Susceptible, Infected, 
   # Recovered and Death data into 
   # their respective variables
   S, I, R, D = None, None, None, None
   if "S" in dataset:
     S = dataset["S"]
   if "R" in dataset:
     R = dataset["R"]
   if "D" in dataset:
     D = dataset["D"]
   I = dataset["I"]
   # Check for the several possible 
   # pondering variables and create
   # the flags to ensure pondering
    self.ponder = sample_ponder is not None
    self.__exposed_flag = sigma_sens is not None
   self._search_pop = search_pop
   # Computing the approximate values 
   # of the parameters to build the 
   # parameter boundaries
   lower = [Ro_sens[0], D_sens[0]]
   upper = [Ro_sens[1], D_sens[1]]
   # Create the nonlinear constraints for 
   # the basic parameters. Now only the 
    # Ro parameter constraint is checked.
   constraints = ()
    if Ro_bounds is not None:
      nlc = NonlinearConstraint(ct.Ro_decimal_constr, Ro_bounds[0], Ro_bounds[1])
      constraints = (nlc,)  # note the trailing comma: a one-element tuple
   # Create the train data for minimization
   # and compute the initial conditions for 
   # the model simulation and the weights 
   # for pondering each time series
   w = [1/np.mean(S), 1/np.mean(I)]
   datatrain = [S, I]
   y0 = [S[0], I[0]] 
   if "R" in self.focus:
     datatrain.append(R)
     y0.append(R[0])
     w.append(1/np.mean(R))
   if "E" in self.focus:
     lower.append(sigma_sens[0])
     upper.append(sigma_sens[1])
     y0.insert(1, 1.0)
   if "N" in self.focus:
     for item in notified_sens.keys():
       print("\t ├─ Including {} bound!".format(item))
       lower.append(notified_sens[item][0])
       upper.append(notified_sens[item][1])
     y0.insert(2, I[0])
   # Population proportion boundaries
   if self._search_pop:
     lower.append(pop_sens[0])
     upper.append(pop_sens[1])
   # Provide a summary of the model 
    # so far, and show the optimization 
   # setup
   if self.verbose:
     print("\t ├─ S(0) ─ I(0) ─ R(0) ─ ", y0)
     print("\t ├─ Ro bound ─  ", lower[0], " ─ ", upper[0])
     print("\t ├─ D  bound ─  ", lower[1], " ─ ", upper[1])
     if self.__exposed_flag:
       print("\t ├─ sigma bound ─  ", lower[2], " ─ ", upper[2])
     print("\t ├─ equation weights ─  ", w)
     print("\t ├─ Running on ─ ", self.__search_alg, "SciPy Search Algorithm")
   # Run the searching algorithm to 
   # minimize the cost function... 
   # There are three possible minimization
   # algorithms to be used. This is 
   # controlled by the flag on the 
   # __init__ method.
   if self.__search_alg == "differential_evolution":
     summary = differential_evolution(
         self.cost_wrapper, 
         list(zip(lower, upper)),
         maxiter=10000,
         popsize=35,
         mutation=(0.5, 1.2),
         strategy="best1exp",
         tol=1e-4,
         args=(datatrain, y0, t, w),
         constraints=constraints,
         # updating='deferred',
         # workers=-1,
         # disp=True
       )
   elif self.__search_alg == "dual_annealing":
     summary = dual_annealing(
         self.cost_wrapper, 
         list(zip(lower, upper)),
         maxiter=10000,
         args=(datatrain, y0, t, w)
       )
   elif self.__search_alg == "shgo":
     summary = shgo(
         self.cost_wrapper,
         list(zip(lower, upper)),
         n=500, iters=10,
         sampling_method="sobol",
         args=(datatrain, y0, t, w)
       )
   # Saving the estimated parameters
   self.parameters = summary.x
   # Printing summary
   if self.verbose:
     print("\t └─ Defined at: ", self.parameters[0], " ─ ", self.parameters[1], "\n")
   if optim_verbose:
     print(summary)
Example #27
def solve_and_predict_area(
        tuple_area_state_: tuple,
        yesterday_: str,
        past_parameters_: pd.DataFrame,
        popcountries: pd.DataFrame,
        startT: str = None, # added to change optimization start date
):
    """
    Parallelizable version of the fitting & solving process for DELPHI V3, this function is called with multiprocessing
    :param tuple_area_state_: tuple corresponding to (continent, country, province, initial_state)
    :param yesterday_: string corresponding to the date from which the model will read the previous parameters. The
    format has to be 'YYYYMMDD'
    :param past_parameters_: Parameters from yesterday_ used as a starting point for the fitting process
    :param startT: date from where the model will be started (format should be 'YYYY-MM-DD')
    :return: either None if can't optimize (either less than 100 cases or less than 7 days with 100 cases) or a tuple
    with 3 dataframes related to that tuple_area_ (parameters df, predictions since yesterday_+1, predictions since
    first day with 100 cases) and a scipy.optimize object (OptimizeResult) that contains the predictions for all
    16 states of the model (and some other information that isn't used)
    """
    time_entering = time.time()
    continent, country, province, initial_state = tuple_area_state_
    country_sub = country.replace(" ", "_")
    province_sub = province.replace(" ", "_")
    print(f"starting to predict for {continent}, {country}, {province}")
    if os.path.exists(PATH_TO_FOLDER_DANGER_MAP + f"processed/Global/Cases_{country_sub}_{province_sub}.csv"):
        totalcases = pd.read_csv(
            PATH_TO_FOLDER_DANGER_MAP + f"processed/Global/Cases_{country_sub}_{province_sub}.csv"
        )
        if totalcases.day_since100.max() < 0:
            logging.warning(
                f"Not enough cases (less than 100) for Continent={continent}, Country={country} and Province={province}"
            )
            return None

        if past_parameters_ is not None:
            parameter_list_total = past_parameters_[
                (past_parameters_.Country == country)
                & (past_parameters_.Province == province)
            ].reset_index(drop=True)
            if len(parameter_list_total) > 0:
                parameter_list_line = parameter_list_total.iloc[-1, :].values.tolist()
                parameter_list = parameter_list_line[5:]
                bounds_params = get_bounds_params_from_pastparams(
                    optimizer=OPTIMIZER,
                    parameter_list=parameter_list,
                    dict_default_reinit_parameters=dict_default_reinit_parameters,
                    percentage_drift_lower_bound=percentage_drift_lower_bound,
                    default_lower_bound=default_lower_bound,
                    dict_default_reinit_lower_bounds=dict_default_reinit_lower_bounds,
                    percentage_drift_upper_bound=percentage_drift_upper_bound,
                    default_upper_bound=default_upper_bound,
                    dict_default_reinit_upper_bounds=dict_default_reinit_upper_bounds,
                    percentage_drift_lower_bound_annealing=percentage_drift_lower_bound_annealing,
                    default_lower_bound_annealing=default_lower_bound_annealing,
                    percentage_drift_upper_bound_annealing=percentage_drift_upper_bound_annealing,
                    default_upper_bound_annealing=default_upper_bound_annealing,
                    default_lower_bound_jump=default_lower_bound_jump,
                    default_upper_bound_jump=default_upper_bound_jump,
                    default_lower_bound_std_normal=default_lower_bound_std_normal,
                    default_upper_bound_std_normal=default_upper_bound_std_normal,
                )
                start_date = pd.to_datetime(parameter_list_line[3])
                bounds_params = tuple(bounds_params)
            else:
                # Otherwise use established lower/upper bounds
                parameter_list = default_parameter_list
                bounds_params = default_bounds_params
                start_date = pd.to_datetime(totalcases.loc[totalcases.day_since100 == 0, "date"].iloc[-1])
        else:
            # Otherwise use established lower/upper bounds
            parameter_list = default_parameter_list
            bounds_params = default_bounds_params
            start_date = pd.to_datetime(totalcases.loc[totalcases.day_since100 == 0, "date"].iloc[-1])

        if startT is not None:
            start_date = max(pd.to_datetime(startT), start_date)
            validcases = totalcases[
                (totalcases.date >= str(start_date))
                & (totalcases.date <= str((pd.to_datetime(yesterday_) + timedelta(days=1)).date()))
            ][["day_since100", "case_cnt", "death_cnt"]].reset_index(drop=True)
        else:
            validcases = totalcases[
                (totalcases.day_since100 >= 0)
                & (totalcases.date <= str((pd.to_datetime(yesterday_) + timedelta(days=1)).date()))
            ][["day_since100", "case_cnt", "death_cnt"]].reset_index(drop=True)
        # Now we start the modeling part:
        if len(validcases) <= validcases_threshold:
            logging.warning(
                f"Not enough historical data (less than a week)"
                + f"for Continent={continent}, Country={country} and Province={province}"
            )
            return None
        else:
            PopulationT = popcountries[
                (popcountries.Country == country) & (popcountries.Province == province)
            ].pop2016.iloc[-1]
            N = PopulationT
            PopulationI = validcases.loc[0, "case_cnt"]
            PopulationD = validcases.loc[0, "death_cnt"]
            if initial_state is not None:
                R_0 = initial_state[9]
            else:
                R_0 = validcases.loc[0, "death_cnt"] * 5 if validcases.loc[0, "case_cnt"] - validcases.loc[0, "death_cnt"]> validcases.loc[0, "death_cnt"] * 5 else 0
                bounds_params_list = list(bounds_params)
                bounds_params_list[-1] = (0.999,1)
                bounds_params = tuple(bounds_params_list)
            cases_t_14days = totalcases[totalcases.date >= str(start_date - pd.Timedelta(14, 'D'))]['case_cnt'].values[0]
            deaths_t_9days = totalcases[totalcases.date >= str(start_date - pd.Timedelta(9, 'D'))]['death_cnt'].values[0]
            R_upperbound = validcases.loc[0, "case_cnt"] - validcases.loc[0, "death_cnt"]
            R_heuristic = cases_t_14days - deaths_t_9days
            if int(R_0*p_d) >= R_upperbound and R_heuristic >= R_upperbound:
                logging.error(f"Initial conditions for PopulationR too high for {country}-{province}, on {startT}")

            """
            Fixed Parameters based on meta-analysis:
            p_h: Hospitalization Percentage
            RecoverHD: Average Days until Recovery
            VentilationD: Number of Days on Ventilation for Ventilated Patients
            maxT: Maximum # of Days Modeled
            p_d: Percentage of True Cases Detected
            p_v: Percentage of Hospitalized Patients Ventilated,
            balance: Regularization coefficient between cases and deaths
            """
            maxT = (default_maxT - start_date).days + 1
            t_cases = (validcases["day_since100"] - validcases.loc[0, "day_since100"]).tolist()  # subtract before converting to a list
            balance, cases_data_fit, deaths_data_fit = create_fitting_data_from_validcases(validcases)
            GLOBAL_PARAMS_FIXED = (N, R_upperbound, R_heuristic, R_0, PopulationD, PopulationI, p_d, p_h, p_v)

            def model_covid(
                t, x, alpha, days, r_s, r_dth, p_dth, r_dthdecay, k1, k2, jump, t_jump, std_normal, k3
            ) -> list:
                """
                SEIR based model with 16 distinct states, taking into account undetected, deaths, hospitalized and
                recovered, and using an ArcTan government response curve, corrected with a Gaussian jump in case of
                a resurgence in cases
                :param t: time step
                :param x: set of all the states in the model (here, 16 of them)
                :param alpha: Infection rate
                :param days: Median day of action (used in the arctan governmental response)
                :param r_s: Median rate of action (used in the arctan governmental response)
                :param r_dth: Rate of death
                :param p_dth: Initial mortality percentage
                :param r_dthdecay: Rate of decay of mortality percentage
                :param k1: Internal parameter 1 (used for initial conditions)
                :param k2: Internal parameter 2 (used for initial conditions)
                :param jump: Amplitude of the Gaussian jump modeling the resurgence in cases
                :param t_jump: Time where the Gaussian jump will reach its maximum value
                :param std_normal: Standard Deviation of the Gaussian jump (~ time span of the resurgence in cases)
                :return: predictions for all 16 states, which are the following
                [0 S, 1 E, 2 I, 3 UR, 4 DHR, 5 DQR, 6 UD, 7 DHD, 8 DQD, 9 R, 10 D, 11 TH, 12 DVR,13 DVD, 14 DD, 15 DT]
                """
                r_i = np.log(2) / IncubeD  # Rate of infection leaving incubation phase
                r_d = np.log(2) / DetectD  # Rate of detection
                r_ri = np.log(2) / RecoverID  # Rate of recovery not under infection
                r_rh = np.log(2) / RecoverHD  # Rate of recovery under hospitalization
                r_rv = np.log(2) / VentilatedD  # Rate of recovery under ventilation
                gamma_t = (
                    (2 / np.pi) * np.arctan(-(t - days) / 20 * r_s) + 1
                    + jump * np.exp(-(t - t_jump) ** 2 / (2 * std_normal ** 2))
                )
                p_dth_mod = (2 / np.pi) * (p_dth - 0.001) * (np.arctan(-t / 20 * r_dthdecay) + np.pi / 2) + 0.001
                assert (
                    len(x) == 16
                ), f"Wrong number of input variables, got {len(x)}, expected 16"
                S, E, I, AR, DHR, DQR, AD, DHD, DQD, R, D, TH, DVR, DVD, DD, DT = x
                # Equations on main variables
                dSdt = -alpha * gamma_t * S * I / N
                dEdt = alpha * gamma_t * S * I / N - r_i * E
                dIdt = r_i * E - r_d * I
                dARdt = r_d * (1 - p_dth_mod) * (1 - p_d) * I - r_ri * AR
                dDHRdt = r_d * (1 - p_dth_mod) * p_d * p_h * I - r_rh * DHR
                dDQRdt = r_d * (1 - p_dth_mod) * p_d * (1 - p_h) * I - r_ri * DQR
                dADdt = r_d * p_dth_mod * (1 - p_d) * I - r_dth * AD
                dDHDdt = r_d * p_dth_mod * p_d * p_h * I - r_dth * DHD
                dDQDdt = r_d * p_dth_mod * p_d * (1 - p_h) * I - r_dth * DQD
                dRdt = r_ri * (AR + DQR) + r_rh * DHR
                dDdt = r_dth * (AD + DQD + DHD)
                # Helper states (usually important for some kind of output)
                dTHdt = r_d * p_d * p_h * I
                dDVRdt = r_d * (1 - p_dth_mod) * p_d * p_h * p_v * I - r_rv * DVR
                dDVDdt = r_d * p_dth_mod * p_d * p_h * p_v * I - r_dth * DVD
                dDDdt = r_dth * (DHD + DQD)
                dDTdt = r_d * p_d * I
                return [
                    dSdt, dEdt, dIdt, dARdt, dDHRdt, dDQRdt, dADdt, dDHDdt,
                    dDQDdt, dRdt, dDdt, dTHdt, dDVRdt, dDVDdt, dDDdt, dDTdt,
                ]

            def residuals_totalcases(params) -> float:
                """
                Function that makes sure the parameters are in the right range during the fitting process and computes
                the loss function depending on the optimizer that has been chosen for this run as a global variable
                :param params: currently fitted values of the parameters during the fitting process
                :return: the value of the loss function as a float that is optimized against (in our case, minimized)
                """
                # Variables Initialization for the ODE system
                alpha, days, r_s, r_dth, p_dth, r_dthdecay, k1, k2, jump, t_jump, std_normal, k3 = params
                # Force params values to stay in a certain range during the optimization process with re-initializations
                params = (
                    max(alpha, dict_default_reinit_parameters["alpha"]),
                    days,
                    max(r_s, dict_default_reinit_parameters["r_s"]),
                    max(min(r_dth, 1), dict_default_reinit_parameters["r_dth"]),
                    max(min(p_dth, 1), dict_default_reinit_parameters["p_dth"]),
                    max(r_dthdecay, dict_default_reinit_parameters["r_dthdecay"]),
                    max(k1, dict_default_reinit_parameters["k1"]),
                    max(k2, dict_default_reinit_parameters["k2"]),
                    max(jump, dict_default_reinit_parameters["jump"]),
                    max(t_jump, dict_default_reinit_parameters["t_jump"]),
                    max(std_normal, dict_default_reinit_parameters["std_normal"]),
                    max(k3, dict_default_reinit_lower_bounds["k3"]),
                )

                x_0_cases = get_initial_conditions(
                    params_fitted=params, global_params_fixed=GLOBAL_PARAMS_FIXED
                )
                x_sol_total = solve_ivp(
                    fun=model_covid,
                    y0=x_0_cases,
                    t_span=[t_cases[0], t_cases[-1]],
                    t_eval=t_cases,
                    args=tuple(params),
                )
                x_sol = x_sol_total.y
                weights = list(range(1, len(cases_data_fit) + 1))
                # weights = [(x/len(cases_data_fit))**2 for x in weights]
                if x_sol_total.status == 0:
                    residuals_value = get_residuals_value(
                        optimizer=OPTIMIZER,
                        balance=balance,
                        x_sol=x_sol,
                        cases_data_fit=cases_data_fit,
                        deaths_data_fit=deaths_data_fit,
                        weights=weights
                    )
                else:
                    residuals_value = 1e16
                return residuals_value

            if OPTIMIZER in ["tnc", "trust-constr"]:
                output = minimize(
                    residuals_totalcases,
                    parameter_list,
                    method=OPTIMIZER,
                    bounds=bounds_params,
                    options={"maxiter": max_iter},
                )
            elif OPTIMIZER == "annealing":
                output = dual_annealing(
                    residuals_totalcases, x0=parameter_list, bounds=bounds_params
                )
            else:
                raise ValueError("Optimizer not in 'tnc', 'trust-constr' or 'annealing' so not supported")

            if (OPTIMIZER in ["tnc", "trust-constr"]) or (OPTIMIZER == "annealing" and output.success):
                best_params = output.x
                t_predictions = [i for i in range(maxT)]
    
                def solve_best_params_and_predict(optimal_params):
                    # Variables Initialization for the ODE system
                    alpha, days, r_s, r_dth, p_dth, r_dthdecay, k1, k2, jump, t_jump, std_normal, k3 = optimal_params
                    optimal_params = [
                        max(alpha, dict_default_reinit_parameters["alpha"]),
                        days,
                        max(r_s, dict_default_reinit_parameters["r_s"]),
                        max(min(r_dth, 1), dict_default_reinit_parameters["r_dth"]),
                        max(min(p_dth, 1), dict_default_reinit_parameters["p_dth"]),
                        max(r_dthdecay, dict_default_reinit_parameters["r_dthdecay"]),
                        max(k1, dict_default_reinit_parameters["k1"]),
                        max(k2, dict_default_reinit_parameters["k2"]),
                        max(jump, dict_default_reinit_parameters["jump"]),
                        max(t_jump, dict_default_reinit_parameters["t_jump"]),
                        max(std_normal, dict_default_reinit_parameters["std_normal"]),
                        max(k3, dict_default_reinit_lower_bounds["k3"]),
                    ]
                    x_0_cases = get_initial_conditions(
                        params_fitted=optimal_params,
                        global_params_fixed=GLOBAL_PARAMS_FIXED,
                    )
                    x_sol_best = solve_ivp(
                        fun=model_covid,
                        y0=x_0_cases,
                        t_span=[t_predictions[0], t_predictions[-1]],
                        t_eval=t_predictions,
                        args=tuple(optimal_params),
                    ).y
                    return x_sol_best

                x_sol_final = solve_best_params_and_predict(best_params)
                data_creator = DELPHIDataCreator(
                    x_sol_final=x_sol_final,
                    date_day_since100=start_date,
                    best_params=best_params,
                    continent=continent,
                    country=country,
                    province=province,
                    testing_data_included=False,
                )
                mape_data = get_mape_data_fitting(
                    cases_data_fit=cases_data_fit, deaths_data_fit=deaths_data_fit, x_sol_final=x_sol_final
                )
                
                logging.info(f"In-Sample MAPE Last 15 Days {country, province}: {round(mape_data, 3)} %")
                logging.debug(f"Best fitted parameters for {country, province}: {best_params}")
                df_parameters_area = data_creator.create_dataset_parameters(mape_data)
                # Creating the datasets for predictions of this area
                if GET_CONFIDENCE_INTERVALS:
                    df_predictions_since_today_area, df_predictions_since_100_area = (
                        data_creator.create_datasets_with_confidence_intervals(
                            cases_data_fit, deaths_data_fit,
                            past_prediction_file=PATH_TO_FOLDER_DANGER_MAP + f"predicted/Global_V4_{past_prediction_date}.csv",
                            past_prediction_date=str(pd.to_datetime(past_prediction_date).date()))
                    )
                else:
                    df_predictions_since_today_area, df_predictions_since_100_area = data_creator.create_datasets_predictions()
                logging.info(
                    f"Finished predicting for Continent={continent}, Country={country} and Province={province} in "
                    + f"{round(time.time() - time_entering, 2)} seconds"
                )
                logging.info("--------------------------------------------------------------------------------------------")
                return (
                    df_parameters_area,
                    df_predictions_since_today_area,
                    df_predictions_since_100_area,
                    output,
                )
            else:
                return None
    else:  # file for that tuple (continent, country, province) doesn't exist in processed files
        logging.info(
            f"Skipping Continent={continent}, Country={country} and Province={province} as no processed file available"
        )
        return None
Example #28
 def optimize(self):
     Algo.optimize(self)
     dual_annealing(
         func=self._funcwrapped, x0=self._xinit,
         bounds=list(zip(self._lower, self._upper)), maxiter=MAX_IT)
Example #29
    def run(self):
        """
        Optimize the problem using selected Scipy optimizer.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False is successful.
        """
        problem = self._problem()
        opt = self.options['optimizer']
        model = problem.model
        self.iter_count = 0
        self._total_jac = None

        self._check_for_missing_objective()

        # Initial Run
        with RecordingDebugging(self._get_name(), self.iter_count,
                                self) as rec:
            model.run_solve_nonlinear()
            self.iter_count += 1

        self._con_cache = self.get_constraint_values()
        desvar_vals = self.get_design_var_values()
        self._dvlist = list(self._designvars)

        # maxiter and disp get passed into scipy with all the other options.
        if 'maxiter' not in self.opt_settings:  # lets you override the value in options
            self.opt_settings['maxiter'] = self.options['maxiter']
        self.opt_settings['disp'] = self.options['disp']

        # Size Problem
        nparam = 0
        for param in itervalues(self._designvars):
            nparam += param['size']
        x_init = np.empty(nparam)

        # Initial Design Vars
        i = 0
        use_bounds = (opt in _bounds_optimizers)
        if use_bounds:
            bounds = []
        else:
            bounds = None

        for name, meta in iteritems(self._designvars):
            size = meta['size']
            x_init[i:i + size] = desvar_vals[name]
            i += size

            # Bounds if our optimizer supports them
            if use_bounds:
                meta_low = meta['lower']
                meta_high = meta['upper']
                for j in range(size):

                    if isinstance(meta_low, np.ndarray):
                        p_low = meta_low[j]
                    else:
                        p_low = meta_low

                    if isinstance(meta_high, np.ndarray):
                        p_high = meta_high[j]
                    else:
                        p_high = meta_high

                    bounds.append((p_low, p_high))

        if use_bounds and (opt in _supports_new_style) and _use_new_style:
            # For 'trust-constr' it is better to use the new-style bounds, because they seem to work
            # better (for the current examples in the tests) with the "keep_feasible" option
            try:
                from scipy.optimize import Bounds
                from scipy.optimize._constraints import old_bound_to_new
            except ImportError:
                msg = (
                    'The "trust-constr" optimizer is supported for SciPy 1.1.0 and above. '
                    'The installed version is {}')
                raise ImportError(msg.format(scipy_version))

            # Convert "old-style" bounds to "new_style" bounds
            lower, upper = old_bound_to_new(bounds)  # tuple, tuple
            keep_feasible = self.opt_settings.get('keep_feasible_bounds', True)
            bounds = Bounds(lb=lower, ub=upper, keep_feasible=keep_feasible)

        # Constraints
        constraints = []
        i = 1  # start at 1 since row 0 is the objective.  Constraints start at row 1.
        lin_i = 0  # counter for linear constraint jacobian
        lincons = []  # list of linear constraints
        self._obj_and_nlcons = list(self._objs)

        if opt in _constraint_optimizers:
            for name, meta in iteritems(self._cons):
                size = meta['size']
                upper = meta['upper']
                lower = meta['lower']
                equals = meta['equals']
                if opt in _gradient_optimizers and 'linear' in meta and meta[
                        'linear']:
                    lincons.append(name)
                    self._con_idx[name] = lin_i
                    lin_i += size
                else:
                    self._obj_and_nlcons.append(name)
                    self._con_idx[name] = i
                    i += size

                # Scipy constraint optimizers take constraints in two separate formats

                # Type of constraints is list of NonlinearConstraint
                if opt in _supports_new_style and _use_new_style:
                    try:
                        from scipy.optimize import NonlinearConstraint
                    except ImportError:
                        msg = (
                            'The "trust-constr" optimizer is supported for SciPy 1.1.0 and'
                            'above. The installed version is {}')
                        raise ImportError(msg.format(scipy_version))

                    if equals is not None:
                        lb = ub = equals
                    else:
                        lb = lower
                        ub = upper
                    # Loop over every index separately,
                    # because scipy calls each constraint by index.
                    for j in range(size):
                        # Double-sided constraints are accepted by the algorithm
                        args = [name, False, j]
                        # TODO linear constraint if meta['linear']
                        # TODO add option for Hessian
                        con = NonlinearConstraint(fun=signature_extender(
                            weak_method_wrapper(self, '_con_val_func'), args),
                                                  lb=lb,
                                                  ub=ub,
                                                  jac=signature_extender(
                                                      weak_method_wrapper(
                                                          self,
                                                          '_congradfunc'),
                                                      args))
                        constraints.append(con)
                else:  # Type of constraints is list of dict
                    # Loop over every index separately,
                    # because scipy calls each constraint by index.
                    for j in range(size):
                        con_dict = {}
                        if meta['equals'] is not None:
                            con_dict['type'] = 'eq'
                        else:
                            con_dict['type'] = 'ineq'
                        con_dict['fun'] = weak_method_wrapper(self, '_confunc')
                        if opt in _constraint_grad_optimizers:
                            con_dict['jac'] = weak_method_wrapper(
                                self, '_congradfunc')
                        con_dict['args'] = [name, False, j]
                        constraints.append(con_dict)

                        if isinstance(upper, np.ndarray):
                            upper = upper[j]

                        if isinstance(lower, np.ndarray):
                            lower = lower[j]

                        dblcon = (upper < openmdao.INF_BOUND) and (
                            lower > -openmdao.INF_BOUND)

                        # Add extra constraint if double-sided
                        if dblcon:
                            dcon_dict = {}
                            dcon_dict['type'] = 'ineq'
                            dcon_dict['fun'] = weak_method_wrapper(
                                self, '_confunc')
                            if opt in _constraint_grad_optimizers:
                                dcon_dict['jac'] = weak_method_wrapper(
                                    self, '_congradfunc')
                            dcon_dict['args'] = [name, True, j]
                            constraints.append(dcon_dict)

            # precalculate gradients of linear constraints
            if lincons:
                self._lincongrad_cache = self._compute_totals(
                    of=lincons, wrt=self._dvlist, return_format='array')
            else:
                self._lincongrad_cache = None

        # Provide gradients for optimizers that support it
        if opt in _gradient_optimizers:
            jac = self._gradfunc
        else:
            jac = None

        # Hessian calculation method for optimizers, which require it
        if opt in _hessian_optimizers:
            if 'hess' in self.opt_settings:
                hess = self.opt_settings.pop('hess')
            else:
                # Defaults to BFGS, if not in opt_settings
                from scipy.optimize import BFGS
                hess = BFGS()
        else:
            hess = None

        # compute dynamic simul deriv coloring if option is set
        if coloring_mod._use_total_sparsity:
            if ((self._coloring_info['coloring'] is None
                 and self._coloring_info['dynamic'])
                    or self.options['dynamic_simul_derivs']):
                if self.options['dynamic_simul_derivs']:
                    warn_deprecation(
                        "The 'dynamic_simul_derivs' option has been deprecated. Call "
                        "the 'declare_coloring' function instead.")
                coloring_mod.dynamic_total_coloring(
                    self,
                    run_model=False,
                    fname=self._get_total_coloring_fname())

                # if the improvement wasn't large enough, turn coloring off
                info = self._coloring_info
                if info['coloring'] is not None:
                    pct = info['coloring']._solves_info()[-1]
                    if info['min_improve_pct'] > pct:
                        info['coloring'] = info['static'] = info['dynamic'] = None
                        simple_warning(
                            "%s: Coloring was deactivated.  Improvement of %.1f%% was "
                            "less than min allowed (%.1f%%)." %
                            (self.msginfo, pct, info['min_improve_pct']))

        # optimize
        try:
            if opt in _optimizers:
                result = minimize(
                    self._objfunc,
                    x_init,
                    # args=(),
                    method=opt,
                    jac=jac,
                    hess=hess,
                    # hessp=None,
                    bounds=bounds,
                    constraints=constraints,
                    tol=self.options['tol'],
                    # callback=None,
                    options=self.opt_settings)
            elif opt == 'basinhopping':
                from scipy.optimize import basinhopping

                def fun(x):
                    return self._objfunc(x), jac(x)

                if 'minimizer_kwargs' not in self.opt_settings:
                    self.opt_settings['minimizer_kwargs'] = {
                        "method": "L-BFGS-B",
                        "jac": True
                    }
                self.opt_settings.pop('maxiter')  # basinhopping has no 'maxiter' argument

                def accept_test(f_new, x_new, f_old, x_old):
                    # Used to implement bounds besides the original functionality
                    if bounds is not None:
                        bound_check = all([
                            b[0] <= xi <= b[1] for xi, b in zip(x_new, bounds)
                        ])
                        user_test = self.opt_settings.pop('accept_test',
                                                          None)  # callable
                        # has to satisfy both the bounds and the acceptance test defined by the
                        # user
                        if user_test is not None:
                            test_res = user_test(f_new, x_new, f_old, x_old)
                            if test_res == 'force accept':
                                return test_res
                            else:  # result is boolean
                                return bound_check and test_res
                        else:  # no user acceptance test, check only the bounds
                            return bound_check
                    else:
                        return True

                result = basinhopping(fun,
                                      x_init,
                                      accept_test=accept_test,
                                      **self.opt_settings)
            elif opt == 'dual_annealing':
                from scipy.optimize import dual_annealing
                self.opt_settings.pop('disp')  # dual_annealing has no 'disp' argument
                # There is no "options" param, so "opt_settings" can be used to set the (many)
                # keyword arguments
                result = dual_annealing(self._objfunc,
                                        bounds=bounds,
                                        **self.opt_settings)
            elif opt == 'differential_evolution':
                from scipy.optimize import differential_evolution
                # There is no "options" param, so "opt_settings" can be used to set the (many)
                # keyword arguments
                result = differential_evolution(self._objfunc,
                                                bounds=bounds,
                                                **self.opt_settings)
            elif opt == 'shgo':
                from scipy.optimize import shgo
                kwargs = dict()
                for param in ('minimizer_kwargs', 'sampling_method', 'n',
                              'iters'):
                    if param in self.opt_settings:
                        kwargs[param] = self.opt_settings[param]
                # Set the Jacobian and the Hessian to the value calculated in OpenMDAO
                if 'minimizer_kwargs' not in kwargs or kwargs[
                        'minimizer_kwargs'] is None:
                    kwargs['minimizer_kwargs'] = {}
                kwargs['minimizer_kwargs'].setdefault('jac', jac)
                kwargs['minimizer_kwargs'].setdefault('hess', hess)
                # Objective function tolerance
                self.opt_settings['f_tol'] = self.options['tol']
                result = shgo(self._objfunc,
                              bounds=bounds,
                              constraints=constraints,
                              options=self.opt_settings,
                              **kwargs)
            else:
                msg = 'Optimizer "{}" is not implemented yet. Choose from: {}'
                raise NotImplementedError(msg.format(opt, _all_optimizers))

        # If an exception was swallowed in one of our callbacks, we want to raise it
        # rather than the cryptic message from scipy.
        except Exception as msg:
            if self._exc_info is not None:
                self._reraise()
            else:
                raise

        if self._exc_info is not None:
            self._reraise()

        self.result = result

        if hasattr(result, 'success'):
            self.fail = False if result.success else True
            if self.fail:
                print('Optimization FAILED.')
                print(result.message)
                print('-' * 35)

            elif self.options['disp']:
                print('Optimization Complete')
                print('-' * 35)
        else:
            self.fail = True  # It is not known, so the worst option is assumed
            print('Optimization Complete (success not known)')
            print(result.message)
            print('-' * 35)

        return self.fail
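A minimal usage sketch for a driver like the one above, assuming a recent OpenMDAO where unconnected inputs get automatic independent variables; the model and values are illustrative, not from the original:

import openmdao.api as om

prob = om.Problem()
prob.model.add_subsystem('comp', om.ExecComp('f = (x - 3.0) ** 2'),
                         promotes=['*'])
prob.model.add_design_var('x', lower=0.0, upper=10.0)
prob.model.add_objective('f')

prob.driver = om.ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'dual_annealing'  # handled by the branch above
prob.driver.options['maxiter'] = 200

prob.setup()
prob.set_val('x', 9.0)
prob.run_driver()
print(prob.get_val('x'))  # should approach 3.0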
Exemplo n.º 30
0
 def test_fun_args_no_ls(self):
     ret = dual_annealing(self.func, self.ld_bounds,
                          args=((3.14159, )), no_local_search=True,
                          seed=self.seed)
     assert_allclose(ret.fun, 3.14159, atol=1e-4)
Exemplo n.º 31
0
 def test_high_dim_no_ls(self):
     ret = dual_annealing(self.func,
                          self.hd_bounds,
                          no_local_search=True,
                          seed=self.seed)
     assert_allclose(ret.fun, 0., atol=1e-4)
Exemplo n.º 32
0
 def fit(self, initS, initE, initI, initD, initC, Y):
     self.lossing = []
     # Select the confirmed/deaths/recovered columns (Chinese names), assuming Y is a pandas DataFrame
     args = (initS, initE, initI, initD, initC, Y[['确诊', '死亡', '治愈']].to_numpy())
     param = [(0, 1)] * 5
     result = dual_annealing(self._optimize, param, args=args, seed=30, maxiter=10)['x']
     self.P = SEIDC_PARAM(*result)
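dual_annealing forwards the args tuple to the objective as extra positional arguments after x, which is how the initial conditions and data reach self._optimize above. A self-contained illustration of that mechanism:

import numpy as np
from scipy.optimize import dual_annealing

def shifted_sphere(x, offset):
    # 'offset' arrives through the args tuple
    return float(np.sum((x - offset) ** 2))

ret = dual_annealing(shifted_sphere, [(-5, 5)] * 3, args=(1.5,), seed=42)
print(ret.x)  # all components near 1.5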
Exemplo n.º 33
0
 def test_nb_fun_call_no_ls(self):
     ret = dual_annealing(self.func,
                          self.ld_bounds,
                          no_local_search=True,
                          seed=self.seed)
     assert_equal(self.nb_fun_call, ret.nfev)
Exemplo n.º 34
0
import time

import numpy as np
import scipy.io as scio
from scipy.optimize import dual_annealing

if __name__ == '__main__':
    ratio = 0.5
    mask_x = np.ones((5, 1))
    mask_x[0, 0] = 0.5
    mask_x[-1, 0] = 0.5
    mask_y = np.ones((1, 5))
    mask_y[0, 0] = 0.5
    mask_y[0, -1] = 0.5
    mask = mask_x @ mask_y * 0.25 ** 2
    mask = mask.reshape(-1)
    img_ele = 25
    lw = [0.] * img_ele
    up = [1.] * img_ele
    bound = list(zip(lw, up))
    count = 0
    y_list = []

    def func_uncon(x):
        global count
        count += 1
        y = eng.func_python_api(x.tolist())
        print(f'count={count}, y={y:.4f}')
        y_list.append(y)
        return y


    time_start = time.time()

    ret = dual_annealing(func_uncon, bounds=bound, seed=0, maxiter=100000, maxfun=2000)

    scio.savemat('data_gsa.mat', {'y': y_list})
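The eng handle called by func_uncon is never created in this excerpt; a minimal setup sketch with the official MATLAB Engine API for Python, assuming MATLAB is installed and func_python_api is a MATLAB function on the engine's path:

import matlab.engine

eng = matlab.engine.start_matlab()
# func_python_api(x) must accept a 25-element list and return a scalar cost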
Exemplo n.º 35
0
 def test_max_fun_no_ls(self):
     ret = dual_annealing(self.func, self.ld_bounds,
                          no_local_search=True, maxfun=500)
     assert ret.nfev <= 500
Exemplo n.º 36
0
    def solve(self):
        '''Run the optimization of the pressurized pipe network.

        Return
        ------
        The best solution found, where
            solution: numpy int array, sizes of pipes, according to series.

        If no solution is found, return None.

        The optimized epanet model is saved in a new file.
        '''
        # time.clock() was removed in Python 3.8; use time.perf_counter() instead
        start_time = perf_counter()
        solution = None
        reduced = False
        print('SOLVING')
        print('The solver started at: ' + strftime("%H:%M:%S", localtime()))

        # SELECT ALGORITHM
        if self.algorithm == A_GD:
            # GRADIENT DESCENT ALGORITHM
            print('*** GRADIENT DESCENT ALGORITHM ***')

            # SET TO 0 AND INITIAL PRESSURE CHECKING
            self.set_x(np.zeros(self.dimension, dtype=int))  # np.int was removed in NumPy 1.24

            while True:
                # CHECK PRESSURES
                status, headlosses = self.check(mode='GD')
                if status:
                    # PRESSURES OK END OF LOOP
                    break

                # INCREASE DIAMETER
                for index in np.nditer(headlosses):
                    x = self.get_x()
                    if x[index] < self.ubound[index]:
                        x[index] += 1
                        self.set_x(x)
                        break

            if status:
                solution = self.get_x().copy()

        if self.algorithm in [A_DE, A_DA]:
            # DIFFERENTIAL EVOLUTION / DUAL ANNEALING ALGORITHM
            # SET BOUNDS
            tmp = list(zip(self.lbound, self.ubound))
            self.bounds = np.array(tmp, dtype=int)

            def objective(x):
                self.set_x(np.array([round(i) for i in x[:]], int))
                if self.check(mode='TF'):
                    return self.get_cost()
                else:
                    return PENALTY

            # SOLVE
            if self.algorithm == A_DE:
                # DIFFERENTIAL EVOLUTION
                from scipy.optimize import differential_evolution
                print('*** DIFFERENTIAL EVOLUTION ALGORITHM ***')
                result = differential_evolution(objective, self.bounds)
            else:
                # DUAL ANNEALING ALGORITHM
                from scipy.optimize import dual_annealing
                print('*** DUAL ANNEALING ALGORITHM ***')
                result = dual_annealing(objective, self.bounds)

            # CHECK
            tmp = [round(i) for i in result.x[:]]
            tmp = np.array(tmp, dtype=int)
            self.set_x(tmp)
            if self.check(mode='TF'):
                solution = self.get_x().copy()
            else:
                solution = None

        if self.polish and solution is not None:
            # POLISH ALGORITHM
            maxredxset = [0.0, []]

            def search_reduc(savings, redxset):
                '''
                Search for possible reductions of pipe diameters.

                redxset: list, ordered by index, of pipes whose diameter can
                    be reduced one step according to the pipe series.

                savings: reduction of cost achieved by applying redxset.

                If a pipe can be reduced, it is added, starting a recursive
                process that stops when no pipe can be reduced; the cost
                reduction is then compared with the previous maximum
                reduction, updating it.

                Return
                ------
                Updates maxredxset.
                '''
                changes = False
                # SET TO SOL - REDUCTIONS
                newx = solution.copy()

                if len(redxset) > 0:
                    start = redxset[-1]
                else:
                    start = 0
                for i in redxset[:]:
                    newx[i] -= 1
                # SEARCH FOR A POSSIBLE REDUCIBLE PIPE
                for i in range(start, len(self._x)):
                    if newx[i] > 0:
                        # REDUCE DIAMETER
                        newx[i] -= 1
                        # CHECK PRESSURES
                        self.set_x(newx)
                        if self.check(mode='TF'):
                            # ACCEPT CHANGES
                            changes = True
                            series = self.catalog[self.pipes[i]['series']]
                            c1 = series[newx[i] + 1]['price']
                            c2 = series[newx[i]]['price']
                            length = self.pipes[i]['length']
                            newsavings = savings + (c1 - c2) * length
                            newredxset = redxset.copy()
                            newredxset.append(i)
                            search_reduc(newsavings, newredxset)
                        else:
                            # UNDO
                            newx[i] += 1
                if not changes:
                    # CHECK AND UPDATE MAX REDUCTION SET
                    if savings > maxredxset[0]:
                        maxredxset[0] = savings
                        maxredxset[1] = redxset

            print('+++ POLISH ALGORITHM +++')
            search_reduc(0.0, [])
            print('The maximum reduction cost is: %.2f' % (maxredxset[0]))
            if maxredxset[0] > 0:
                reduced = True
                for i in maxredxset[1][:]:
                    solution[i] -= 1

        # SOLUTION
        if solution is not None:
            print('Solving was successful.')
            self.set_x(solution)
            cost = self.get_cost()
            print('Network cost is: %.2f' % (cost))
            solvedfn = self.inpfn[:-4] + '_Solved_'
            if self.algorithm == A_GD:
                solvedfn += 'GD'
            elif self.algorithm == A_DE:
                solvedfn += 'DE'
            elif self.algorithm == A_DA:
                solvedfn += 'DA'
            if reduced:
                solvedfn += '+Polish.inp'
            else:
                solvedfn += '.inp'
            self.save_file(solvedfn)
            print('Sized network saved in: %s' % (solvedfn))
        else:
            print('No solution found.')

        # DURATION
        print('Finished at:', strftime("%H:%M:%S"), end='')
        print('. Duration = ', perf_counter() - start_time)
        print('-' * 80)

        return solution
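objective() above snaps each continuous trial point to integer pipe sizes with round() before evaluating cost; the same pattern in isolation, with a hypothetical quadratic cost:

import numpy as np
from scipy.optimize import dual_annealing

def integer_cost(x):
    xi = np.round(x).astype(int)  # snap the continuous trial point to integers
    return float(np.sum((xi - 3) ** 2))

res = dual_annealing(integer_cost, bounds=[(0, 10)] * 4, seed=1)
print(np.round(res.x).astype(int))  # expect [3 3 3 3]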
Exemplo n.º 37
0
from scipy.optimize import minimize, dual_annealing
import math


def ackley(X):
    """
    global optimum is at (0, 0) = 0
    :param X:
    :return:
    """
    x1 = X[0]
    x2 = X[1]
    part_1 = -0.2 * math.sqrt(0.5 * (x1 * x1 + x2 * x2))
    part_2 = 0.5 * (math.cos(2 * math.pi * x1) + math.cos(2 * math.pi * x2))
    value = math.exp(1) + 20 - 20 * math.exp(part_1) - math.exp(part_2)
    return value


# note: the local optimizer failed on this function (see the check below)
res = dual_annealing(ackley, [(-5, 5), (-5, 5)])
print(res)
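A quick check of the comment above (the start point is chosen arbitrarily): a gradient-based local optimizer gets trapped in one of Ackley's many local minima, which is why the global dual_annealing call is used instead:

res_local = minimize(ackley, x0=[2.5, 2.5], method='BFGS')
print(res_local.x, res_local.fun)  # typically a nearby local minimum, fun well above 0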
Exemplo n.º 38
0
    def _helper_run_appropriate_fitter(self, lowerbounds_list: list,
                                       upperbounds_list: list,
                                       bounds_not_least_squares: sopt.Bounds):
        """
        We start with an instance of Fitmodel class, which is saved as 
        self.fitmodel_input
        This instance has the necessary data to run the fit, including the appropriate 
        fit method string name

        Return: optimization output or None
        depending on whether the fit was successful or not
        """
                
        if self.fitmodel_input.minimization_method_str == "least_squares":
            fit_function_callable = getattr(fitmodels,self.fitmodel_input.fitfunction_name_string)
            optimization_output = sopt.least_squares(fit_function_callable,
                                                      np.array(list(self.fitmodel_input.start_paramdict.values())),
                                                      args=(self.fitmodel_input.xvals,
                                                            self.fitmodel_input.yvals,
                                                            self.fitmodel_input.errorbars),
                                                      bounds=(lowerbounds_list, upperbounds_list),
                                                      loss="linear", f_scale=1)
            return optimization_output
        elif self.fitmodel_input.minimization_method_str == "minimize":
            fit_function_callable = getattr(fitmodels,self.fitmodel_input.fitfunction_name_string)
            optimization_output = sopt.minimize(sum_squares_decorator(fit_function_callable),
                                                np.array(list(self.fitmodel_input.start_paramdict.values())),
                                                args=(self.fitmodel_input.xvals,
                                                      self.fitmodel_input.yvals,
                                                      self.fitmodel_input.errorbars),
                                                bounds=bounds_not_least_squares,
                                                **self.fitmodel_input.fitter_options_dict)
            return optimization_output
        elif self.fitmodel_input.minimization_method_str == "basinhopping":
            fit_function_callable = getattr(fitmodels, self.fitmodel_input.fitfunction_name_string)
            optimization_output = sopt.basinhopping(
                sum_squares_decorator(fit_function_callable),
                np.array(list(self.fitmodel_input.start_paramdict.values())),
                minimizer_kwargs = {"args":(self.fitmodel_input.xvals,
                      self.fitmodel_input.yvals,
                      self.fitmodel_input.errorbars),
                                    "method":"trust-constr"}, # TODO: figure out a smart thing to use here
                **self.fitmodel_input.fitter_options_dict)
            # The next line works around a quirk of basinhopping: its result
            # object lacks a top-level 'success' attribute, so copy it over
            setattr(optimization_output,"success",optimization_output.lowest_optimization_result.success)
            return optimization_output
        elif self.fitmodel_input.minimization_method_str == "differential_evolution":
            fit_function_callable = getattr(fitmodels, self.fitmodel_input.fitfunction_name_string)
            optimization_output = sopt.differential_evolution(
                sum_squares_decorator(fit_function_callable),
                bounds_not_least_squares,
                args=(self.fitmodel_input.xvals,
                      self.fitmodel_input.yvals,
                      self.fitmodel_input.errorbars),
            **self.fitmodel_input.fitter_options_dict)
            return optimization_output
        elif self.fitmodel_input.minimization_method_str == "shgo":
            fit_function_callable = getattr(fitmodels, self.fitmodel_input.fitfunction_name_string)
            optimization_output = sopt.shgo(
                sum_squares_decorator(fit_function_callable),
                tuple(zip(lowerbounds_list,upperbounds_list)),
                args=(self.fitmodel_input.xvals,
                      self.fitmodel_input.yvals,
                      self.fitmodel_input.errorbars),
            **self.fitmodel_input.fitter_options_dict)
            return optimization_output
        elif self.fitmodel_input.minimization_method_str == "dual_annealing":
            fit_function_callable = getattr(fitmodels, self.fitmodel_input.fitfunction_name_string)
            optimization_output = sopt.dual_annealing(
                sum_squares_decorator(fit_function_callable),
                tuple(zip(lowerbounds_list,upperbounds_list)),
                args=(self.fitmodel_input.xvals,
                      self.fitmodel_input.yvals,
                      self.fitmodel_input.errorbars),
            **self.fitmodel_input.fitter_options_dict)
            return optimization_output
        elif self.fitmodel_input.minimization_method_str == "findmax":
            # make a copy so that we can go about deleting the max value to find the next
            # max and so on
            peaks_xvals = []
            peaks_yvals = []
            data_array_copy = self.fitmodel_input.yvals.copy()
            # find the max, then replace that point with the average, find the next max
            # and keep going until we have found as many maxima as requested
            for peak_num in range(self.fitmodel_input.start_paramdict["numpeaks"]):
                peakval_y = np.nanmax(data_array_copy)
                peakcoord = np.argmax(data_array_copy)
                peakval_x = self.fitmodel_input.xvals[peakcoord]
                peaks_xvals.append(peakval_x)
                peaks_yvals.append(peakval_y)
                data_array_copy[peakcoord] = np.mean(data_array_copy)
            # build an optimization_output object that mimics the result of a regular fit
            param_dict_length = len(self.fitmodel_input.start_paramdict)
            optimization_output = types.SimpleNamespace()  # an empty namespace object
            optimization_output.fun = -1  # the objective value has no meaning here
            optimization_output.x = [peaks_xvals,peaks_yvals]
            # we now add values to the "output" which are not real fit parameters;
            # in normal fitting these are fit parameters, but since this is a "fake"
            # fit we simply add the initial parameters to keep the interface consistent
            for (idx,key) in enumerate(self.fitmodel_input.start_paramdict):
                if idx >= len(optimization_output.x):
                    optimization_output.x.append(self.fitmodel_input.start_paramdict[key])
            optimization_output.success = True
            return optimization_output
        elif self.fitmodel_input.minimization_method_str == "findmin":
            # make a copy so that we can go about deleting the max value to find the next
            # max and so on
            peaks_xvals = []
            peaks_yvals = []
            data_array_copy = self.fitmodel_input.yvals.copy()
            # find the min, then replace that point with the average, find the next min
            # and keep going until we have found as many minima as requested
            for peak_num in range(self.fitmodel_input.start_paramdict["numpeaks"]):
                peakval_y = np.nanmin(data_array_copy)
                peakcoord = np.argmin(data_array_copy)
                peakval_x = self.fitmodel_input.xvals[peakcoord]
                peaks_xvals.append(peakval_x)
                peaks_yvals.append(peakval_y)
                data_array_copy[peakcoord] = np.mean(data_array_copy)
            # build an optimization_output object that mimics the result of a regular fit
            param_dict_length = len(self.fitmodel_input.start_paramdict)
            optimization_output = types.SimpleNamespace()  # an empty namespace object
            optimization_output.fun = -1  # the objective value has no meaning here
            optimization_output.x = [peaks_xvals,peaks_yvals]
            for (idx,key) in enumerate(self.fitmodel_input.start_paramdict):
                if idx >= len(optimization_output.x):
                    optimization_output.x.append(self.fitmodel_input.start_paramdict[key])
            optimization_output.success = True
            return optimization_output
        else:
            print(
                """Message from Class {:s} function _helper_run_appropriate_fitter: 
                you tried to use the following optimizer: {}. 
                This optimizer does not exist. Not doing any optimization""".format(
                    self.__class__.__name__, self.fitmodel_input.minimization_method_str))
            return None
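sum_squares_decorator is defined elsewhere in this project; a minimal sketch of what such a decorator might look like, assuming the fit functions return per-point residual-like values and the scalar minimizers above need a single sum-of-squares objective:

import numpy as np

def sum_squares_decorator(fit_function):
    # Hypothetical reconstruction: wrap a residuals-style fit function so that
    # minimize, basinhopping, dual_annealing, etc. receive one scalar value.
    def wrapped(params, xvals, yvals, errorbars):
        residuals = np.asarray(fit_function(params, xvals, yvals, errorbars))
        return float(np.sum(residuals ** 2))
    return wrapped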
Exemplo n.º 39
0
    def run(self):
        """
        Optimize the problem using selected Scipy optimizer.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        problem = self._problem
        opt = self.options['optimizer']
        model = problem.model
        self.iter_count = 0
        self._total_jac = None

        # Initial Run
        with RecordingDebugging(self._get_name(), self.iter_count, self) as rec:
            model.run_solve_nonlinear()
            self.iter_count += 1

        self._con_cache = self.get_constraint_values()
        desvar_vals = self.get_design_var_values()
        self._dvlist = list(self._designvars)

        # maxiter and disp get passed into scipy with all the other options.
        self.opt_settings['maxiter'] = self.options['maxiter']
        self.opt_settings['disp'] = self.options['disp']

        # Size Problem
        nparam = 0
        for param in itervalues(self._designvars):
            nparam += param['size']
        x_init = np.empty(nparam)

        # Initial Design Vars
        i = 0
        use_bounds = (opt in _bounds_optimizers)
        if use_bounds:
            bounds = []
        else:
            bounds = None

        for name, meta in iteritems(self._designvars):
            size = meta['size']
            x_init[i:i + size] = desvar_vals[name]
            i += size

            # Bounds if our optimizer supports them
            if use_bounds:
                meta_low = meta['lower']
                meta_high = meta['upper']
                for j in range(size):

                    if isinstance(meta_low, np.ndarray):
                        p_low = meta_low[j]
                    else:
                        p_low = meta_low

                    if isinstance(meta_high, np.ndarray):
                        p_high = meta_high[j]
                    else:
                        p_high = meta_high

                    bounds.append((p_low, p_high))

        if use_bounds and (opt in _supports_new_style) and _use_new_style:
            # For 'trust-constr' it is better to use the new-style bounds, because they seem to work
            # better (for the current examples in the tests) with the "keep_feasible" option
            try:
                from scipy.optimize import Bounds
                from scipy.optimize._constraints import old_bound_to_new
            except ImportError:
                msg = ('The "trust-constr" optimizer is supported for SciPy 1.1.0 and above. '
                       'The installed version is {}')
                raise ImportError(msg.format(scipy_version))

            # Convert "old-style" bounds to "new_style" bounds
            lower, upper = old_bound_to_new(bounds)  # tuple, tuple
            keep_feasible = self.opt_settings.get('keep_feasible_bounds', True)
            bounds = Bounds(lb=lower, ub=upper, keep_feasible=keep_feasible)

        # Constraints
        constraints = []
        i = 1  # start at 1 since row 0 is the objective.  Constraints start at row 1.
        lin_i = 0  # counter for linear constraint jacobian
        lincons = []  # list of linear constraints
        self._obj_and_nlcons = list(self._objs)

        if opt in _constraint_optimizers:
            for name, meta in iteritems(self._cons):
                size = meta['size']
                upper = meta['upper']
                lower = meta['lower']
                equals = meta['equals']
                if 'linear' in meta and meta['linear']:
                    lincons.append(name)
                    self._con_idx[name] = lin_i
                    lin_i += size
                else:
                    self._obj_and_nlcons.append(name)
                    self._con_idx[name] = i
                    i += size

                # Scipy constraint optimizers take constraints in two separate formats

                # Type of constraints is list of NonlinearConstraint
                if opt in _supports_new_style and _use_new_style:
                    try:
                        from scipy.optimize import NonlinearConstraint
                    except ImportError:
                        msg = ('The "trust-constr" optimizer is supported for SciPy 1.1.0 and'
                               'above. The installed version is {}')
                        raise ImportError(msg.format(scipy_version))

                    if equals is not None:
                        lb = ub = equals
                    else:
                        lb = lower
                        ub = upper
                    # Loop over every index separately,
                    # because scipy calls each constraint by index.
                    for j in range(size):
                        # Double-sided constraints are accepted by the algorithm
                        args = [name, False, j]
                        # TODO linear constraint if meta['linear']
                        # TODO add option for Hessian
                        con = NonlinearConstraint(fun=signature_extender(self._con_val_func, args),
                                                  lb=lb, ub=ub,
                                                  jac=signature_extender(self._congradfunc, args))
                        constraints.append(con)
                else:  # Type of constraints is list of dict
                    # Loop over every index separately,
                    # because scipy calls each constraint by index.
                    for j in range(size):
                        con_dict = {}
                        if meta['equals'] is not None:
                            con_dict['type'] = 'eq'
                        else:
                            con_dict['type'] = 'ineq'
                        con_dict['fun'] = self._confunc
                        if opt in _constraint_grad_optimizers:
                            con_dict['jac'] = self._congradfunc
                        con_dict['args'] = [name, False, j]
                        constraints.append(con_dict)

                        if isinstance(upper, np.ndarray):
                            upper = upper[j]

                        if isinstance(lower, np.ndarray):
                            lower = lower[j]

                        dblcon = (upper < openmdao.INF_BOUND) and (lower > -openmdao.INF_BOUND)

                        # Add extra constraint if double-sided
                        if dblcon:
                            dcon_dict = {}
                            dcon_dict['type'] = 'ineq'
                            dcon_dict['fun'] = self._confunc
                            if opt in _constraint_grad_optimizers:
                                dcon_dict['jac'] = self._congradfunc
                            dcon_dict['args'] = [name, True, j]
                            constraints.append(dcon_dict)

            # precalculate gradients of linear constraints
            if lincons:
                self._lincongrad_cache = self._compute_totals(of=lincons, wrt=self._dvlist,
                                                              return_format='array')
            else:
                self._lincongrad_cache = None

        # Provide gradients for optimizers that support it
        if opt in _gradient_optimizers:
            jac = self._gradfunc
        else:
            jac = None

        # Hessian calculation method for optimizers, which require it
        if opt in _hessian_optimizers:
            if 'hess' in self.opt_settings:
                hess = self.opt_settings.pop('hess')
            else:
                # Defaults to BFGS, if not in opt_settings
                from scipy.optimize import BFGS
                hess = BFGS()
        else:
            hess = None

        # compute dynamic simul deriv coloring if option is set
        if coloring_mod._use_sparsity and self.options['dynamic_simul_derivs']:
            coloring_mod.dynamic_simul_coloring(self, run_model=False, do_sparsity=False)

        # optimize
        try:
            if opt in _optimizers:
                result = minimize(self._objfunc, x_init,
                                  # args=(),
                                  method=opt,
                                  jac=jac,
                                  hess=hess,
                                  # hessp=None,
                                  bounds=bounds,
                                  constraints=constraints,
                                  tol=self.options['tol'],
                                  # callback=None,
                                  options=self.opt_settings)
            elif opt == 'basinhopping':
                from scipy.optimize import basinhopping

                def fun(x):
                    return self._objfunc(x), jac(x)

                if 'minimizer_kwargs' not in self.opt_settings:
                    self.opt_settings['minimizer_kwargs'] = {"method": "L-BFGS-B", "jac": True}
                self.opt_settings.pop('maxiter')  # basinhopping has no 'maxiter' argument

                def accept_test(f_new, x_new, f_old, x_old):
                    # Used to implement bounds besides the original functionality
                    if bounds is not None:
                        bound_check = all([b[0] <= xi <= b[1] for xi, b in zip(x_new, bounds)])
                        user_test = self.opt_settings.pop('accept_test', None)  # callable
                        # has to satisfy both the bounds and the acceptance test defined by the
                        # user
                        if user_test is not None:
                            test_res = user_test(f_new, x_new, f_old, x_old)
                            if test_res == 'force accept':
                                return test_res
                            else:  # result is boolean
                                return bound_check and test_res
                        else:  # no user acceptance test, check only the bounds
                            return bound_check
                    else:
                        return True

                result = basinhopping(fun, x_init,
                                      accept_test=accept_test,
                                      **self.opt_settings)
            elif opt == 'dual_annealing':
                from scipy.optimize import dual_annealing
                self.opt_settings.pop('disp')  # dual_annealing has no 'disp' argument
                # There is no "options" param, so "opt_settings" can be used to set the (many)
                # keyword arguments
                result = dual_annealing(self._objfunc,
                                        bounds=bounds,
                                        **self.opt_settings)
            elif opt == 'differential_evolution':
                from scipy.optimize import differential_evolution
                # There is no "options" param, so "opt_settings" can be used to set the (many)
                # keyword arguments
                result = differential_evolution(self._objfunc,
                                                bounds=bounds,
                                                **self.opt_settings)
            elif opt == 'shgo':
                from scipy.optimize import shgo
                kwargs = dict()
                for param in ('minimizer_kwargs', 'sampling_method', 'n', 'iters'):
                    if param in self.opt_settings:
                        kwargs[param] = self.opt_settings[param]
                # Set the Jacobian and the Hessian to the value calculated in OpenMDAO
                if 'minimizer_kwargs' not in kwargs or kwargs['minimizer_kwargs'] is None:
                    kwargs['minimizer_kwargs'] = {}
                kwargs['minimizer_kwargs'].setdefault('jac', jac)
                kwargs['minimizer_kwargs'].setdefault('hess', hess)
                # Objective function tolerance
                self.opt_settings['f_tol'] = self.options['tol']
                result = shgo(self._objfunc,
                              bounds=bounds,
                              constraints=constraints,
                              options=self.opt_settings,
                              **kwargs)
            else:
                msg = 'Optimizer "{}" is not implemented yet. Choose from: {}'
                raise NotImplementedError(msg.format(opt, _all_optimizers))

        # If an exception was swallowed in one of our callbacks, we want to raise it
        # rather than the cryptic message from scipy.
        except Exception as msg:
            if self._exc_info is not None:
                self._reraise()
            else:
                raise

        if self._exc_info is not None:
            self._reraise()

        self.result = result

        if hasattr(result, 'success'):
            self.fail = False if result.success else True
            if self.fail:
                print('Optimization FAILED.')
                print(result.message)
                print('-' * 35)

            elif self.options['disp']:
                print('Optimization Complete')
                print('-' * 35)
        else:
            self.fail = True  # It is not known, so the worst option is assumed
            print('Optimization Complete (success not known)')
            print(result.message)
            print('-' * 35)

        return self.fail
Exemplo n.º 40
0
import numpy as np
from scipy.optimize import basinhopping, differential_evolution
# https://docs.scipy.org/doc/scipy-1.2.0/reference/optimize.html#module-scipy.optimize

dim = benchmarks.common_dim
info = benchmarks.get_info(benchmarks.common_f_id, dim)
bounds = [(info['lower'], info['upper']) for i in range(dim)]
if False:
    x0 = np.ones(dim) * -1
    res = basinhopping(benchmarks.f,
                       x0,
                       minimizer_kwargs={"method": "BFGS"},
                       niter=26000 * dim)
    print(res)
if False:
    maxiter = 1000
    popsize = 500
    print("max func eval = ", maxiter * popsize * dim)
    workers = 1
    res = differential_evolution(benchmarks.f,
                                 bounds,
                                 maxiter=maxiter,
                                 popsize=popsize)
    print(res)
if False:
    from scipy.optimize import shgo
    res = shgo(benchmarks.f, bounds, minimizer_kwargs={"method": "BFGS"})
if True:
    from scipy.optimize import dual_annealing
    res = dual_annealing(benchmarks.f, bounds)
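The toggles above switch between scipy's global optimizers on the same benchmark; a self-contained version of that comparison on a simple sphere function (budgets and seeds chosen arbitrarily):

import numpy as np
from scipy.optimize import (basinhopping, differential_evolution, shgo,
                            dual_annealing)

def sphere(x):
    return float(np.sum(np.asarray(x) ** 2))

bnds = [(-5.0, 5.0)] * 3
print('DA  :', dual_annealing(sphere, bnds, seed=0).fun)
print('DE  :', differential_evolution(sphere, bnds, seed=0).fun)
print('SHGO:', shgo(sphere, bnds).fun)
print('BH  :', basinhopping(sphere, np.zeros(3), niter=50, seed=0).fun)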
Exemplo n.º 41
0
    def gen_labels(self, set_, mi_cutoff=0.005):
        """
        Method that generates labels for every subcluster within a particular clustering of the data

        Parameters
        ----------
        set_ : str
            The string identifier for a particular clustering of the space.

        mi_cutoff : float, default=0.005
            The mutual information cutoff that is used to remove unimportant labels.
        """
        summaries = {}
        for set_val, idxs in tqdm.tqdm(self[set_].items()):
            fingerprints = []
            binary_labels = []
            mis = np.zeros(len(self.fingerprints))
            high_mis = []
            ents = np.zeros_like(mis)
            for i, fp in enumerate(self.fingerprints.values()):
                mi = mis[i] = self.mi(set_, fp.property, set_val=set_val)
                ent = ents[i] = self.entropy(idxs, fp.property)
                labels = self.data[fp.property].loc[idxs]
                bounds = np.array([np.min(labels), np.max(labels)])

                # The following converts any labels that have multiclass or continuous structure into binary labels.
                if mi > mi_cutoff:
                    high_mis.append(mi)
                    if ent < fp.sensitivity:
                        if fp.label_type == "continuous":
                            u = np.mean(labels)
                            var = np.var(labels)
                            binary_labels.append(
                                np.logical_and(
                                    (u - 0.1 * var) < self.data[fp.property],
                                    self.data[fp.property] < (u + 0.1 * var)))
                            fingerprints.append(fp.summary(u, entropy=ent))
                        else:
                            binary_labels.append(
                                (self.data[fp.property] == round(
                                    np.mean(labels))).to_numpy())
                            fingerprints.append(
                                fp.summary(round(np.mean(labels)), ent,
                                           fp.sensitivity + 0.1))
                    else:
                        if fp.label_type == "binary":
                            binary_labels.append(
                                (self.data[fp.property] == round(
                                    np.mean(labels))).to_numpy())
                            fingerprints.append(
                                fp.summary(np.mean(labels), ent,
                                           sensitivity=1))
                            continue

                        elif fp.label_type == "continuous":
                            split = float(
                                dual_annealing(self.cost_generator(
                                    set_, fp.property, set_val=set_val),
                                               bounds=bounds.reshape(1, 2),
                                               maxiter=250)["x"])
                        else:
                            iterable = np.array(range(*bounds))
                            split = iterable[np.argmin(
                                np.apply_along_axis(
                                    self.cost_generator(set_,
                                                        fp.property,
                                                        set_val=set_val), 0,
                                    iterable))]

                        if split > np.mean(self.data.loc[idxs][fp.property]):
                            fingerprints.append(fp.to_binary(split, "<"))
                            binary_labels.append(
                                (self.data[fp.property] < split).to_numpy())
                        else:
                            fingerprints.append(fp.to_binary(split, ">"))
                            binary_labels.append(
                                (self.data[fp.property] >= split).to_numpy())

            if len(binary_labels) > 1:
                # Prune to the 5 labels with the largest mutual information
                # (the 5 largest elements of the reversed argsort list)
                num_options = np.argsort(np.array(high_mis))[::-1][:5]
                binary_labels = np.array(binary_labels)

                idxs = []
                for i in range(1, len(num_options) + 1):
                    # Generate all possible combinations of the pruned labels
                    idxs.append(list(combinations(num_options, i)))
                idxs = [item for sublist in idxs
                        for item in sublist]  # Flatten the list of lists

                mut_infs = []
                for idx in idxs:  # Iterate over possible label combinations
                    current_label = reduce(np.logical_and,
                                           binary_labels[list(idx)])
                    mut_infs.append(
                        self.mi(set_,
                                "Labels_Provided",
                                set_val=set_val,
                                labels=current_label))

                summary = [
                    fingerprints[x] for x in idxs[np.argmax(mut_infs)]
                ]  # Isolate the fingerprint summaries for each index in the argmax combination
                summaries[set_val] = "\n\t".join(summary) + (
                    f"\n\tMutual Information {np.max(mut_infs)}")

            elif len(binary_labels) == 1:
                summaries[set_val] = "".join(fingerprints) + (
                    f"\n\tMutual Information {np.max(mis)}"
                )  # Don't iterate over label combinations if there is only one.

            else:
                summaries[set_val] = ("No Meaningful Label")

        return summaries
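The continuous branch above hands dual_annealing a single-dimensional search space via bounds.reshape(1, 2) to find a scalar split point; a self-contained toy version of that pattern, with hypothetical data and a simple misclassification cost:

import numpy as np
from scipy.optimize import dual_annealing

rng = np.random.default_rng(0)
values = np.concatenate([rng.normal(0.0, 1.0, 200), rng.normal(4.0, 1.0, 200)])
membership = np.repeat([0, 1], 200)

def split_cost(split):
    # fraction of points whose thresholded label disagrees with membership
    predicted = (values >= split[0]).astype(int)
    return float(np.mean(predicted != membership))

bounds = np.array([values.min(), values.max()]).reshape(1, 2)
result = dual_annealing(split_cost, bounds=bounds, maxiter=250, seed=1)
print(result.x)  # a split near 2.0, between the two modes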
Exemplo n.º 42
0
 def test_high_dim(self):
     ret = dual_annealing(self.func, self.hd_bounds, seed=self.seed)
     assert_allclose(ret.fun, 0., atol=1e-12)
     assert ret.success
Exemplo n.º 43
0
def f_trajet_plan(x):
    global aircraft_1, aircraft_2, destination, alpha, fuel_burn_1, fuel_burn_2
    res_1 = fuel_burn_1 * m.sqrt((x[0] - aircraft_1[0])**2 +
                                 (x[1] - aircraft_1[1])**2)
    res_2 = fuel_burn_2 * m.sqrt((x[0] - aircraft_2[0])**2 +
                                 (x[1] - aircraft_2[1])**2)
    res_3 = (1 - alpha) * m.sqrt((x[0] - destination[0])**2 +
                                 (x[1] - destination[1])**2) * (fuel_burn_1 +
                                                                fuel_burn_2)
    return res_1 + res_2 + res_3
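This snippet assumes the aircraft positions, fuel-burn rates, alpha and bounds are defined earlier in the original script; a set of hypothetical placeholder values that makes it runnable:

import math as m
from scipy import optimize

# Hypothetical inputs; the originals are defined elsewhere in the script
aircraft_1 = (-60.0, 45.0)      # (longitude, latitude) of aircraft 1
aircraft_2 = (-40.0, 50.0)      # (longitude, latitude) of aircraft 2
destination = (0.0, 48.0)       # shared destination
alpha = 0.5                     # weighting between rendezvous and onward legs
fuel_burn_1, fuel_burn_2 = 1.0, 1.2
bounds = [(-140, 80), (0, 90)]  # search box: longitude, latitude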


# Results PLAN
results = dict()
results['shgo'] = optimize.shgo(f_trajet_plan, bounds)
results['DA'] = optimize.dual_annealing(f_trajet_plan, bounds)
results['DE'] = optimize.differential_evolution(f_trajet_plan, bounds)
results['BH'] = optimize.basinhopping(
    f_trajet_plan, x0=[(lo + hi) / 2 for (lo, hi) in bounds])  # basinhopping needs a start point, not bounds
print('Here are the results PLAN: ', results['shgo'].x)
RDV_plan = results['shgo'].x

print(results['shgo'])

# Create a map showing connections for transatlantic flights
# --------------

# Transatlantic map
region = [-140, 0, 80, 90]  # [long_left, lat_bottom, long_right, lat_top]
output_file = './outputs/basemap_flight_path.jpg'
res_dpi = 600
Exemplo n.º 44
0
 def test_nb_fun_call(self):
     ret = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
     assert_equal(self.nb_fun_call, ret.nfev)
Exemplo n.º 45
0
 def test_low_dim_no_ls(self):
     ret = dual_annealing(self.func, self.ld_bounds, no_local_search=True)
     assert_allclose(ret.fun, 0., atol=1e-4)
Exemplo n.º 46
0
 def test_maxiter(self):
     ret = dual_annealing(self.func,
                          self.ld_bounds,
                          maxiter=700,
                          seed=self.seed)
     assert ret.nit <= 700
Exemplo n.º 47
0
import time

import opensim as osim  # assumed import; the snippet uses the OpenSim Python API
from scipy.optimize import dual_annealing

model = osim.Model(
    r'C:\Users\llim726\Documents\infant_analysis\jw\jw_Scaled.osim')

# Locate the left hip joint and get the initial location parameters
jointset = model.getJointSet()
hip_l = jointset.get('hip_l')
hlx = hip_l.get_frames(0).get_translation()[0]
hly = hip_l.get_frames(0).get_translation()[1]
hlz = hip_l.get_frames(0).get_translation()[2]

hip_l.get_frames(0).set_translation(osim.Vec3(-0.05, -0.05, -0.05))
s = model.initSystem()
model.printToXML(
    r"C:\Users\llim726\Documents\infant_analysis\jw\jw_Scaled_offset.osim")

# Set up and run a least squares optimisation function - minimising the mean RMS error
x0_params = [hlx, hly, hlz]
min_x0 = min(x0_params)
max_x0 = max(x0_params)  # we expect that x, y, z are all negative for the hip
#bounds = [min_x0-0.5,max_x0+0.5]
#sln = least_squares(access_model, x0_params)#,bounds=bounds)

lw = [-0.5, -0.5, -0.5]
up = [0.5, 0.5, 0.5]
start_time = time.time()
sln = dual_annealing(access_model, bounds=list(zip(lw, up)))
elapsed_time = time.time() - start_time
# Edit the existing model and generate a new osim file with the optimised HJC
hip_l.get_frames(0).set_translation(osim.Vec3(sln.x[0], sln.x[1], sln.x[2]))
s = model.initSystem()
model.printToXML(r"C:\Users\llim726\Documents\infant_analysis\jw\jw_test.osim")
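access_model is defined elsewhere in the original script; a purely hypothetical sketch of its shape, reusing only objects that appear above (the error metric is a placeholder, not a real OpenSim API):

def access_model(translation):
    # apply the candidate hip-joint translation and rebuild the system
    hip_l.get_frames(0).set_translation(
        osim.Vec3(translation[0], translation[1], translation[2]))
    model.initSystem()
    # placeholder: the original computes the mean RMS marker error here,
    # e.g. by rerunning inverse kinematics against recorded marker data
    return compute_mean_rms_error(model)  # hypothetical helper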
Exemplo n.º 48
0
 def test_multi_ls_minimizer(self, method, atol):
     ret = dual_annealing(self.func,
                          self.ld_bounds,
                          minimizer_kwargs=dict(method=method),
                          seed=self.seed)
     assert_allclose(ret.fun, 0., atol=atol)
Exemplo n.º 49
0
import numpy as np
from scipy.optimize import dual_annealing
func = lambda x: np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x)
lw = [-5.12] * 10
up = [5.12] * 10
ret = dual_annealing(func, bounds=list(zip(lw, up)), seed=1234)
print(ret.x)
print(ret.fun)
print(ret)