Example #1
File: graphics.py Project: vova292/pyomo
def _add_scipy_dist_CI(x,y,color,columns,ncells,alpha,dist,theta_star,label=None):
    ax = plt.gca()
    xvar, yvar, loc = _get_variables(ax,columns)
    
    X,Y = _get_XYgrid(x,y,ncells)
    
    data_slice = []
    
    if isinstance(dist, stats._multivariate.multivariate_normal_frozen):
        for var in theta_star.index:
            if var == xvar:
                data_slice.append(X)
            elif var == yvar:
                data_slice.append(Y)
            elif var not in [xvar,yvar]:
                data_slice.append(np.array([[theta_star[var]]*ncells]*ncells))
        data_slice = np.dstack(tuple(data_slice))
        
    elif isinstance(dist, stats.kde.gaussian_kde):
        for var in theta_star.index:
            if var == xvar:
                data_slice.append(X.ravel())
            elif var == yvar:
                data_slice.append(Y.ravel())
            elif var not in [xvar,yvar]:
                data_slice.append(np.array([theta_star[var]]*ncells*ncells))
        data_slice = np.array(data_slice)
    else:
        return
        
    Z = dist.pdf(data_slice)
    Z = Z.reshape((ncells, ncells))
    
    ax.contour(X,Y,Z, levels=[alpha], colors=color) 
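
Note the two input layouts above: a frozen multivariate normal evaluates its pdf on points stacked along the last axis, while gaussian_kde expects a (d, N) array of coordinate rows, which is why the first branch uses np.dstack and the second uses ravel. A minimal standalone sketch of both conventions (illustrative data, not project code):

import numpy as np
from scipy import stats

X, Y = np.meshgrid(np.linspace(-2, 2, 5), np.linspace(-2, 2, 5))

# Frozen multivariate normal: points stacked along the last axis
mvn = stats.multivariate_normal(mean=[0, 0], cov=np.eye(2))
Z_mvn = mvn.pdf(np.dstack((X, Y)))  # shape (5, 5)

# Gaussian KDE: points passed as a (d, N) array
kde = stats.gaussian_kde(np.random.randn(2, 100))
Z_kde = kde(np.vstack((X.ravel(), Y.ravel()))).reshape(X.shape)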
Example #2
 def test_get_dfds_dcds2(self):
     '''
     It tests the function get_dfds_dcds with Rooney & Biegler's model.
     '''
     variable_name = ['asymptote', 'rate_constant']
     theta = {
         'asymptote': 19.142575284617866,
         'rate_constant': 0.53109137696521
     }
     cov = np.array([[6.30579403, -0.4395341], [-0.4395341, 0.04193591]])
     model_uncertain = ConcreteModel()
     model_uncertain.asymptote = Var(initialize=15)
     model_uncertain.rate_constant = Var(initialize=0.5)
     model_uncertain.obj = Objective(
         expr=model_uncertain.asymptote *
         (1 - exp(-model_uncertain.rate_constant * 10)),
         sense=minimize)
     for v in variable_name:
         getattr(model_uncertain, v).setlb(theta[v])
         getattr(model_uncertain, v).setub(theta[v])
     gradient_f, gradient_c, col, row, line_dic = get_dfds_dcds(
         model_uncertain, variable_name)
     np.testing.assert_almost_equal(gradient_f, [0.99506259, 0.945148])
     np.testing.assert_almost_equal(gradient_c, np.array([]))
     assert col == ['asymptote', 'rate_constant']
     assert row == ['obj']
Example #3
File: helper.py Project: ZedongPeng/pyomo
def cloneXYZ(x, y, z):
    """
    This function is to create a hard copy of vector x, y, z.
    """
    x0 = np.array(x)
    y0 = np.array(y)
    z0 = np.array(z)
    return x0, y0, z0
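
A quick usage sketch: np.array copies its input, so mutating the originals afterwards leaves the clones untouched.

import numpy as np

x, y, z = [1.0, 2.0], [3.0, 4.0], [5.0, 6.0]
x0, y0, z0 = cloneXYZ(x, y, z)
x[0] = -1.0
print(x0[0])  # still 1.0 -- the clone holds its own data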
Example #4
 def test_indexed_constraint(self):
     m = ConcreteModel()
     m.x = Var([0, 1, 2, 3])
     A = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
     b = np.array([10, 20])
     m.c = Constraint([0, 1], expr=A @ m.x <= b)
     self.assertTrue(
         compare_expressions(
             m.c[0].expr,
             m.x[0] + 2 * m.x[1] + 3 * m.x[2] + 4 * m.x[3] <= 10))
     self.assertTrue(
         compare_expressions(
             m.c[1].expr,
             5 * m.x[0] + 6 * m.x[1] + 7 * m.x[2] + 8 * m.x[3] <= 20))
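
For reference, a hedged sketch of what the matrix form expands to when written without the NumPy product (an equivalent rule-based spelling, not the code under test):

import numpy as np
from pyomo.environ import ConcreteModel, Constraint, Var

A = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
b = np.array([10, 20])

m = ConcreteModel()
m.x = Var([0, 1, 2, 3])
# one row of A @ x <= b per constraint index
m.c = Constraint([0, 1],
                 rule=lambda m, i: sum(A[i, j] * m.x[j] for j in range(4)) <= b[i])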
Example #5
    def test_cov_scipy_curve_fit_comparison(self):
        '''
        Scipy results differ in the 3rd decimal place from the paper. It is possible
        the paper used an alternative finite difference approximation for the Jacobian.
        '''

        ## solve with optimize.curve_fit
        def model(t, asymptote, rate_constant):
            return asymptote * (1 - np.exp(-rate_constant * t))

        # define data
        t = self.data['hour'].to_numpy()
        y = self.data['y'].to_numpy()

        # define initial guess
        theta_guess = np.array([15, 0.5])

        theta_hat, cov = scipy.optimize.curve_fit(model, t, y, p0=theta_guess)

        self.assertAlmostEqual(theta_hat[0], 19.1426,
                               places=2)  # 19.1426 from the paper
        self.assertAlmostEqual(theta_hat[1], 0.5311,
                               places=2)  # 0.5311 from the paper

        self.assertAlmostEqual(cov[0, 0], 6.22864,
                               places=2)  # 6.22864 from paper
        self.assertAlmostEqual(cov[0, 1], -0.4322,
                               places=2)  # -0.4322 from paper
        self.assertAlmostEqual(cov[1, 0], -0.4322,
                               places=2)  # -0.4322 from paper
        self.assertAlmostEqual(cov[1, 1], 0.04124,
                               places=2)  # 0.04124 from paper
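
Worth noting: with the default absolute_sigma=False and no sigma argument, curve_fit already scales the returned covariance by the residual variance, so no manual sigma^2 factor appears here (contrast with the least_squares version in Example #20 below). Requesting the unscaled matrix instead would look like this (a sketch reusing model, t, y, and theta_guess from above):

theta_hat, cov_raw = scipy.optimize.curve_fit(
    model, t, y, p0=theta_guess, absolute_sigma=True)  # ~ inv(J^T J), unscaled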
Example #6
    def test_get_dsdp2(self):
        '''
        It tests the function get_dsdp with Rooney & Biegler's model.
        '''
        variable_name = ['asymptote', 'rate_constant']

        theta = {
            'asymptote': 19.142575284617866,
            'rate_constant': 0.53109137696521
        }
        cov = np.array([[6.30579403, -0.4395341], [-0.4395341, 0.04193591]])
        model_uncertain = ConcreteModel()
        model_uncertain.asymptote = Var(initialize=15)
        model_uncertain.rate_constant = Var(initialize=0.5)
        model_uncertain.obj = Objective(
            expr=model_uncertain.asymptote *
            (1 - exp(-model_uncertain.rate_constant * 10)),
            sense=minimize)
        for v in variable_name:
            getattr(model_uncertain, v).setlb(theta[v])
            getattr(model_uncertain, v).setub(theta[v])
        dsdp, col = get_dsdp(model_uncertain, variable_name, theta, {})
        np.testing.assert_almost_equal(dsdp.toarray(), [[1., 0.], [0., 1.]])
        assert col == ['asymptote', 'rate_constant']
Example #7
    def _simulate_with_scipy(self, initcon, tsim, switchpts,
                             varying_inputs, integrator,
                             integrator_options):

        scipyint = scipy.ode(self._rhsfun).set_integrator(
            integrator, **integrator_options)
        scipyint.set_initial_value(initcon, tsim[0])

        profile = np.array(initcon)
        i = 1
        while scipyint.successful() and scipyint.t < tsim[-1]:

            # check if tsim[i-1] is a switching time and update value
            if tsim[i - 1] in switchpts:
                for v in self._siminputvars.keys():
                    if tsim[i - 1] in varying_inputs[v]:
                        p = self._templatemap[self._siminputvars[v]]
                        p.set_value(varying_inputs[v][tsim[i - 1]])

            profilestep = scipyint.integrate(tsim[i])
            profile = np.vstack([profile, profilestep])
            i += 1

        if not scipyint.successful():
            raise DAE_Error("The Scipy integrator %s did not terminate "
                            "successfully." % integrator)
        return [tsim, profile]
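
The stepping pattern above, stripped of the Pyomo plumbing, is plain scipy.integrate.ode usage. A self-contained sketch integrating dy/dt = -y (an illustrative system, not from the project):

import numpy as np
from scipy.integrate import ode

def rhs(t, y):
    return -y  # dy/dt = -y

tsim = np.linspace(0.0, 1.0, 11)
solver = ode(rhs).set_integrator('vode')
solver.set_initial_value([1.0], tsim[0])

profile = np.array([[1.0]])
i = 1
while solver.successful() and solver.t < tsim[-1]:
    profile = np.vstack([profile, solver.integrate(tsim[i])])
    i += 1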
Example #8
 def test_pow2(self):
     xl = np.linspace(-2, 2, 17)
     xu = np.linspace(-2, 2, 17)
     yl = np.linspace(-2, 2, 17)
     yu = np.linspace(-2, 2, 17)
     for _xl in xl:
         for _xu in xu:
             if _xl > _xu:
                 continue
             for _yl in yl:
                 for _yu in yu:
                     if _yl > _yu:
                         continue
                     if _yl == _yu and _yl != round(_yl) and _xu < 0:
                         with self.assertRaises(
                                 InfeasibleConstraintException):
                             lb, ub = interval.power(_xl, _xu, _yl, _yu)
                     else:
                         lb, ub = interval.power(_xl, _xu, _yl, _yu)
                         if isfinite(lb) and isfinite(ub):
                             nan_fill = 0.5 * (lb + ub)
                         elif isfinite(lb):
                             nan_fill = lb + 1
                         elif isfinite(ub):
                             nan_fill = ub - 1
                         else:
                             nan_fill = 0
                         x = np.linspace(_xl, _xu, 17)
                         y = np.linspace(_yl, _yu, 17)
                         all_values = list()
                         for _x in x:
                             z = _x**y
                             #np.nan_to_num(z, copy=False, nan=nan_fill, posinf=np.inf, neginf=-np.inf)
                             tmp = []
                             for _z in z:
                                 if math.isnan(_z):
                                     tmp.append(nan_fill)
                                 else:
                                     tmp.append(_z)
                             all_values.append(np.array(tmp))
                         all_values = np.array(all_values)
                         estimated_lb = all_values.min()
                         estimated_ub = all_values.max()
                         self.assertTrue(lb - 1e-8 <= estimated_lb)
                         self.assertTrue(ub + 1e-8 >= estimated_ub)
Example #9
 def generate_first_x(self, G, tears):
     edge_list = self.idx_to_edge(G)
     x = []
     for tear in tears:
         arc = G.edges[edge_list[tear]]["arc"]
         for name, index, mem in arc.src.iter_vars(names=True):
             peer = self.source_dest_peer(arc, name, index)
             x.append(value(peer))
     x = numpy.array(x)
     return x
Example #10
    def test_create_objective_from_numpy(self):
        # Test issue #87
        model = ConcreteModel()

        nsample = 3
        nvariables = 2
        X0 = np.array(range(nsample)).reshape([nsample, 1])
        model.X = 1 + np.array(range(nsample * nvariables)).reshape(
            (nsample, nvariables))
        X = np.concatenate([X0, model.X], axis=1)

        model.I = RangeSet(1, nsample)
        model.J = RangeSet(1, nvariables)

        error = np.ones((nsample, 1))
        beta = np.ones((nvariables + 1, 1))
        model.Y = np.dot(X, beta) + error

        model.beta = Var(model.J)
        model.beta0 = Var()

        def obj_fun(model):
            return sum(
                abs(model.Y[i - 1] -
                    (model.beta0 + sum(model.X[i - 1, j - 1] * model.beta[j]
                                       for j in model.J))) for i in model.I)

        model.OBJ = Objective(rule=obj_fun)

        def obj_fun_quad(model):
            return sum(
                (model.Y[i - 1] -
                 (model.beta0 + sum(model.X[i - 1, j - 1] * model.beta[j]
                                    for j in model.J)))**2 for i in model.I)

        model.OBJ_QUAD = Objective(rule=obj_fun_quad)

        self.assertEqual(
            str(model.OBJ.expr), "abs(4.0 - (beta[1] + 2*beta[2] + beta0)) + "
            "abs(9.0 - (3*beta[1] + 4*beta[2] + beta0)) + "
            "abs(14.0 - (5*beta[1] + 6*beta[2] + beta0))")
        self.assertEqual(model.OBJ.expr.polynomial_degree(), None)
        self.assertEqual(model.OBJ_QUAD.expr.polynomial_degree(), 2)
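
The two degree assertions at the end rely on abs() not being a polynomial operation in Pyomo: any expression containing abs reports a polynomial_degree() of None, while the squared form is an ordinary quadratic. A minimal illustration:

from pyomo.environ import ConcreteModel, Var

m = ConcreteModel()
m.z = Var()
print((m.z ** 2).polynomial_degree())  # 2
print(abs(m.z).polynomial_degree())    # None -- abs is not polynomial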
Example #11
 def __call__(self, exception=True):
     """Compute the value of the body of this constraint"""
     if self.x is None:
         raise ValueError("No variable order has been assigned")
     values = numpy.array([v.value for v in self.x], dtype=float)
     if numpy.isnan(values).any():
         if exception:
             raise ValueError("One or more variables "
                              "do not have a value")
         return None
     return self._A.dot(values)
Example #12
 def tear_diff_direct(self, G, tears):
     """
     Returns numpy arrays of values for src and dest members
     for all edges in the tears list of edge indexes.
     """
     svals = []
     dvals = []
     edge_list = self.idx_to_edge(G)
     for tear in tears:
         arc = G.edges[edge_list[tear]]["arc"]
         src, dest = arc.src, arc.dest
         sf = arc.expanded_block.component("splitfrac")
         for name, index, mem in src.iter_vars(names=True):
             if src.is_extensive(name) and sf is not None:
                 # TODO: same as above, what if there's no splitfrac
                 svals.append(value(mem * sf))
             else:
                 svals.append(value(mem))
             dvals.append(value(self.source_dest_peer(arc, name, index)))
     svals = numpy.array(svals)
     dvals = numpy.array(dvals)
     return svals, dvals
Example #13
    def evaluateDx(self, x):
        # This is messy, currently redundant with
        # some lines in buildROM()
        self.countDx += 1
        ans = []
        for i in range(0, self.ly):
            fcn = self.TRF.external_fcns[i]._fcn
            values = []
            for j in self.exfn_xvars_ind[i]:
                values.append(x[j])

            ans.append(fcn._fcn(*values))
        return np.array(ans)
Example #14
File: graphics.py Project: vova292/pyomo
def _get_data_slice(xvar,yvar,columns,data,theta_star):

    search_ranges = {} 
    for var in columns:
        if var in [xvar,yvar]:
            search_ranges[var] = data[var].unique()
        else:
            search_ranges[var] = [theta_star[var]]

    data_slice = pd.DataFrame(list(itertools.product(*search_ranges.values())),
                            columns=search_ranges.keys())
    
    # griddata will not work with linear interpolation if the data 
    # values are constant in any dimension
    for col in data[columns].columns:
        cv = data[col].std()/data[col].mean() # Coefficient of variation
        if cv < 1e-8: 
            temp = data.copy()
            # Add variation (the interpolation is later scaled)
            if cv == 0:
                temp[col] = temp[col] + data[col].mean()/10
            else:
                temp[col] = temp[col] + data[col].std()
            # DataFrame.append was removed in pandas 2.0; concat is equivalent
            data = pd.concat([data, temp], ignore_index=True)
    
    data_slice['obj'] = scipy.interpolate.griddata(
        np.array(data[columns]),
        np.array(data[['obj']]),
        np.array(data_slice[columns]),
        method='linear',
        rescale=True,
    )
        
    X = data_slice[xvar]
    Y = data_slice[yvar]
    Z = data_slice['obj']
    
    return X,Y,Z
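
The jitter loop above works around scipy.interpolate.griddata with method='linear', whose underlying triangulation degenerates (yielding NaN) when a coordinate is constant across the data. A standalone sketch of the interpolation call itself, with made-up points and values:

import numpy as np
from scipy.interpolate import griddata

points = np.random.rand(20, 2)        # known sample locations
values = points[:, 0] + points[:, 1]  # known objective values
query = np.array([[0.5, 0.5], [0.25, 0.75]])

est = griddata(points, values, query, method='linear', rescale=True)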
Example #15
 def generate_gofx(self, G, tears):
     edge_list = self.idx_to_edge(G)
     gofx = []
     for tear in tears:
         arc = G.edges[edge_list[tear]]["arc"]
         src = arc.src
         sf = arc.expanded_block.component("splitfrac")
         for name, index, mem in src.iter_vars(names=True):
             if src.is_extensive(name) and sf is not None:
                 # TODO: same as above, what if there's no splitfrac
                 gofx.append(value(mem * sf))
             else:
                 gofx.append(value(mem))
     gofx = numpy.array(gofx)
     return gofx
Example #16
    def test_init_param_from_ndarray(self):
        # Test issue #2033
        m = ConcreteModel()
        m.ix_set = RangeSet(2)

        p_init = np.array([0, 5])

        def init_workaround(model, i):
            return p_init[i - 1]

        m.p = Param(m.ix_set, initialize=init_workaround)
        m.v = Var(m.ix_set)
        expr = m.p[1] > m.v[1]
        self.assertIsInstance(expr, InequalityExpression)
        self.assertEqual(str(expr), "v[1]  <  0")
        expr = m.p[2] > m.v[2]
        self.assertIsInstance(expr, InequalityExpression)
        self.assertEqual(str(expr), "v[2]  <  5")
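
For comparison, a hedged alternative to the rule-based workaround: convert the NumPy values to native Python ints up front and pass a plain dict to initialize.

import numpy as np
from pyomo.environ import ConcreteModel, Param, RangeSet

m = ConcreteModel()
m.ix_set = RangeSet(2)
p_init = np.array([0, 5])
m.p = Param(m.ix_set, initialize={i: int(p_init[i - 1]) for i in m.ix_set})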
Example #17
    def __init__(self, A, lb=None, ub=None, rhs=None, x=None, sparse=True):
        if (not has_numpy) or (not has_scipy):  #pragma:nocover
            raise ValueError("This class requires numpy and scipy")

        m, n = A.shape
        assert m > 0
        assert n > 0
        cons = (_MatrixConstraintData(i) for i in range(m))
        super(matrix_constraint, self).__init__(cons)

        if sparse:
            self._sparse = True
            self._A = scipy.sparse.csr_matrix(A, dtype=float, copy=True)
            self._A.data.setflags(write=False)
            self._A.indices.setflags(write=False)
            self._A.indptr.setflags(write=False)
        else:
            self._sparse = False
            self._A = numpy.array(A, dtype=float, copy=True)
            self._A.setflags(write=False)
        self._lb = numpy.ndarray(m, dtype=float)
        self._ub = numpy.ndarray(m, dtype=float)
        self._equality = numpy.ndarray(m, dtype=bool)
        self._equality.fill(False)

        # now use the setters to fill the arrays
        self.x = x
        if rhs is None:
            self.lb = lb
            self.ub = ub
        else:
            if (lb is not None) or (ub is not None):
                raise ValueError("The 'rhs' keyword can not "
                                 "be used with the 'lb' or "
                                 "'ub' keywords to initialize"
                                 " a constraint.")
            self.rhs = rhs
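
A hedged usage sketch of this constructor through the pyomo.kernel layer, where the class is exposed as matrix_constraint (array-valued lb/ub assumed to be accepted by the setters used above):

import numpy as np
import pyomo.kernel as pmo

m = pmo.block()
m.x = pmo.variable_list(pmo.variable() for _ in range(3))
A = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
m.c = pmo.matrix_constraint(A, lb=np.zeros(2), ub=np.ones(2), x=list(m.x))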
Example #18
File: sens.py Project: jmorgan29/idaes-pse
def get_dfds_dcds(model, theta_names, tee=False, solver_options=None):
    """This function calculates gradient vector of the objective function
       and constraints with respect to the variables in theta_names.

    e.g.) min  f:  p1*x1 + p2*(x2^2) + p1*p2
          s.t. c1: x1 + x2 = p1
               c2: x2 + x3 = p2
               0 <= x1, x2, x3 <= 10
               p1 = 10
               p2 = 5
    - Variables = (x1, x2, x3, p1, p2)
    - Fix p1 and p2 with estimated values

    The following terms are used to define the output dimensions:
    Ncon   = number of constraints
    Nvar   = number of variables (Nx + Ntheta)
    Nx     = number of decision (primal) variables
    Ntheta = number of uncertain parameters.

    Parameters
    ----------
    model: Pyomo ConcreteModel
        model should include an objective function
    theta_names: list of strings
        List of Var names
    tee: bool, optional
        Indicates whether solver output should be teed
    solver_options: dict, optional
        Provides options to the solver (also the name of an attribute)

    Returns
    -------
    gradient_f: numpy.ndarray
        Length Nvar array. A gradient vector of the objective function
        with respect to the (decision variables, parameters)
        at the optimal solution
    gradient_c: scipy.sparse.csr.csr_matrix
        Ncon by Nvar size sparse matrix. A Jacobian matrix of the constraints
        with respect to the (decision variables, parameters) at the optimal
        solution. Each row contains [column number, row number, value];
        column order follows the variable order in col and indexing starts
        from 1, following the k_aug convention. If no constraints exist,
        returns [].
    col: list
        Size Nvar. list of variable names
    row: list
        Size Ncon+1. List of constraint and objective function names.
        The final element is the objective function name.
    line_dic: dict
        column numbers of the theta_names in the model. Index starts from 1

    Raises
    ------
    RuntimeError
        When ipopt, k_aug, or dot_sens is not available
    Exception
        When ipopt fails
    """
    #Create the solver plugin using the ASL interface
    ipopt = SolverFactory('ipopt', solver_io='nl')
    if solver_options is not None:
        ipopt.options = solver_options
    kaug = SolverFactory('k_aug', solver_io='nl')
    dotsens = SolverFactory('dot_sens', solver_io='nl')
    if not ipopt.available(False):
        raise RuntimeError('ipopt is not available')
    if not kaug.available(False):
        raise RuntimeError('k_aug is not available')
    if not dotsens.available(False):
        raise RuntimeError('dotsens is not available')

    # Declare Suffixes
    _add_sensitivity_suffixes(model)

    # K_AUG SUFFIXES
    model.dof_v = Suffix(direction=Suffix.EXPORT)  #: SUFFIX FOR K_AUG
    model.rh_name = Suffix(
        direction=Suffix.IMPORT)  #: SUFFIX FOR K_AUG AS WELL
    kaug.options["print_kkt"] = ""
    results = ipopt.solve(model, tee=tee)

    # Raise Exception if ipopt fails
    if (results.solver.status == SolverStatus.warning):
        raise Exception(results.solver.Message)

    for o in model.component_objects(Objective, active=True):
        f_mean = value(o)
    model.ipopt_zL_in.update(model.ipopt_zL_out)
    model.ipopt_zU_in.update(model.ipopt_zU_out)
    #: run k_aug
    kaug.solve(model, tee=tee)  #: always call k_aug AFTER ipopt.
    model.write('col_row.nl',
                format='nl',
                io_options={'symbolic_solver_labels': True})
    # get the column numbers of theta
    line_dic = {}
    try:
        for v in theta_names:
            line_dic[v] = line_num('col_row.col', v)
        # load gradient of the objective function
        gradient_f = np.loadtxt("./GJH/gradient_f_print.txt")
        with open("col_row.col", "r") as myfile:
            col = myfile.read().splitlines()
        col = [
            i for i in col
            if SensitivityInterface.get_default_block_name() not in i
        ]
        with open("col_row.row", "r") as myfile:
            row = myfile.read().splitlines()
    except Exception as e:
        print('File not found.')
        raise e
    # load gradient of all constraints (sparse)
    # If no constraint exists, return []
    num_constraints = len(
        list(
            model.component_data_objects(Constraint,
                                         active=True,
                                         descend_into=True)))
    if num_constraints > 0:
        try:
            # load text file from kaug
            gradient_c = np.loadtxt("./GJH/A_print.txt")
            # This is a sparse matrix
            # gradient_c[:,0] are the column indices
            # gradient_c[:,1] are the row indices
            # gradient_c[:,2] are the matrix values
        except Exception as e:
            print('kaug file ./GJH/A_print.txt not found.')
            raise e

        # Subtract 1 from row and column indices to convert from
        # start at 1 (kaug) to start at 0 (numpy)
        row_idx = gradient_c[:, 1] - 1
        col_idx = gradient_c[:, 0] - 1
        data = gradient_c[:, 2]
        gradient_c = sparse.csr_matrix((data, (row_idx, col_idx)),
                                       shape=(len(row) - 1, len(col)))
    else:
        gradient_c = np.array([])
    # remove all generated files

    shutil.move("col_row.nl", "./GJH/")
    shutil.move("col_row.col", "./GJH/")
    shutil.move("col_row.row", "./GJH/")
    shutil.rmtree('GJH', ignore_errors=True)

    return gradient_f, gradient_c, col, row, line_dic
Example #19
def get_dfds_dcds(model, theta_names, tee=False, solver_options=None):
    """This function calculates gradient vector of the objective function 
       and constraints with respect to the variables and parameters.

    e.g.) min  f:  p1*x1 + p2*(x2^2) + p1*p2
          s.t. c1: x1 + x2 = p1
               c2: x2 + x3 = p2
               0 <= x1, x2, x3 <= 10
               p1 = 10
               p2 = 5
    - Variables = (x1, x2, x3, p1, p2)
    - Fix p1 and p2 with estimated values

    The following terms are used to define the output dimensions:
    Ncon   = number of constraints
    Nvar   = number of variables (Nx + Ntheta)
    Nx     = number of decision (primal) variables
    Ntheta = number of uncertain parameters.

    Parameters
    ----------
    model: Pyomo ConcreteModel
        model should include an objective function
    theta_names: list of strings
        List of Var names
    tee: bool, optional
        Indicates whether solver output should be teed
    solver_options: dict, optional
        Provides options to the solver (also the name of an attribute)

    Returns
    -------
    gradient_f: numpy.ndarray
        Length Nvar array. A gradient vector of the objective function
        with respect to the (decision variables, parameters) at the optimal
        solution
    gradient_c: scipy.sparse.csr.csr_matrix
        Ncon by Nvar size sparse matrix. A Jacobian matrix of the
        constraints with respect to the (decision variables, parameters)
        at the optimal solution. Each row contains [column number,
        row number, value]; column order follows the variable order in col
        and indexing starts from 1, following the k_aug convention.
        If no constraints exist, returns [].
    col: list
        Size Nvar list of variable names
    row: list
        Size Ncon+1 list of constraints and objective function names.
        The final element is the objective function name.
    line_dic: dict
        column numbers of the theta_names in the model. Index starts from 1

    Raises
    ------
    RuntimeError
        When ipopt or k_aug is not available
    Exception
        When ipopt fails 
    """
    # Create the solver plugin using the ASL interface
    ipopt = SolverFactory('ipopt', solver_io='nl')
    if solver_options is not None:
        ipopt.options = solver_options
    k_aug = SolverFactory('k_aug', solver_io='nl')
    if not ipopt.available(False):
        raise RuntimeError('ipopt is not available')
    if not k_aug.available(False):
        raise RuntimeError('k_aug is not available')

    # Declare Suffixes
    _add_sensitivity_suffixes(model)

    # K_AUG SUFFIXES
    model.dof_v = Suffix(direction=Suffix.EXPORT)  #: SUFFIX FOR K_AUG
    model.rh_name = Suffix(
        direction=Suffix.IMPORT)  #: SUFFIX FOR K_AUG AS WELL
    k_aug.options["print_kkt"] = ""

    results = ipopt.solve(model, tee=tee)

    # Raise exception if ipopt fails
    if (results.solver.status == SolverStatus.warning):
        raise Exception(results.solver.Message)

    for o in model.component_objects(Objective, active=True):
        f_mean = value(o)
    model.ipopt_zL_in.update(model.ipopt_zL_out)
    model.ipopt_zU_in.update(model.ipopt_zU_out)

    # run k_aug
    k_aug_interface = K_augInterface(k_aug=k_aug)
    k_aug_interface.k_aug(model, tee=tee)  #: always call k_aug AFTER ipopt.

    nl_data = {}
    with InTempDir():
        base_fname = "col_row"
        nl_file = ".".join((base_fname, "nl"))
        row_file = ".".join((base_fname, "row"))
        col_file = ".".join((base_fname, "col"))
        model.write(nl_file, io_options={"symbolic_solver_labels": True})
        for fname in [nl_file, row_file, col_file]:
            with open(fname, "r") as fp:
                nl_data[fname] = fp.read()

    col = nl_data[col_file].strip("\n").split("\n")
    row = nl_data[row_file].strip("\n").split("\n")

    # get the column numbers of "parameters"
    line_dic = {name: col.index(name) for name in theta_names}

    grad_f_file = os.path.join("GJH", "gradient_f_print.txt")
    grad_f_string = k_aug_interface.data[grad_f_file]
    gradient_f = np.fromstring(grad_f_string, sep="\n\t")
    col = [
        i for i in col
        if SensitivityInterface.get_default_block_name() not in i
    ]

    grad_c_file = os.path.join("GJH", "A_print.txt")
    grad_c_string = k_aug_interface.data[grad_c_file]
    gradient_c = np.fromstring(grad_c_string, sep="\n\t")

    # Jacobian file is in "COO format," i.e. an nnz-by-3 array.
    # Reshape to a numpy array that matches this format.
    gradient_c = gradient_c.reshape((-1, 3))

    num_constraints = len(row) - 1  # Objective is included as a row
    if num_constraints > 0:
        row_idx = gradient_c[:, 1] - 1
        col_idx = gradient_c[:, 0] - 1
        data = gradient_c[:, 2]
        gradient_c = scipy.sparse.csr_matrix((data, (row_idx, col_idx)),
                                             shape=(num_constraints, len(col)))
    else:
        gradient_c = np.array([])

    return gradient_f, gradient_c, col, row, line_dic
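
A hedged example of calling the function above on a tiny model (assumes ipopt and k_aug are installed; the model is illustrative only):

from pyomo.environ import ConcreteModel, Objective, Var

m = ConcreteModel()
m.x = Var(initialize=1.0)
m.p = Var(initialize=2.0)
m.p.setlb(2.0)  # pin the "parameter" via equal bounds,
m.p.setub(2.0)  # as in the tests earlier in this list
m.obj = Objective(expr=(m.x - m.p) ** 2)

gradient_f, gradient_c, col, row, line_dic = get_dfds_dcds(m, ['p'])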
Example #20
    def test_cov_scipy_least_squares_comparison(self):
        '''
        Scipy results differ in the 3rd decimal place from the paper. It is possible
        the paper used an alternative finite difference approximation for the Jacobian.
        '''
        def model(theta, t):
            '''
            Model to be fitted y = model(theta, t)
            Arguments:
                theta: vector of fitted parameters
                t: independent variable [hours]
                
            Returns:
                y: model predictions [need to check paper for units]
            '''
            asymptote = theta[0]
            rate_constant = theta[1]

            return asymptote * (1 - np.exp(-rate_constant * t))

        def residual(theta, t, y):
            '''
            Calculate residuals
            Arguments:
                theta: vector of fitted parameters
                t: independent variable [hours]
                y: dependent variable [?]
            '''
            return y - model(theta, t)

        # define data
        t = self.data['hour'].to_numpy()
        y = self.data['y'].to_numpy()

        # define initial guess
        theta_guess = np.array([15, 0.5])

        ## solve with optimize.least_squares
        sol = scipy.optimize.least_squares(residual,
                                           theta_guess,
                                           method='trf',
                                           args=(t, y),
                                           verbose=2)
        theta_hat = sol.x

        self.assertAlmostEqual(theta_hat[0], 19.1426,
                               places=2)  # 19.1426 from the paper
        self.assertAlmostEqual(theta_hat[1], 0.5311,
                               places=2)  # 0.5311 from the paper

        # calculate residuals
        r = residual(theta_hat, t, y)

        # calculate variance of the residuals
        # -2 because there are 2 fitted parameters
        sigre = np.matmul(r.T, r / (len(y) - 2))

        # approximate covariance as sigma^2 * inv(J^T J); sol.jac is the
        # Jacobian of the residual vector, so no extra factor is needed even
        # though optimize.least_squares minimizes 1/2 * sum(r^2)
        cov = sigre * np.linalg.inv(np.matmul(sol.jac.T, sol.jac))

        self.assertAlmostEqual(cov[0, 0], 6.22864,
                               places=2)  # 6.22864 from paper
        self.assertAlmostEqual(cov[0, 1], -0.4322,
                               places=2)  # -0.4322 from paper
        self.assertAlmostEqual(cov[1, 0], -0.4322,
                               places=2)  # -0.4322 from paper
        self.assertAlmostEqual(cov[1, 1], 0.04124,
                               places=2)  # 0.04124 from paper
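
The covariance recipe above generalizes to any least-squares fit: cov ~ sigma^2 * inv(J^T J), with sigma^2 the residual variance. Packaged as a small helper (a sketch, not project code):

import numpy as np

def approx_covariance(jac, residuals, n_params):
    # Gauss-Newton covariance estimate: sigma^2 * inv(J^T J)
    sigma2 = residuals @ residuals / (len(residuals) - n_params)
    return sigma2 * np.linalg.inv(jac.T @ jac)

# e.g. cov = approx_covariance(sol.jac, r, 2)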
Example #21
def _numpy_vector(val):
    ans = np.array(val, np.float64)
    if len(ans.shape) != 1:
        raise ValueError("expected a vector, but received a matrix "
                         "with shape %s" % (ans.shape,))
    return ans
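
Usage sketch: one-dimensional input passes through as a float array, while anything with a different number of axes raises the ValueError.

import numpy as np

print(_numpy_vector([1, 2, 3]))   # [1. 2. 3.]
_numpy_vector([[1, 2], [3, 4]])   # raises ValueError: expected a vector ...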