Example #1
    def hessianstructure(self):
        if not self._hessian_available:
            return np.zeros(0), np.zeros(0)

        row = np.compress(self._hess_lower_mask, self._hess_lag.row)
        col = np.compress(self._hess_lower_mask, self._hess_lag.col)
        return row, col
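A minimal standalone sketch of the masking idea above, assuming a symmetric Hessian stored in COO format (hess and lower_mask are illustrative stand-ins for self._hess_lag and self._hess_lower_mask):

import numpy as np
import scipy.sparse as sps

# Symmetric 3x3 Hessian of the Lagrangian in COO format
hess = sps.coo_matrix(np.array([[2.0, 1.0, 0.0],
                                [1.0, 3.0, 4.0],
                                [0.0, 4.0, 5.0]]))
# Keep only the lower-triangular entries, as solvers such as Ipopt expect
lower_mask = hess.row >= hess.col
row = np.compress(lower_mask, hess.row)
col = np.compress(lower_mask, hess.col)
print(row, col)  # [0 1 1 2 2] [0 0 1 1 2]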
Example #2
 def getInitialValue(self):
     x = np.zeros(self.lx, dtype=float)
     y = np.zeros(self.ly, dtype=float)
     z = np.zeros(self.lz, dtype=float)
     for i in range(0, self.lx):
         x[i] = value(self.TRF.xvars[i])
     for i in range(0, self.ly):
         # TODO: find a better initialization for y; default to 1 for now
         y[i] = 1
     for i in range(0, self.lz):
         z[i] = value(self.TRF.zvars[i])
     return x, y, z
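The three fill loops above can also be written with NumPy constructors; a short sketch with plain floats standing in for the Pyomo variables and value():

import numpy as np

# Plain floats stand in for value(var) on each Pyomo variable
xvars = [1.5, 2.0, 3.25]
zvars = [0.5, 0.75]
x = np.asarray(xvars, dtype=float)
y = np.ones(4, dtype=float)  # counterpart of the fill-with-1 loop over y
z = np.asarray(zvars, dtype=float)
print(x, y, z)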
Example #3
    def __init__(self, lhs_coefficients_mat, rhs_vec):
        """
        PolyhedralSet constructor

        Args:
            lhs_coefficients_mat: Matrix of left-hand side coefficients for the linear inequality constraints defining the polyhedral set.
            rhs_vec: Vector (``list``) of right-hand side values for the linear inequality constraints defining the polyhedral set.
        """

        # === Real valued data
        mat = np.asarray(lhs_coefficients_mat)
        if not all(
                isinstance(elem, (int, float)) for row in lhs_coefficients_mat
                for elem in row):
            raise AttributeError(
                "Matrix lhs_coefficients_mat must be real-valued and numeric.")
        if not all(isinstance(elem, (int, float)) for elem in rhs_vec):
            raise AttributeError(
                "Vector rhs_vec must be real-valued and numeric.")
        # === Number of rows of lhs_coefficients_mat must match len(rhs_vec)
        if mat.shape[0] != len(rhs_vec):
            raise AttributeError(
                "Rows of lhs_coefficients_mat matrix must equal length of rhs_vec list."
            )
        # === Matrix must have at least one column
        if mat.shape[1] == 0:
            raise AttributeError(
                "Matrix lhs_coefficients_mat must have at least one column.")
        # === Matrix is not all zeros
        if all(
                np.isclose(elem, 0) for row in lhs_coefficients_mat
                for elem in row):
            raise AttributeError(
                "Matrix lhs_coefficients_mat cannot be all zeroes.")
        # === Feasibility/boundedness check via an LP with zero objective
        res = sp.optimize.linprog(c=np.zeros(mat.shape[1]),
                                  A_ub=mat,
                                  b_ub=rhs_vec,
                                  method="simplex")
        # === Boundedness (checked before the generic success flag, since
        #     an unbounded LP also reports success == False)
        if res.status == 3:
            # scipy linprog status == 3 indicates unboundedness
            raise AttributeError(
                "User-defined PolyhedralSet was determined to be unbounded. "
                "Please augment the set of constraints supplied during set construction."
            )
        # === Non-emptiness
        if not res.success:
            raise AttributeError(
                "User-defined PolyhedralSet was determined to be empty. "
                "Please check the set of constraints supplied during set construction."
            )

        self.coefficients_mat = lhs_coefficients_mat
        self.rhs_vec = rhs_vec
        self.type = "polyhedral"
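To see the non-emptiness check in isolation, here is a minimal sketch of the same scipy.optimize.linprog idea for a unit box written as A x <= b (the data here is made up; the default method is used so the sketch also runs on newer SciPy versions where "simplex" has been removed):

import numpy as np
import scipy.optimize

# Unit box |x_i| <= 1 in two dimensions, written as A x <= b
A = np.array([[1.0, 0.0], [-1.0, 0.0], [0.0, 1.0], [0.0, -1.0]])
b = np.ones(4)
# Zero objective: we only ask whether a feasible point exists
res = scipy.optimize.linprog(c=np.zeros(A.shape[1]), A_ub=A, b_ub=b)
print(res.success)  # True: the polyhedron is non-empty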
Example #4
    def cycle_edge_matrix(self, G):
        """
        Return a cycle-edge incidence matrix, a list of list of nodes in
        each cycle, and a list of list of edge indexes in each cycle.
        """
        cycleNodes, cycleEdges = self.all_cycles(
            G)  # call cycle finding algorithm

        # Create empty incidence matrix and then fill it out
        ceMat = numpy.zeros((len(cycleEdges), G.number_of_edges()),
                            dtype=numpy.dtype(int))
        for i in range(len(cycleEdges)):
            for e in cycleEdges[i]:
                ceMat[i, e] = 1

        return ceMat, cycleNodes, cycleEdges
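The inner loop over each cycle's edges can be collapsed with NumPy fancy indexing; a small self-contained sketch with hand-picked cycles:

import numpy as np

# Two cycles over a 5-edge graph, given as lists of edge indexes
cycle_edges = [[0, 1, 2], [2, 3, 4]]
n_edges = 5
ce_mat = np.zeros((len(cycle_edges), n_edges), dtype=int)
for i, edges in enumerate(cycle_edges):
    ce_mat[i, edges] = 1  # fancy indexing sets a whole row of entries at once
print(ce_mat)
# [[1 1 1 0 0]
#  [0 0 1 1 1]]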
Example #5
def generate_quadratic_rom_geometry(lx, NUM_SEEDS=None):
    if lx in GeometryCache:
        condOpt, txt = GeometryCache[lx]
        psetOpt = np.loadtxt(StringIO(txt))
        if psetOpt.ndim < 2:
            psetOpt = psetOpt.reshape(psetOpt.size, 1)
        matOpt = _pset_to_mat(psetOpt, lx)
        if NUM_SEEDS is None:
            return condOpt, psetOpt, matOpt
        logger.info("Loading cached geometry with condition number %f" %
                    (condOpt, ))
    else:
        condOpt = np.inf
        psetOpt = None
        matOpt = None

    # 5000 seems to be a reasonable number of iterations if we are
    # starting from scratch
    if NUM_SEEDS is None:
        NUM_SEEDS = 5000

    logger.info("Generating %d random geometries for LX=%s" % (NUM_SEEDS, lx))
    dim = int((lx * lx + lx * 3) / 2 + 1)
    x1 = np.zeros(lx)
    for i in range(0, NUM_SEEDS):
        # TODO: the following line raises
        #   ValueError: cannot reshape array of size 0 into shape (0)
        # when lx == 0, i.e. np.random.multivariate_normal(np.zeros(0), np.eye(0), 0)
        pset = np.random.multivariate_normal(x1, np.eye(lx), dim - 1)
        for j in range(dim - 1):
            pset[j] = pset[j] / np.linalg.norm(pset[j])
        pset = np.append(pset, [x1], axis=0)
        mat = _pset_to_mat(pset, lx)
        cond = np.linalg.cond(mat)
        if cond < condOpt:
            logger.info("new: %6d : %10.4f : %10.4f" % (i, condOpt, cond))
            condOpt = cond
            psetOpt = pset
            matOpt = mat
    if psetOpt is None:
        logger.error("lx = %d failed in initialization "
                     "(no non-singular geometries found)!\n" % lx)
    return condOpt, psetOpt, matOpt
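The dim formula above counts the coefficients of a full quadratic model in lx variables (one constant, lx linear terms, lx * (lx + 1) / 2 distinct second-order terms), which is the number of interpolation points the geometry must supply; a quick sanity check:

lx = 3
n_quadratic = lx * (lx + 1) // 2           # distinct second-order terms
dim = 1 + lx + n_quadratic                 # constant + linear + quadratic
assert dim == (lx * lx + lx * 3) // 2 + 1  # same formula as in the code
print(dim)  # 10 points determine a quadratic model in 3 variables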
Example #6
def get_index_of_max_violation(model_data, config, solve_data_list):

    is_discrete_scenarios = (
        config.uncertainty_set.geometry == Geometry.DISCRETE_SCENARIOS)
    matrix_dim = 0
    indices_of_violating_realizations = []
    indices_of_violating_realizations_and_scenario = {}
    if is_discrete_scenarios:
        # There are num_scenarios by num_sep_objectives solutions to consider, take the worst-case per sep_objective
        for idx, row in enumerate(solve_data_list):
            if any(v.found_violation for v in row):
                matrix_dim += 1
                if len([v for v in row if v.found_violation]) > 1:
                    max_val, violation_idx = max(
                        (val.list_of_scaled_violations[idx], the_index)
                        for the_index, val in enumerate(row))
                else:
                    for elem in row:
                        if elem.found_violation:
                            violation_idx = row.index(elem)
                indices_of_violating_realizations.append(idx)
                indices_of_violating_realizations_and_scenario[
                    idx] = violation_idx
    else:
        matrix_dim = sum(
            1 for solve_list in solve_data_list
            for result in solve_list if result.found_violation)
        idx_j = 0
        indices_of_violating_realizations.extend(
            i for i, x in enumerate(solve_data_list)
            if x[idx_j].found_violation)

    if matrix_dim == 0:
        return 0, 0  # Just a dummy index...

    matrix_of_violations = np.zeros(
        shape=(matrix_dim,
               len(model_data.separation_model.util.performance_constraints)))
    violation_dict = {}
    if is_discrete_scenarios:
        violation_dict = indices_of_violating_realizations_and_scenario
    else:
        for k in indices_of_violating_realizations:
            for j in range(len(solve_data_list[k])):
                if solve_data_list[k][j].found_violation:
                    violation_dict[k] = j
    for i in range(matrix_dim):
        for j in range(
                len(model_data.separation_model.util.performance_constraints)):
            if is_discrete_scenarios:
                idx_max_violation_from_scenario = violation_dict[
                    indices_of_violating_realizations[i]]
                matrix_of_violations[i][j] = max(
                    solve_data_list[indices_of_violating_realizations[i]]
                    [idx_max_violation_from_scenario].
                    list_of_scaled_violations[j], 0)
            else:
                matrix_of_violations[i][j] = max(
                    solve_data_list[indices_of_violating_realizations[i]]
                    [0].list_of_scaled_violations[j], 0)

    sums = []
    for i in range(matrix_of_violations.shape[1]):
        col_total = 0  # avoid shadowing the builtin sum()
        column = matrix_of_violations[:, i]
        for j in range(len(column)):
            col_total += column[j]
        sums.append(col_total)
    max_value = max(sums)
    idx_i = sums.index(max_value)

    if is_discrete_scenarios:
        idx_j = violation_dict[idx_i]

    return idx_i, idx_j
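The column-sum bookkeeping at the end of this function can be collapsed into two NumPy calls; a small sketch with a made-up violations matrix:

import numpy as np

# Rows = violating realizations, columns = performance constraints
matrix_of_violations = np.array([[0.0, 2.5, 1.0],
                                 [1.5, 0.5, 0.0]])
col_sums = matrix_of_violations.sum(axis=0)  # total scaled violation per constraint
idx_i = int(np.argmax(col_sums))  # first maximizer, like sums.index(max(sums))
print(col_sums, idx_i)  # [1.5 3.  1. ] 1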
Example #7
def get_dsdp(model,
             theta_names,
             theta,
             var_dic=None,
             tee=False,
             solver_options=None):
    """This function calculates gradient vector of the (decision variables,
    parameters) with respect to the paramerters (theta_names).

    e.g) min f:  p1*x1+ p2*(x2^2) + p1*p2
         s.t  c1: x1 + x2 = p1
              c2: x2 + x3 = p2
              0 <= x1, x2, x3 <= 10
              p1 = 10
              p2 = 5
    the function returns dx/dp and dp/dp, and column orders.

    The following terms are used to define the output dimensions:
    Ncon   = number of constraints
    Nvar   = number of variables (Nx + Ntheta)
    Nx     = number of decision (primal) variables
    Ntheta = number of uncertain parameters.

    Parameters
    ----------
    model: Pyomo ConcreteModel
        model should include an objective function
    theta_names: list of strings
        List of Var names
    theta: dict
        Estimated parameters e.g) from parmest
    tee: bool, optional
        Indicates that ef solver output should be teed
    solver_options: dict, optional
        Provides options to the solver (also the name of an attribute)
    var_dic: dict, optional
        If any original variable name contains "'", supply an auxiliary
        dictionary mapping the quote-free names in theta_names to the
        quoted names,
        e.g.) var_dic =
        {'fs.properties.tau[benzene,toluene]':
        "fs.properties.tau['benzene','toluene']",
        'fs.properties.tau[toluene,benzene]':
        "fs.properties.tau['toluene','benzene']"}

    Returns
    -------
    dsdp: scipy.sparse.csr.csr_matrix
        Ntheta by Nvar size sparse matrix. A Jacobian matrix of the
        (decision variables, parameters) with respect to the parameters
        (theta_names). number of rows = len(theta_names), number of
        columns = len(col)
    col: list
        List of variable names
    """
    m = model.clone()
    original_Param = []
    perturbed_Param = []
    m.extra = ConstraintList()
    if var_dic is None:
        var_dic = {name: name for name in theta_names}
    for i, name in enumerate(theta_names):
        orig_param = Param(initialize=theta[name], mutable=True)
        ptb_param = Param(initialize=theta[name])
        m.add_component("original_%s" % i, orig_param)
        m.add_component("perturbed_%s" % i, ptb_param)
        cuid = ComponentUID(name)
        var = cuid.find_component_on(m)
        m.extra.add(var - orig_param == 0)
        original_Param.append(orig_param)
        perturbed_Param.append(ptb_param)

    m_kaug_dsdp = sensitivity_calculation('kaug', m, original_Param,
                                          perturbed_Param, tee)
    try:
        with open("./dsdp/col_row.col", "r") as myfile:
            col = myfile.read().splitlines()
        with open("./dsdp/col_row.row", "r") as myfile:
            row = myfile.read().splitlines()
        dsdp = np.loadtxt("./dsdp/dsdp_in_.in")
    except OSError as e:
        # Without the k_aug output files there is nothing to compute
        raise RuntimeError(
            "k_aug sensitivity output files not found in ./dsdp") from e
    dsdp = dsdp.reshape((len(theta_names), int(len(dsdp) / len(theta_names))))
    dsdp = dsdp[:len(theta_names), :len(col)]
    try:
        shutil.rmtree('dsdp', ignore_errors=True)
    except OSError:
        pass
    col = [
        i for i in col
        if SensitivityInterface.get_default_block_name() not in i
    ]
    dsdp_out = np.zeros((len(theta_names), len(col)))
    # e.g) k_aug dsdp returns -dx1/dx1 = -1.0
    for i in range(len(theta_names)):
        for j in range(len(col)):
            if SensitivityInterface.get_default_block_name() not in col[j]:
                dsdp_out[i, j] = -dsdp[i, j]

    return sparse.csr_matrix(dsdp_out), col
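A hedged usage sketch, building the small problem from the docstring; it assumes Pyomo and the k_aug tooling used by sensitivity_calculation are installed, and that get_dsdp is importable from this module:

import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x1 = pyo.Var(initialize=0.0, bounds=(0, 10))
m.x2 = pyo.Var(initialize=0.0, bounds=(0, 10))
m.x3 = pyo.Var(initialize=0.0, bounds=(0, 10))
m.p1 = pyo.Var(initialize=10.0)
m.p2 = pyo.Var(initialize=5.0)
m.obj = pyo.Objective(expr=m.p1 * m.x1 + m.p2 * m.x2**2 + m.p1 * m.p2,
                      sense=pyo.minimize)
m.c1 = pyo.Constraint(expr=m.x1 + m.x2 == m.p1)
m.c2 = pyo.Constraint(expr=m.x2 + m.x3 == m.p2)

theta = {'p1': 10.0, 'p2': 5.0}
dsdp, col = get_dsdp(m, ['p1', 'p2'], theta, tee=False)
print(dsdp.toarray().shape, col)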
Example #8
def get_dsdp(model, theta_names, theta, tee=False):
    """This function calculates gradient vector of the variables
        with respect to the parameters (theta_names).

    e.g) min f:  p1*x1+ p2*(x2^2) + p1*p2
         s.t  c1: x1 + x2 = p1
              c2: x2 + x3 = p2
              0 <= x1, x2, x3 <= 10
              p1 = 10
              p2 = 5
    the function returns dx/dp and dp/dp, and column orders.

    The following terms are used to define the output dimensions:
    Ncon   = number of constraints
    Nvar   = number of variables (Nx + Ntheta)
    Nx     = number of decision (primal) variables
    Ntheta = number of uncertain parameters.

    Parameters
    ----------
    model: Pyomo ConcreteModel
        model should include an objective function
    theta_names: list of strings
        List of Var names
    theta: dict
        Estimated parameters e.g) from parmest
    tee: bool, optional
        Indicates that ef solver output should be teed

    Returns
    -------
    dsdp: scipy.sparse.csr.csr_matrix
        Ntheta by Nvar size sparse matrix. A Jacobian matrix of the
        (decision variables, parameters) with respect to parameters
        (theta_names). number of rows = len(theta_name), number of
        columns = len(col)
    col: list
        List of variable names
    """
    # Get parameters from names. In SensitivityInterface, we expect
    # these to be parameters on the original model.
    param_list = []
    for name in theta_names:
        comp = model.find_component(name)
        if comp is None:
            raise RuntimeError("Cannot find component %s on model" % name)
        if comp.ctype is Var:
            # If theta_names correspond to Vars in the model, these vars
            # need to be fixed.
            comp.fix()
        param_list.append(comp)

    sens = SensitivityInterface(model, clone_model=True)
    m = sens.model_instance

    # Setup model and calculate sensitivity matrix with k_aug
    sens.setup_sensitivity(param_list)
    k_aug = K_augInterface()
    k_aug.k_aug(m, tee=tee)

    # Write row and col files in a temp dir, then immediately
    # read into a Python data structure.
    nl_data = {}
    with InTempDir():
        base_fname = "col_row"
        nl_file = ".".join((base_fname, "nl"))
        row_file = ".".join((base_fname, "row"))
        col_file = ".".join((base_fname, "col"))
        m.write(nl_file, io_options={"symbolic_solver_labels": True})
        for fname in [nl_file, row_file, col_file]:
            with open(fname, "r") as fp:
                nl_data[fname] = fp.read()

    # Create more useful data structures from strings
    dsdp = np.fromstring(k_aug.data["dsdp_in_.in"], sep="\n\t")
    col = nl_data[col_file].strip("\n").split("\n")
    row = nl_data[row_file].strip("\n").split("\n")

    dsdp = dsdp.reshape((len(theta_names), int(len(dsdp) / len(theta_names))))
    dsdp = dsdp[:len(theta_names), :len(col)]

    col = [i for i in col if sens.get_default_block_name() not in i]
    dsdp_out = np.zeros((len(theta_names), len(col)))
    for i in range(len(theta_names)):
        for j in range(len(col)):
            if sens.get_default_block_name() not in col[j]:
                # e.g.) k_aug dsdp returns -dx1/dx1 = -1.0
                dsdp_out[i, j] = -dsdp[i, j]

    return scipy.sparse.csr_matrix(dsdp_out), col
Example #9
def lingen(ntrain, p):
    nval = 1000
    n = ntrain + nval

    # Define model statistics
    s = 5  # s determines the number of nonzero regression components
    snr = 5  # snr = var(response)/var(residuals)
    rho = 0.5  # Correlation between columns of X 0<= rho <= 1
    # Defined in accordance with the test set used in Tibshirani's comparison
    b_type = [1, 2, 3, 5]

    # The desired mean values of the sample
    mu = np.zeros([p])

    # The desired covariance matrix
    r = np.zeros([p, p])
    for i in range(p):
        for j in range(p):
            r[i, j] = rho**abs(i - j)

    # Generate the random samples
    x = np.random.multivariate_normal(mu, r, size=n)

    # Determine true model(s)
    t1f = lambda m, n: [i * n // m + n // (2 * m) for i in range(m)]
    b_true = np.zeros([len(b_type), p])
    ind = 0
    for i in b_type:
        if i == 1:
            # s components equal to 1 at ~equivalently spaced indices
            b_true[ind][t1f(s, p)] = 1.0
            ind += 1
        elif i == 2:
            # First s components equal to 1
            b_true[ind][0:s] = 1.0
            ind += 1
        elif i == 3:
            # First s components equally spaced between 0.5 and 10
            b_true[ind][0:s] = np.linspace(0.5, 10, s)
            ind += 1
        elif i == 5:
            # First s components equal to 1, the rest decaying as 0.5 ** (j - s)
            b_true[ind][0:s] = 1.0
            for j in range(s, p):
                b_true[ind][j] = 0.5**(j - s)

    # Sample response data and add noise
    y = np.zeros([len(b_type), n])
    for i in range(len(b_type)):
        bvec = b_true[i]
        res = np.matmul(x, bvec)
        # Add noise of specified signal-to-noise ratio
        res[:] = res[:] + np.random.normal(
            np.zeros([n]),
            np.ones([n]) * np.sqrt(np.var(res) / snr))
        y[i, :] = res

    z = {str(i): y[0][i - 1] for i in range(1, ntrain + 1)}
    x_n = {}
    for i in range(1, ntrain + 1):
        x_n[str(i)] = {('p' + str(j + 1)): x[i - 1, j] for j in range(0, p)}

    return z, x_n, ntrain, p
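A quick usage sketch: draw 20 training points in 10 dimensions and inspect the dictionary layout (the seed is only for reproducibility):

import numpy as np

np.random.seed(0)
z, x_n, ntrain, p = lingen(20, 10)
print(len(z), len(x_n))    # 20 20
print(list(x_n['1'])[:3])  # ['p1', 'p2', 'p3'] (regressor keys per sample)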