Example 1
    def solve_int(self, instantiate=True, bounds=True):
        # glpk.glp_term_out(glpk.GLP_OFF)  # optionally silence GLPK output
        if not self._ready:
            self.update()
        # glpk.glp_term_out(glpk.GLP_ON)
        if self._cols is None or self._rows is None:
            self._read_variables()
        if bounds:
            self._apply_bounds()
        if glpk.glp_get_num_int(self._lp) == 0:  # problem is continuous
            res = glpk.glp_simplex(self._lp, self._parm)
        else:  # problem is a MIP
            if self._tran:
                glpk.glp_mpl_build_prob(self._tran, self._lp)
            # Solve the LP relaxation first (a dual simplex might be
            # preferable here), then run branch-and-cut
            res = glpk.glp_simplex(self._lp, self._parm)
            iocp_param = glpk.glp_iocp()
            glpk.glp_init_iocp(iocp_param)
            # iocp_param.tm_lim = 600*1000  # optional time limit (ms)
            # Accept any integer solution within a 95% relative gap of the
            # optimum; tighten this if solution quality matters more than speed
            iocp_param.mip_gap = 0.95
            glpk.glp_intopt(self._lp, iocp_param)
            if self._tran:
                ret = glpk.glp_mpl_postsolve(self._tran, self._lp, glpk.GLP_MIP)
                if ret != 0:
                    print("Error on postsolving model")
                    raise AttributeError
        if instantiate:
            self._instantiate_solution()
        if res != 0:
            return None
        if glpk.glp_get_num_int(self._lp) == 0:
            return glpk.glp_get_obj_val(self._lp)
        # For a MIP, report the objective of the integer solution rather
        # than the value left over from the LP relaxation
        return glpk.glp_mip_obj_val(self._lp)
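For a MIP, variable values should likewise be read through the glp_mip_* accessors rather than the simplex ones. A minimal sketch, assuming a hypothetical instance `model` of the class above and a 1-based column index `i`:

# Sketch (hypothetical names): reading variable values after solve_int
if glpk.glp_get_num_int(model._lp) == 0:
    x_i = glpk.glp_get_col_prim(model._lp, i)   # continuous (LP) solution
else:
    x_i = glpk.glp_mip_col_val(model._lp, i)    # integer (MIP) solution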
Example 2
import sys

def lp_solve(prob):
    # ret = glpk.glp_simplex(prob, glpk.NULL)
    # lp_prob_print(prob)
    ret = glpk.glp_simplex(prob, None)  # None = default simplex settings
    status = glpk.glp_get_status(prob)
    if ret == 0 and status == glpk.GLP_OPT:
        return glpk.glp_get_obj_val(prob)
    elif ret == glpk.GLP_ENOPFS:
        print('LP has no primal feasible solution')
        sys.exit(1)
    elif status == glpk.GLP_UNBND:
        # Unbounded problem: report +/- infinity according to the
        # optimization direction (INFTY is assumed defined elsewhere)
        minmax = glpk.glp_get_obj_dir(prob)
        return INFTY if minmax == glpk.GLP_MAX else -INFTY
    else:
        lp_prob_print(prob)
        print('ERROR: LP failed')
        print('return = {0}, status = {1}'.format(ret, status))
        sys.exit(1)
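A minimal usage sketch for lp_solve, assuming the same SWIG-style glpk module and that INFTY and lp_prob_print are defined alongside it:

import glpk

prob = glpk.glp_create_prob()
glpk.glp_set_obj_dir(prob, glpk.GLP_MAX)
glpk.glp_add_cols(prob, 1)
glpk.glp_set_col_bnds(prob, 1, glpk.GLP_DB, 0., 10.)  # 0 <= x <= 10
glpk.glp_set_obj_coef(prob, 1, 1.)                    # maximize x
print(lp_solve(prob))                                 # expected: 10.0
glpk.glp_delete_prob(prob)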
Example 3
import glpk  # SWIG-style GLPK bindings (e.g. python-glpk / swiglpk)

# Minimize 0.5 x + y subject to the constraints below
lp = glpk.glp_create_prob()
glpk.glp_set_obj_dir(lp, glpk.GLP_MIN)

glpk.glp_add_cols(lp, 2) # two unknowns: x and y
glpk.glp_set_col_bnds(lp, 1, glpk.GLP_DB, 0., 1.) # 0 <= x <= 1
glpk.glp_set_col_bnds(lp, 2, glpk.GLP_DB, 0., 1.) # 0 <= y <= 1
glpk.glp_add_rows(lp, 2) # 2 constraints
# GLPK uses 1-based indexing: entry 0 of each array is ignored,
# so size 5 holds the 4 nonzero coefficients below
matrix_row_index = glpk.intArray(5)
matrix_column_index = glpk.intArray(5)
matrix_content = glpk.doubleArray(5)
# First constraint: x + y >= 1.
matrix_row_index[1] = 1; matrix_column_index[1] = 1; matrix_content[1] = 1.
matrix_row_index[2] = 1; matrix_column_index[2] = 2; matrix_content[2] = 1.
glpk.glp_set_row_bnds(lp, 1, glpk.GLP_LO, 1., 0.)
# Second constraint: - x + y >= -0.5.
matrix_row_index[3] = 2; matrix_column_index[3] = 1; matrix_content[3] = -1.
matrix_row_index[4] = 2; matrix_column_index[4] = 2; matrix_content[4] = 1.
glpk.glp_set_row_bnds(lp, 2, glpk.GLP_LO, -0.5, 0.)
# Load them
glpk.glp_load_matrix(lp, 4, matrix_row_index, matrix_column_index, matrix_content)
# Cost function: 0.5 x + y
glpk.glp_set_obj_coef(lp, 1, 0.5)
glpk.glp_set_obj_coef(lp, 2, 1.)
# Solve the linear programming problem
options = glpk.glp_smcp()
glpk.glp_init_smcp(options)
options.msg_lev = glpk.GLP_MSG_ERR
options.meth = glpk.GLP_DUAL
glpk.glp_simplex(lp, options)
print "Computed optimum: x = ", glpk.glp_get_col_prim(lp, 1), ", y = ", glpk.glp_get_col_prim(lp, 2) ,", obj = ", glpk.glp_get_obj_val(lp)
print "Expected optimum: x = 0.75, y = 0.25, obj = 0.625"
glpk.glp_delete_prob(lp)
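The example reads the solution without checking the solver outcome. A slightly more defensive version of the solve step, as a sketch with the same bindings:

ret = glpk.glp_simplex(lp, options)
if ret != 0 or glpk.glp_get_status(lp) != glpk.GLP_OPT:
    raise RuntimeError("GLPK simplex did not reach an optimal solution")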

Example 4
 def get_alpha_LB(self, mu, safeguard=True):
     self.load_reduced_data_structures()
     
     lp = glpk.glp_create_prob()
     glpk.glp_set_obj_dir(lp, glpk.GLP_MIN)
     Qa = self.parametrized_problem.Qa
     N = self.N
     M_e = self.M_e
     if M_e < 0:
         M_e = N
     if M_e > len(self.C_J):
         M_e = len(self.C_J) # = N
     M_p = self.M_p
     if M_p < 0:
         M_p = N
     if M_p > len(self.complement_C_J):
         M_p = len(self.complement_C_J)
     
     # 1. Linear program unknowns: Qa variables, y_1, ..., y_{Q_a}
     glpk.glp_add_cols(lp, Qa)
     
     # 2. Range: constrain the variables to be in the bounding box (note: GLPK indexing starts from 1)
     for qa in range(Qa):
         if self.B_min[qa] < self.B_max[qa]: # the usual case
             glpk.glp_set_col_bnds(lp, qa + 1, glpk.GLP_DB, self.B_min[qa], self.B_max[qa])
         elif self.B_min[qa] == self.B_max[qa]: # unlikely, but possible
             glpk.glp_set_col_bnds(lp, qa + 1, glpk.GLP_FX, self.B_min[qa], self.B_max[qa])
         else: # something is wrong in the bounding box: set as an unconstrained variable
             print("Warning: wrong bounding box for affine expansion element #", qa)
             glpk.glp_set_col_bnds(lp, qa + 1, glpk.GLP_FR, 0., 0.)
     
     # 3. Add two different sets of constraints
     glpk.glp_add_rows(lp, M_e + M_p)
     array_size = (M_e + M_p)*Qa
     matrix_row_index = glpk.intArray(array_size + 1) # + 1 since GLPK indexing starts from 1
     matrix_column_index = glpk.intArray(array_size + 1)
     matrix_content = glpk.doubleArray(array_size + 1)
     glpk_container_size = 0
     
     # 3a. Add constraints: a constraint is added for the closest samples to mu in C_J
     closest_C_J_indices = self.closest_parameters(M_e, self.C_J, mu)
     for j in range(M_e):
         # Overwrite parameter values
         omega = self.xi_train[ self.C_J[ closest_C_J_indices[j] ] ]
         self.parametrized_problem.setmu(omega)
         current_theta_a = self.parametrized_problem.compute_theta_a()
         
         # Assemble the LHS of the constraint
         for qa in range(Qa):
             matrix_row_index[glpk_container_size + 1] = int(j + 1)
             matrix_column_index[glpk_container_size + 1] = int(qa + 1)
             matrix_content[glpk_container_size + 1] = current_theta_a[qa]
             glpk_container_size += 1
         
         # Assemble the RHS of the constraint
         glpk.glp_set_row_bnds(lp, j + 1, glpk.GLP_LO, self.alpha_J[ closest_C_J_indices[j] ], 0.)
     closest_C_J_indices = None
     
     # 3b. Add constraints: also constrain the closest point in the complement of C_J, 
     #                      with RHS depending on previously computed lower bounds
     closest_complement_C_J_indices = self.closest_parameters(M_p, self.complement_C_J, mu)
     for j in range(M_p):
         nu = self.xi_train[ self.complement_C_J[ closest_complement_C_J_indices[j] ] ]
         self.parametrized_problem.setmu(nu)
         current_theta_a = self.parametrized_problem.compute_theta_a()
         # Assemble first the LHS
         for qa in range(Qa):
             matrix_row_index[glpk_container_size + 1] = int(M_e + j + 1)
             matrix_column_index[glpk_container_size + 1] = int(qa + 1)
             matrix_content[glpk_container_size + 1] = current_theta_a[qa]
             glpk_container_size += 1
         # ... and then the RHS
         glpk.glp_set_row_bnds(lp, M_e + j + 1, glpk.GLP_LO, self.alpha_LB_on_xi_train[ self.complement_C_J[ closest_complement_C_J_indices[j] ] ], 0.)
     closest_complement_C_J_indices = None
     
     # Load the assembled LHS
     glpk.glp_load_matrix(lp, array_size, matrix_row_index, matrix_column_index, matrix_content)
     
     # 4. Add cost function coefficients
     self.parametrized_problem.setmu(mu)
     current_theta_a = self.parametrized_problem.compute_theta_a()
     for qa in range(Qa):
         glpk.glp_set_obj_coef(lp, qa + 1, current_theta_a[qa])
     
     # 5. Solve the linear programming problem
     options = glpk.glp_smcp()
     glpk.glp_init_smcp(options)
     options.msg_lev = glpk.GLP_MSG_ERR
     options.meth = glpk.GLP_DUAL
     glpk.glp_simplex(lp, options)
     alpha_LB = glpk.glp_get_obj_val(lp)
     glpk.glp_delete_prob(lp)
     
     # 6. If a safeguard is requested (when called in the online stage of the RB method),
     #    we check the resulting value of alpha_LB. In order to avoid divisions by zero
     #    or taking the square root of a negative number, we allow an inefficient evaluation.
     if safeguard:
         tol = 1e-10
         alpha_UB = self.get_alpha_UB(mu)
         if alpha_LB/alpha_UB < tol:
             print("SCM warning: alpha_LB is <= 0 at mu = " + str(mu) + ".",
                   "Please consider a larger Nmax for SCM. Meanwhile, a truth",
                   "eigensolve is performed.")
             (alpha_LB, _, _) = self.truth_coercivity_constant()
         if alpha_LB/alpha_UB > 1 + tol:
             print("SCM warning: alpha_LB is > alpha_UB at mu = " + str(mu) + ".",
                   "This should never happen!")
     
     return alpha_LB
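The method relies on a closest_parameters helper that is not shown. A plausible minimal sketch of its contract (an assumption, not the actual implementation): it returns the indices into the index set S of the M training parameters closest to mu, matching how the result is used above.

import numpy as np

def closest_parameters(self, M, S, mu):
    # Hypothetical sketch: Euclidean distance in parameter space between
    # mu and each training point xi_train[s] for s in S; the returned
    # values index into S.
    distances = np.array([np.linalg.norm(np.asarray(self.xi_train[s]) - np.asarray(mu))
                          for s in S])
    return np.argsort(distances)[:M]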