Example No. 1
def nash_equilibrium(a):
    size = a.shape
    rows = size[0]
    cols = size[1]

    #-------------NEW PART--------------
    k = a[0][0]

    for i in range(0, rows):
        for j in range(0, cols):
            if k > a[i][j]:
                k = a[i][j]

    if k < 0:
        k = -k + 1
    else:
        k = k + 1

    for i in range(0, rows):
        for j in range(0, cols):
            a[i][j] = a[i][j] + k


    #------------------------------------

    a1 = a.transpose()
    b1 = np.ones((1, cols), dtype=int)
    c1 = np.ones((1, rows), dtype=int)

    a2 = a
    b2 = np.ones((1, rows), dtype=int)
    c2 = np.ones((1, cols), dtype=int)

    res_row = lp(c1[0], -a1, -b1[0])
    v = 1 / res_row.fun
    p = res_row.x * v

    res_col = lp(-c2[0], a2, b2[0])
    v = -1 / res_col.fun
    q = res_col.x * v

    #-------------------------------------
    v = v - k
    #-------------------------------------

    print(v)
    print(p)
    print(q)

    return v, p, q
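
# A minimal usage sketch (assumes `import numpy as np` and
# `from scipy.optimize import linprog as lp` at module level, as the function body implies).
payoff = np.array([[1.0, -1.0],
                   [-1.0, 1.0]])        # matching pennies
value, p_row, q_col = nash_equilibrium(payoff)
# Expected: game value ~ 0 and mixed strategies p ~ q ~ [0.5, 0.5].
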
Example No. 2
def lp_solve(cur_est_rets, limit_factors, cur_benchmark_wt, num_multi=5):
    """
    线性规划计算函数:
    输入:截面预期收益,约束条件(风险因子),截面标的指数成分股权重,个股权重约束倍数
    输出:经优化后的组合内个股权重
    """
    data = pd.concat([cur_est_rets, limit_factors, cur_benchmark_wt], axis=1)
    data = data.dropna(how='any', axis=0)
    cur_est_rets, limit_factors, cur_benchmark_wt = (data.iloc[:, 0:1],
                                                     data.iloc[:, 1:-1],
                                                     data.iloc[:, -1])

    # **** Do not shorten this to df /= df.sum(); the cause of the resulting error is still unknown ****
    cur_benchmark_wt = cur_benchmark_wt / cur_benchmark_wt.sum()

    c = cur_est_rets.values.flatten()

    A_ub = None
    b_ub = None
    A_eq = np.r_[limit_factors.T.values,
                 np.repeat(1, len(limit_factors)).reshape(1, -1)]
    b_eq = np.r_[np.dot(limit_factors.T, cur_benchmark_wt), np.array([1])]
    bounds = tuple([(0, num_multi * wt_in_index)
                    for wt_in_index in cur_benchmark_wt.values])
    res = lp(-c, A_ub, b_ub, A_eq, b_eq, bounds)

    cur_wt = pd.Series(res.x, index=cur_est_rets.index)
    return cur_wt
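
# A minimal usage sketch with synthetic data (assumes `import numpy as np`,
# `import pandas as pd` and `from scipy.optimize import linprog as lp` at module level).
tickers = ['A', 'B', 'C', 'D']
est_rets = pd.DataFrame({'est_ret': [0.02, 0.01, -0.01, 0.03]}, index=tickers)
factors = pd.DataFrame({'size': [1.0, 0.5, -0.5, -1.0],
                        'value': [0.2, -0.1, 0.3, -0.4]}, index=tickers)
bench_wt = pd.Series([0.4, 0.3, 0.2, 0.1], index=tickers, name='wt')
opt_wt = lp_solve(est_rets, factors, bench_wt, num_multi=5)
print(opt_wt)   # optimized weights, factor-neutral relative to the benchmark
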
Example No. 3
def checkFormClosure(contacts):
    """Checks planar form closure given a set of contact points and contact normals

    :param contacts: List of contact points. Each element should be of format (x,y,theta)
                    i.e. (x,y) contact location
                        and the direction of the contact normal specified by theta (radians)
    :return result: A logical value where TRUE means form closure
    :return output: Solution of the linear programming problem, if form closure is true.
                    Else null list.

    Example Input:
        contacts = [[3,5,4.712],[3,5,0.785],[12,5,4.712],[12,5,0.785]]
    Output:
    """

    contactCount = len(contacts)

    # Generating wrench matrix F  : (Mz, Fx, Fy)
    F = [[0 for x in range(contactCount)] for y in range(3)]
    for contactID in range(contactCount):
        x, y, theta = contacts[contactID]

        Fx = math.cos(theta)
        Fy = math.sin(theta)
        forceVector = [Fx, Fy, 0]  # Force vector
        pointVector = [x, y, 0]  # Vector from origin to contact point
        M = np.cross(pointVector, forceVector)
        Mx, My, Mz = M

        F[0][contactID] = Mz
        F[1][contactID] = Fx
        F[2][contactID] = Fy

    f = [1 for x in range(contactCount)]
    A = [[0 for x in range(contactCount)] for y in range(contactCount)]
    for x in range(contactCount):
        A[x][x] = -1
    b = [-1 for x in range(contactCount)]

    Aeq = F
    beq = [0 for x in range(3)]

    lpOutput = lp(f, A, b, Aeq, beq)
    k = []
    result = lpOutput.get("success")
    if result:
        k = lpOutput.get("x")
    return result, k
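
# A usage sketch based on the docstring's example input (assumes `import math`,
# `import numpy as np` and `from scipy.optimize import linprog as lp` at module level).
contacts = [[3, 5, 4.712], [3, 5, 0.785], [12, 5, 4.712], [12, 5, 0.785]]
closure, k = checkFormClosure(contacts)
# Only two normal directions (roughly straight down and 45 degrees up-right) occur here,
# so the contact wrenches cannot positively span the plane: expect closure == False.
print(closure, k)
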
Example No. 4
    def runLP(self, results, c_bnds, A_ub, b_ub_log, b_eq_log, fields,
              water_sources):
        """
        :param results: Pandas DataFrame of possible field combinations, used to store LP results
        :param c_bnds: Pandas DataFrame of c coefficients and bounds for each field and water source
        :param A_ub: left hand side upper bounds
        :param b_ub_log: right hand side upper bounds
        :param b_eq_log: right hand equality constraints
        :param fields: Pandas DataFrame of FarmField objects to consider
        :param water_sources: Water Sources to consider
        """

        #Create a list of all fields, column by column
        f = []
        for field in fields:
            f += fields[field].tolist()
        #End for

        fields = f

        if (type(results) != pd.DataFrame) or results.empty:
            results = c_bnds.drop(["max_area", "bounds"], axis=1)
            results["profit"] = np.zeros
            results["farm_area"] = np.zeros
            results["area_breakdown"] = np.zeros

        #Build list of c, b_ub, and bounds for each combination of fields
        temp_df = c_bnds.drop("max_area", axis=1)

        c_length = len(A_ub[0])

        for row in c_bnds.itertuples():

            i = row.Index

            #Skip rows that are all NaNs
            if all(np.isnan(x) for x in row[1:]):
                continue
            #End if

            c = []

            for j in temp_df.iloc[i]:
                if (type(j) is not list) and (type(j) is not tuple):
                    c.append(j if np.isnan(j) == False else 0)
                elif type(j) is list:
                    b_ub = j
                elif type(j) is tuple:
                    bounds = j
            #End for

            #Insert a list at an index as we
            #need to include bounds for each GW+SW combination
            b_ub_list = b_ub_log.iloc[0].tolist()
            b_ub = []

            #Generate maximum bounds for b_ub
            #Generate a map between fields and Aub constraints
            self.genAubMap(fields, water_sources)

            #Calculate max area possible with each water source
            max_ws_areas = [f.area for f in self.A_ub_map]

            #Grab the maximum field area and farm area
            max_field_areas = [f.area for f in np.unique(self.A_ub_map)]
            max_b_ub = max_ws_areas + max_field_areas + [sum(max_field_areas)]

            #Generate b_ub for each A_ub entry
            #Calculate total sum for each right hand upper bound combination
            for idx, ub in enumerate(A_ub):

                temp = np.array([])
                #Create association between Field-WaterSource irrigation area and b_ub True/False
                for f, b in zip(b_ub_list, ub):
                    temp = np.append(temp, f) if b == True else temp
                #End for

                b_ub.append(min(sum(temp), max_b_ub[idx]))
            #End for

            assert len(A_ub) == len(
                b_ub
            ), "Number of A ub rows must be equal to number of elements in b_ub"

            try:
                b_eq = b_eq_log.iloc[i].tolist()
            except IndexError:
                b_eq = []
            #End try

            #If there is only one equality constraint, ensure that it matches possible farm area
            #LP crashes out if this is not set
            if (len(b_eq) == 1):
                b_eq[0] = bounds[1]
            #End if

            if len(fields) > 1:
                #b_ub.append(c_bnds.iloc[i]['max_area'])
                A_eq = A_ub[c_length:-1]
            else:
                A_eq = A_ub[c_length:]
            #End if

            try:

                bounds = [(0, None)] * len(
                    c)  #[(0, c_bnds['max_area'][0]) for b in b_ub_list ]
                #bounds = [(0, min(b, c_bnds['max_area'][0])) for b in b_ub_list]

                A_eq = [
                    A_eq[j] for j in range(len(b_eq))
                    if np.isnan(b_eq[j]) == False
                ]
                b_eq = [
                    x for x in b_eq if str(x) != 'nan'
                ]  #Template is copied from b_ub which has extra elements; remove the unneeded entries

                if (len(A_eq) == 0) and (len(b_eq) == 0):
                    A_eq = None
                    b_eq = None
                #End if

                # assert len(A_eq) == len(b_eq), "Number of equality constraints do not match ({A} != {b}, A_eq != b_eq)".format(A=len(A_eq), b=len(b_eq))

                res = lp(c=c,
                         A_ub=A_ub,
                         A_eq=A_eq,
                         b_ub=b_ub,
                         b_eq=b_eq,
                         bounds=bounds)

            except (ValueError, IndexError) as e:
                print "====================="
                print c_bnds
                print "c: ", c
                print "A_ub: ", A_ub
                print "b_ub: ", b_ub
                print "--------------"
                print "A_ub[c]: ", A_ub[c_length:]
                print "A_eq: ", A_eq
                print "b_eq: ", b_eq
                print "Bounds:", bounds

                print "b_eq_log: "
                print b_eq_log

                print len(c)
                print len(A_ub)
                print len(b_ub)

                print "===================\n"
                print e
                print e.args
                print "===================\n"

                import sys
                sys.exit('Error occured during LP')
            #End try

            if res.success is False:
                print "-------------------"
                print res
                print "c: ", c
                print "A_ub: ", A_ub
                print "b_ub", b_ub
                print "A_eq", A_eq
                print "b_eq", b_eq
                print "bounds: ", bounds
                print "c_length: ", c_length
                print results
                print "------------------\n\n"
                print "LP failed!"
                import sys
                sys.exit()
            #End if

            #import sys; sys.exit('Exit inside LP run for debug')

            #Area of each field/water source, (estimated) profit made, area breakdown
            row = np.append(res.x, [res.fun, sum(res.x)]).tolist() + [
                '| '.join(str(e) for e in res.x.tolist())
            ]

            results.loc[i] = row

        #End for

        return results
Aeq[pss["plants"] + pss["stocks"]:, :] = NA[pss["plants"] + pss["stocks"]:, :]

# load cost array of arcs dimensions
C = np.array([
    100, 200, 100, 200, 150, 150, 150, 150, 200, 100, 200, 100, 100, 150, 200,
    100, 150, 200, 200, 150, 100, 200, 150, 100
])

# "linear equality constraint" of nodes dimensions
b = np.array(
    [20, 30, 10, 40, 30, 10, 0, 0, 0, 0, -30, -40, -10, -20, -20, -20])

# Split b into an upper-bound part (plant and stock nodes) and an equality part (the remaining nodes)
bub = np.zeros(b.shape)
beq = np.zeros(b.shape)

bub[:pss["plants"] + pss["stocks"]] = b[:pss["plants"] + pss["stocks"]]
beq[pss["plants"] + pss["stocks"]:] = b[pss["plants"] + pss["stocks"]:]

## load bounds of arcs dimensions
#ubs = np.array([np.inf,np.inf,np.inf,np.inf,10,5,10,5,5,5,3,3,6,3,6,3,3,3,np.inf,np.inf,np.inf,np.inf,np.inf])
bounds = tuple((0, np.inf) for _ in range(C.shape[0]))

# solve the linear prog for simplex and interior point
res = lp(C, A_ub=Aub, A_eq=Aeq, b_ub=bub, b_eq=beq, bounds=bounds)
res_ip = lp(C, A_eq=NA, b_eq=b, bounds=bounds, method='interior-point')
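
# Optionally compare the two formulations above (a sketch; assumes both solves succeeded).
print("split ub/eq formulation objective:", res.fun)
print("pure-equality interior-point objective:", res_ip.fun)
print("arc flows:", np.round(res.x, 2))
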
Example No. 6
	b_eq = [area]

	#Costs per Ha of producing the negated profit
	# A_ub = [
	# 	[Wheat.water_use_ML_per_Ha, Canola.water_use_ML_per_Ha, Tomato.water_use_ML_per_Ha],
	# ]

	A_ub = [Wheat.water_use_ML_per_Ha, Canola.water_use_ML_per_Ha, Tomato.water_use_ML_per_Ha]

	#Constraints to producing negated profit 
	b_ub = [
		water_entitlement
	]

	bounds = [(0, area), (0, area), (0, area)]

	res = lp(c, A_eq=A_eq, b_eq=b_eq, A_ub=A_ub, b_ub=b_ub, bounds=bounds, options={"disp": True})

	print(res)

	print(c)
	print(A_ub)
	print(b_ub)

	# print (tomato_cost - tomato_profit) * area
	# print (wheat_cost - wheat_profit) * area
	# print (canola_cost - canola_profit) * area
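
# A self-contained re-creation of the pattern above with hypothetical figures
# (crop profits and water use per Ha are made up for illustration).
import numpy as np
from scipy.optimize import linprog as lp

area = 100.0                 # total farm area [Ha]
water_entitlement = 450.0    # available water [ML]

profit_per_ha = np.array([300.0, 350.0, 900.0])       # wheat, canola, tomato
water_use_ml_per_ha = np.array([4.0, 4.5, 6.0])

c = -profit_per_ha                                     # negate: linprog minimizes
A_eq = [[1.0, 1.0, 1.0]]                               # planted areas must sum to the farm area
b_eq = [area]
A_ub = [water_use_ml_per_ha.tolist()]                  # total water use <= entitlement
b_ub = [water_entitlement]
bounds = [(0, area)] * 3

res = lp(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=bounds)
print(res.x, -res.fun)       # optimal areas per crop and the resulting profit
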



Example No. 7
    def runLP(self, results, c_bnds, A_ub, b_ub_log, b_eq_log, fields, water_sources):

        """
        :param results: Pandas DataFrame of possible field combinations, used to store LP results
        :param c_bnds: Pandas DataFrame of c coefficients and bounds for each field and water source
        :param A_ub: left hand side upper bounds
        :param b_ub_log: right hand side upper bounds
        :param b_eq_log: right hand equality constraints
        :param fields: Pandas DataFrame of FarmField objects to consider
        :param water_sources: Water Sources to consider
        """

        #Create a list of all fields, column by column
        f = []
        for field in fields:
            f += fields[field].tolist()
        #End for

        fields = f

        if (type(results) != pd.DataFrame) or results.empty:
            results = c_bnds.drop(["max_area", "bounds"], axis=1)
            results["profit"] = np.zeros
            results["farm_area"] = np.zeros
            results["area_breakdown"] = np.zeros

        #Build list of c, b_ub, and bounds for each combination of fields
        temp_df = c_bnds.drop("max_area", axis=1)

        c_length = len(A_ub[0])

        for row in c_bnds.itertuples():

            i = row.Index

            #Skip rows that are all NaNs
            if all( np.isnan(x) for x in row[1:] ):
                continue
            #End if

            c = []

            for j in temp_df.iloc[i]:
                if (type(j) is not list) and (type(j) is not tuple):
                    c.append(j if np.isnan(j) == False else 0)
                elif type(j) is list:
                    b_ub = j
                elif type(j) is tuple:
                    bounds = j
            #End for

            #Insert a list at an index as we
            #need to include bounds for each GW+SW combination
            b_ub_list = b_ub_log.iloc[0].tolist()
            b_ub = []

            #Generate maximum bounds for b_ub
            #Generate a map between fields and Aub constraints
            self.genAubMap(fields, water_sources)

            #Calculate max area possible with each water source
            max_ws_areas = [f.area for f in self.A_ub_map]

            #Grab the maximum field area and farm area
            max_field_areas = [f.area for f in np.unique(self.A_ub_map)]
            max_b_ub = max_ws_areas + max_field_areas + [sum(max_field_areas)]
            
            #Generate b_ub for each A_ub entry
            #Calculate total sum for each right hand upper bound combination
            for idx, ub in enumerate(A_ub):

                temp = np.array([])
                #Create association between Field-WaterSource irrigation area and b_ub True/False
                for f, b in zip(b_ub_list, ub):
                    temp = np.append(temp, f) if b == True else temp
                #End for

                b_ub.append(min(sum(temp), max_b_ub[idx]))
            #End for

            assert len(A_ub) == len(b_ub), "Number of A ub rows must be equal to number of elements in b_ub"

            try:
                b_eq = b_eq_log.iloc[i].tolist()
            except IndexError:
                b_eq = []
            #End try

            #If there is only one equality constraint, ensure that it matches possible farm area
            #LP crashes out if this is not set
            if (len(b_eq) == 1):
                b_eq[0] = bounds[1]
            #End if

            if len(fields) > 1:
                #b_ub.append(c_bnds.iloc[i]['max_area'])
                A_eq = A_ub[c_length:-1]
            else:
                A_eq = A_ub[c_length:]
            #End if

            try:

                bounds = [(0, None)] * len(c) #[(0, c_bnds['max_area'][0]) for b in b_ub_list ]
                #bounds = [(0, min(b, c_bnds['max_area'][0])) for b in b_ub_list]

                A_eq = [A_eq[j] for j in range(len(b_eq)) if np.isnan(b_eq[j]) == False]
                b_eq = [x for x in b_eq if str(x) != 'nan'] #Template is copied from b_ub which has extra elements; remove the unneeded entries

                if (len(A_eq) == 0) and (len(b_eq) == 0):
                    A_eq = None
                    b_eq = None
                #End if

                # assert len(A_eq) == len(b_eq), "Number of equality constraints do not match ({A} != {b}, A_eq != b_eq)".format(A=len(A_eq), b=len(b_eq))

                res = lp(c=c, A_ub=A_ub, A_eq=A_eq, b_ub=b_ub, b_eq=b_eq, bounds=bounds)

            except (ValueError, IndexError) as e:
                print "====================="
                print c_bnds
                print "c: ", c
                print "A_ub: ", A_ub
                print "b_ub: ", b_ub
                print "--------------"
                print "A_ub[c]: ", A_ub[c_length:]
                print "A_eq: ", A_eq
                print "b_eq: ", b_eq
                print "Bounds:", bounds

                print "b_eq_log: "
                print b_eq_log

                print len(c)
                print len(A_ub)
                print len(b_ub)

                print "===================\n"
                print e
                print e.args
                print "===================\n"

                import sys; sys.exit('Error occured during LP')
            #End try

            if res.success is False:
                print "-------------------"
                print res
                print "c: ", c
                print "A_ub: ", A_ub
                print "b_ub", b_ub
                print "A_eq", A_eq
                print "b_eq", b_eq
                print "bounds: ", bounds
                print "c_length: ", c_length
                print results
                print "------------------\n\n"
                print "LP failed!"
                import sys; sys.exit()
            #End if

            #import sys; sys.exit('Exit inside LP run for debug')

            #Area of each field/water source, (estimated) profit made, area breakdown
            row = np.append(res.x, [res.fun, sum(res.x)]).tolist() + ['| '.join(str(e) for e in res.x.tolist())]

            results.loc[i] = row

        #End for

        return results
Example No. 8
# transform Node to Node matrix to Node Arc matrix, using the function made in basic utils EX0
# We need two matrices: the A_eq matrix that corresponds to the NA matrix, and the time constraint array
NA, arcs = nodoNodoANodoArco(NN)
Aeq = NA
Aub = [[3,1,3,1,3,3,5]]


# load cost array of arcs dimensions
C = np.array([2,1,2,5,2,1,2])

# "linear equality constraint" of nodes dimensions. Equality constraints for each node for flow 
# and upper bound for total time constraint T
T = 8 # From the exercise 
beq = np.array([1,0,0,0,0,-1])
bub = [T]

## load bounds of arcs dimensions
bounds = tuple((0, np.inf) for _ in range(C.shape[0]))

# solve the linear prog for simplex and interior point
res = lp(C, A_eq=Aeq, b_eq=beq, A_ub=Aub, b_ub=bub, bounds=bounds)





Example No. 9
def checkForceClosure(bodies, contacts):
    """Checks planar force closure given a set of objects and contact information

    :param bodies: List of object bodies. Each element should be of format (ID,x,y,mass)
                    i.e.
                        ID : Body ID (0 to N-1 if the number of bodies is N)
                        (x,y): Location of mass center
                        mass : Mass of the body in kg
    :param contacts: List of contact points. Each element should be of format (a,b,x,y,theta,u)
                    i.e.
                        a,b : Bodies in contact
                        (x,y) : contact location
                        theta : direction of the contact normal specified in radians
                                (Contact normal is into body a)
                        u : Coulomb friction coefficient
    :return result: A logical value where TRUE means force closure
    :return output: Solution of the linear programming problem, if force closure is true.
                    Else null list.

    Examples 
    ========    
    Input:
        bodies = [[1, 25, 35, 2], [2, 66, 42, 10]]
        contacts = [[1, 2, 60, 60, 3.1416, 0.5], [1, 0, 0, 0, 1.5708, 0.5], [2, 0, 60, 0, 1.5708, 0.5], [2, 0, 72, 0, 1.5708, 0.5]]
    Output:
        Force closure successful. k vector: [ 0.00, 6.21, 3.42, 15.84, 15.04, 12.91, 48.62, 38.34 ]

    Input:
        bodies = [[1, 25, 35, 2], [2, 66, 42, 5]]
        contacts = [[1, 2, 60, 60, 3.1416, 0.5], [1, 0, 0, 0, 1.5708, 0.1], [2, 0, 60, 0, 1.5708, 0.5], [2, 0, 72, 0, 1.5708, 0.5]]
    Output:
        Force closure fail
    """

    bodyCount = len(bodies) + 1  # Body 0 (stationary ground) is not provided as a separate input item
    contactCount = len(contacts)
    wrenchCount = contactCount * 2

    # Generating Ext Wrench matrix (Body Weight Wrenches) Fext : (Mz, Fx, Fy)
    FextList = [0 for x in range(bodyCount)]
    for body in bodies:
        ID, x, y, mass = body

        point = [x, y]
        theta = 1.5 * math.pi  # Gravity acts downwards - 270 degrees
        force = mass * 10  # Gravitational acceleration taken as 10 m/s2
        FextList[ID] = forceToWrench(point, theta, force)

    # Generating List of contact wrench matrices. Each element F  : (Mz, Fx, Fy)
    Flist = [[[0 for x in range(wrenchCount)] for y in range(3)]
             for z in range(bodyCount)]
    for contactID in range(contactCount):
        bodyA, bodyB, x, y, theta, u = contacts[contactID]
        frictionAngle = math.atan(u)

        # Friction cone left edge
        WrenchL = forceToWrench((x, y), theta + frictionAngle)
        wrenchLID = contactID * 2
        # Friction cone right edge
        WrenchR = forceToWrench((x, y), theta - frictionAngle)
        wrenchRID = (contactID * 2) + 1
        # bodyA : Contact normal is into body A
        for var in range(3):
            Flist[bodyA][var][wrenchLID] = WrenchL[var]
            Flist[bodyA][var][wrenchRID] = WrenchR[var]
        # bodyB : Contact normal is out of body B
        for var in range(3):
            Flist[bodyB][var][wrenchLID] = -WrenchL[var]
            Flist[bodyB][var][wrenchRID] = -WrenchR[var]

    # Remove Stationary ground from Wrench lists
    FextList.pop(0)
    Flist.pop(0)

    f = [1 for x in range(wrenchCount)]
    A = [[0 for x in range(wrenchCount)] for y in range(wrenchCount)]
    for x in range(wrenchCount):
        A[x][x] = -1
    b = [0 for x in range(contactCount * 2)]

    Aeq = []
    for F_ in Flist:
        Aeq = Aeq + F_
    beq = []
    for b_ in FextList:
        beq = beq + b_
    beq = [-x for x in beq]  # Since Fk = -Fext

    lpOutput = lp(f, A, b, Aeq, beq, method='interior-point')
    k = []
    result = lpOutput.get("success")
    if result:
        k = lpOutput.get("x")
    return result, k
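
# A usage sketch based on the first docstring example (assumes `forceToWrench`, `math`,
# `numpy as np` and `linprog as lp` are available at module level).
bodies = [[1, 25, 35, 2], [2, 66, 42, 10]]
contacts = [[1, 2, 60, 60, 3.1416, 0.5], [1, 0, 0, 0, 1.5708, 0.5],
            [2, 0, 60, 0, 1.5708, 0.5], [2, 0, 72, 0, 1.5708, 0.5]]
closure, k = checkForceClosure(bodies, contacts)
print("Force closure:", closure)   # the docstring reports force closure for this input
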
Example No. 10
    def fit_hard(self, trainx, trainy):
        """
        Hard margin case (mainly for testing):
        Compute the optimal weight vector to classify (trainx, trainy) using the
        Scipy function 'linprog'.

        The variable assignment for l = 4 samples is given by

        x = (alpha_1, alpha_2, alpha_3, alpha_4, b, eps)

        leading to l + 2 = 6 variables.

        Parameters
        ----------
        trainx : numpy array of floats, num_train_samples-by-num_features
                 Input training samples

        trainy : list of numpy array of floats or integers num_train_samples-by-one
                 Input training labels

        Returns
        -------
        self : object

        """
        print "Fitting hard margin using Scipy linprog...\n"
        [self.num_train_samples, self.num_features] = trainx.shape

        # trainx and testx in 'predict', trainy for kernel, and testy for 'plot2d'
        self.trainx = trainx
        self.trainy = trainy

        # Constraints from data (halfspaces), this is the transpose of K
        Ktraintrans = get_label_adjusted_train_kernel(trainx,
                                                      trainy,
                                                      kernel=self.kernel,
                                                      degree=self.degree,
                                                      gamma=self.gamma,
                                                      coef0=self.coef0)

        # Objective function
        c = np.vstack((np.zeros((self.num_train_samples + 1, 1)), 1)).flatten()

        a_ub = np.hstack((-Ktraintrans, -np.ones((self.num_train_samples, 1))))
        # b_ub = np.zeros((self.num_train_samples, 1))
        b_ub = np.zeros(self.num_train_samples)
        lb = np.hstack((-np.ones(self.num_train_samples + 1), -1e9))
        ub = np.hstack((np.ones(self.num_train_samples + 1), 1e9))

        # Scipy lp solver (Bland's rule anti-cycling option enabled)
        result = lp(c=c,
                    A_ub=a_ub,
                    b_ub=b_ub,
                    bounds=list(zip(lb, ub)),
                    options=dict(bland=True))

        if result.message == 'Optimization failed. Unable to ' \
                             'find a feasible starting point.':
            print(result)
        if result.status > 0:
            print('Scipy linprog did not terminate successfully. status = %d'
                  % result.status)

        weight_opt = result.x
        self.weight_opt = weight_opt[:-1]
        self.eps_opt = weight_opt[-1]
        self.fun_opt = result.fun
        if np.abs(self.eps_opt - self.fun_opt) >= 0.00001:
            warnings.warn('\neps_opt is not identical to fun_opt. eps_opt - fun_opt = %0.6f. ' \
                          % (self.eps_opt - self.fun_opt))
        if np.abs(self.eps_opt) <= 0.00001:
            warnings.warn('\neps_opt is close to zero. Data not separable. ')
Example No. 11
    # 	[Wheat.water_use_ML_per_Ha, Canola.water_use_ML_per_Ha, Tomato.water_use_ML_per_Ha],
    # ]

    A_ub = [
        Wheat.water_use_ML_per_Ha, Canola.water_use_ML_per_Ha,
        Tomato.water_use_ML_per_Ha
    ]

    #Constraints to producing negated profit
    b_ub = [water_entitlement]

    bounds = [(0, area), (0, area), (0, area)]

    res = lp(c,
             A_eq=A_eq,
             b_eq=b_eq,
             A_ub=A_ub,
             b_ub=b_ub,
             bounds=bounds,
             options={"disp": True})

    print(res)

    print(c)
    print(A_ub)
    print(b_ub)

    # print (tomato_cost - tomato_profit) * area
    # print (wheat_cost - wheat_profit) * area
    # print (canola_cost - canola_profit) * area
Example No. 12
    def update_bid(self):
        # Define the number of steps the perfect foresight optimization should look in the future.
        n_step = self.forecast_horizon

        if self.bidding_solver == "linprog":
            """ Linear program """

            # To derive the bid for this time step, a linear optimization (linprog) determines the optimal amount of
            # hydrogen that should be produced depending on the electricity price and the demand over the forecast horizon.
            # This requires perfect foresight and in order to formulate a linear optimization problem, the temperature
            # dependent electrolyzer efficiency is not taken into account.
            #
            # The optimization problem is formulated in the way:
            # min  c * x
            # s.t. A * x <= b
            # x >= 0 and x < max. producible hydrogen
            #
            # Here, x is a vector with the amount of hydrogen produced each time step, c is the estimated cost function
            # for each time step (EEX spot marked costs are used), A * x <= b is used to make sure that the storage
            # never falls below the min. storage level (safety buffer).
            # Number of time steps of the future used for the optimization.

            # Define the electricity costs [EUR/kWh].
            c = self.model.data.utility_pricing_profile[self.current_step:self.
                                                        current_step + n_step]
            # Define the inequality matrix (A) and vector (b) that make sure that at no time step the storage is below
            # the wanted buffer value.
            # The matrix A is supposed to sum up all hydrogen produced for each time step, therefore A is a lower
            # triangular matrix with all entries being -1 (- because we want to make sure that the hydrogen amount does
            # not fall below a certain amount, thus the <= must be turned in a >=, therefore A and b values are all set
            # negative).
            A = [[-1] * (i + 1) + [0] * (n_step - i - 1)
                 for i in range(n_step)]
            # Append A by the negative of itself to set the boundaries that the storage cannot be more than full.
            A_append = [[1] * (i + 1) + [0] * (n_step - i - 1)
                        for i in range(n_step)]
            A += A_append
            # The b value is the sum of the demand for each time step (- because see comment above).
            cumsum_h2_demand = self.h2_demand[self.
                                              current_step:self.current_step +
                                              n_step]
            cumsum_h2_demand = [-float(x) for x in cumsum_h2_demand]

            # Accumulate all demands over time.
            cumsum_h2_demand = np.cumsum(cumsum_h2_demand).tolist()
            # Now the usable hydrogen is added to all values of b except the last one. This allows stored hydrogen to be
            # used but forces the optimization to end the considered time frame with at least as much hydrogen stored
            # as there is now.
            b = [
                x + self.stored_hydrogen - self.storage_buffer
                for x in cumsum_h2_demand
            ]
            b_append = [-x + self.storage_size for x in b]
            b[-1] -= self.stored_hydrogen - self.storage_buffer
            b += b_append
            # Define the bounds for the hydrogen produced.
            x_bound = ((0, self.max_production_per_step), ) * n_step
            # Do the optimization with linprog.
            opt_res = lp(c, A, b, method="interior-point", bounds=x_bound)
            # Return the optimal value for this time slot [kg]
            if opt_res.success:
                opt_production = opt_res.x.tolist()
            else:
                # Case: Linprog couldn't derive optimal result, thus produce as much H2 as possible.
                opt_production = [self.max_production_per_step]
            # print("Electrolyzer bidding - Optimization success is {}".format(opt_res.success))
            electrolyzer_log.info(
                "Electrolyzer bidding - Optimization success is {}".format(
                    opt_res.success))

        elif self.bidding_solver == "quadprog":
            """ Quadratic program """
            from cvxopt import matrix, solvers
            # The optimization problem is formulated in the way:
            # min  0.5 x^T * P * x + q^T * x
            # s.t. G * x <= d
            # x >= 0 and x < max. producible hydrogen

            # Define the electricity costs [EUR/kWh].
            c = self.model.data.utility_pricing_profile[self.current_step:self.
                                                        current_step + n_step]
            # The quadratic matrix P is a diagonal matrix containing the values of c on the diagonal.
            # Create an eye matrix with the size of c.
            P = np.eye(len(c))
            # Multiply c with the eye matrix and convert the matrix back to a list.
            P = P * c

            # q is a vector consisting of 1.5 * max_production_per_step / 0.4 * 2 * c. The formula is derived by the
            # assumption, that the electrolyzer cell voltage rises linearly from 1.5 V when off to 1.9 V when on max.
            # power. The costs are the energy needed multiplied by the energy costs, which can be boiled down to the
            # form (w/o constants) C = (1.5 + 0.4 x / x_max) * x * c, where x_max is the max. H2 production per step.
            # Hereof the quadratic formulation can be derived.
            q = [
                1.5 * self.max_production_per_step / 0.4 * 2 * cost
                for cost in c
            ]

            # Define the inequality matrix (A) and vector (b) that make sure that at no time step the storage is below
            # the wanted buffer value.
            # The matrix A is supposed to sum up all hydrogen produced for each time step, therefore A is a lower
            # triangular matrix with all entries being -1 (- because we want to make sure that the hydrogen amount does
            # not fall below a certain amount, thus the <= must be turned in a >=, therefore A and b values are all set
            # negative).
            A = [[-1.0] * (i + 1) + [0.0] * (n_step - i - 1)
                 for i in range(n_step)]
            # Append A by the negative of itself to set the boundaries that the storage cannot be more than full.
            A_append = [[1.0] * (i + 1) + [0.0] * (n_step - i - 1)
                        for i in range(n_step)]
            A += A_append
            # The constraints that x can only be between 0 and max. production have to be inserted via the matrix A.
            A += np.eye(n_step).tolist()
            eye_neg = -np.eye(n_step)
            A += eye_neg.tolist()

            A = np.array(A).T.tolist()
            # The b value is the sum of the demand for each time step (- because see comment above).
            cumsum_h2_demand = self.h2_demand[self.
                                              current_step:self.current_step +
                                              n_step]
            cumsum_h2_demand = [-float(x) for x in cumsum_h2_demand]

            # Accumulate all demands over time.
            cumsum_h2_demand = np.cumsum(cumsum_h2_demand).tolist()
            # Now the usable hydrogen is added to all values of b except the last one. This allows stored hydrogen to be
            # used but forces the optimization to end the considered time frame with at least as much hydrogen stored
            # as there is now.
            """ IDEA: Maybe the goal should be to have a filling level of half the storage at the end of the opt. """
            b = [
                x + self.stored_hydrogen - self.storage_buffer
                for x in cumsum_h2_demand
            ]
            b_append = [-x + self.storage_size for x in b]
            b[-1] -= self.stored_hydrogen - self.storage_buffer
            b += b_append
            # Set the x boundaries (0 <= x <= max. H2 production per step).
            b += [self.max_production_per_step for _ in range(n_step)]
            b += [0 for _ in range(n_step)]

            # Convert all the lists needed to cvxopt matrix format.
            P = matrix(P)
            q = matrix(q)
            G = matrix(A)
            d = matrix(b)

            # Silence the optimizer output.
            solvers.options['show_progress'] = False
            # Do the optimization with linprog.
            opt_res = solvers.qp(P, q, G, d)
            # Transform cvxopt matrix format to list.
            if opt_res['status'] == 'optimal':
                opt_production = np.array(opt_res['x']).tolist()
                opt_production = [x[0] for x in opt_production]
            else:
                opt_production = [self.max_production_per_step]

            # Return the optimal value for this time slot [kg]
            # print("Electrolyzer bidding - Optimization status is '{}'".format(opt_res['status']))
            electrolyzer_log.info("Optimization status is '{}'".format(
                opt_res['status']))

        elif self.bidding_solver == "dummy":
            """ Return a dummy bid """
            opt_production = [0.1]
            c = [30]

        elif self.bidding_solver == "stepwise":
            # Get the amount of hydrogen missing from the storage buffer [kg].
            min_amount_needed = min(
                self.max_production_per_step,
                max(0, self.storage_buffer - self.stored_hydrogen))
            # Approximate the electricity needed to produce the missing buffer mass (assume efficiency of 65 %) [kWh].
            min_bid = min_amount_needed * 33.3 / 0.65
            # Amount of H2 that can be stored [kg]
            max_mass_storable = min(self.max_production_per_step,
                                    self.storage_size - self.stored_hydrogen)
            # Approximate the electricity buyable to fill storage (assume efficiency of 65 %) [kWh].
            max_bid = max_mass_storable * 33.3 / 0.65
            # Generate the bids.
            # Bids are in the format [price [EUR/kWh], volume[kWh], ID]
            bids = []
            if min_bid > 0:
                # Case: There is an amount that should definitely be bought.
                bids.append([0.25, min_bid, self.id])
                # Subtract the amount needed from the amount that could be bought on top of that.
                max_bid -= min_bid

            if max_bid > 0:
                # Split max bid to 4 equal sections, one for 20 ct/kWh, one for 15, 10, and 5.
                bids.append([self.stepwise_bid_price[0], max_bid / 4, self.id])
                bids.append([self.stepwise_bid_price[1], max_bid / 4, self.id])
                bids.append([self.stepwise_bid_price[2], max_bid / 4, self.id])
                bids.append([self.stepwise_bid_price[3], max_bid / 4, self.id])

            # Place the bids
            for bid in bids:
                self.model.auction.bid_list.append(bid)

            return

        else:
            """ No valid solver """
            raise ValueError('Electrolyzer: No valid solver name given.')

        # self.plot_optimization_result(opt_production, cumsum_h2_demand, c)

        # Return the energy value needed for the optimized production and the price [kWh, EUR/kWh]
        energy_demand = self.get_power_by_production(
            opt_production[0]) * self.interval_time / 60
        price = c[0]

        if energy_demand == 0:
            # Case: Do not bid.
            self.bid = None
            self.trading_state = None
        else:
            # Case: Bid on energy.
            self.bid = [price, energy_demand, self.id]
            self.trading_state = "buying"
Example No. 13
    def solve(self):
        res = lp(self.c, self.A_ub, self.b, self.A_eq, self.d)
        if res['status'] != 0:
            return res['message']
        return res['x'].reshape(self.shape)
Example No. 14
# solve the linear prog for simplex and interior point

results = []  # Empty array for values of primal equation
landas = []  # Empty array for values of lambda
tolerance = 0.01  # Level of tolerance for the difference of lambda
landa = 0.01  # Init Lambda
landaNext = 0.01  # Init lambda Next = lambda
diff = np.inf  # Init difference to infinity
i = 1  # Init counter i, 1 for the step divisor.
# Loop until the difference between lambda_i and lambda_i+1 drops below the set tolerance (or 150 iterations are reached)
while diff > tolerance and i < 150:
    # Update Value of lambda for the lambda obtained in previous iteration
    landa = landaNext
    # Calculate the C~ = C + lambda*t  and minimize using linprog
    CplusLambdaT = C + landa * t
    res = lp(CplusLambdaT, A_eq=Aeq, b_eq=beq, bounds=bounds)
    # Calculate the primal value with the result of the minimization for plotting later
    primal = res.fun - T * landa
    # Calculate the gradient of C*X + lambda(t*x + T)
    gradient = np.dot(t, res.x) - T
    # Update Value of step, inversily proportional to the index of iteration
    step = 1 / i
    # Calculate the next lambda by moving the value "step" over the gradient
    # and compute difference between current lambda
    landaNext = landa + step * gradient
    diff = abs(landaNext - landa)
    # Update index value, and append calculated values to list for plotting later
    i += 1
    results.append(primal)
    landas.append(landa)
    print(i, step, gradient, landa, landaNext, diff)
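
# Optionally plot the values collected above (a sketch; assumes matplotlib is installed).
import matplotlib.pyplot as plt

plt.plot(results, label="primal value")
plt.plot(landas, label="lambda")
plt.xlabel("iteration")
plt.legend()
plt.show()
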
Example No. 15
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])

# transform Node to Node matrix to Node Arc matrix, using the function made in basic utils EX0
NA, arcs = nodoNodoANodoArco(NN)

# load cost array of arcs dimensions
C = np.array(
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1])

# "linear equality constraint" of nodes dimensions
b = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])

## load bounds of arcs dimensions
ubs = np.array([
    np.inf, np.inf, np.inf, np.inf, 10, 5, 10, 5, 5, 5, 3, 3, 6, 3, 6, 3, 3, 3,
    np.inf, np.inf, np.inf, np.inf, np.inf
])
bounds = tuple((0, ub) for ub in ubs)

# solve the linear prog for simplex and interior point
res = lp(C, A_eq=NA, b_eq=b, bounds=bounds)
res_ip = lp(C, A_eq=NA, b_eq=b, bounds=bounds, method='interior-point')
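
# The cost vector is zero except for -1 on the last arc, so -res.fun is the flow pushed
# through that arc, i.e. the max-flow objective in this formulation (a sketch; assumes
# the solves above succeeded).
print("max flow (default method):", -res.fun)
print("max flow (interior point):", -res_ip.fun)
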
Example No. 16
	def min_cost(self, groups=['F 19-30', 'M 19-30'], **kwargs):

		if len(self.ndf) == 0 or len(self.foods_spreadsheet) == 0:
			raise Exception("Please get a nutrition dataframe/spreadsheet for food first." )

		def average_of_groups(group,table):
			if len(group) == 1:
				return table[group[0]]


			return table[group[0]] + average_of_groups(group[1:],table)

		min_table = pd.read_csv('./diet_minimums.csv').set_index('Nutrition')
		max_table = pd.read_csv('./diet_maximums.csv').set_index('Nutrition')

		min_of_groups = average_of_groups(groups,min_table) / len(groups)
		max_of_groups = average_of_groups(groups,max_table) / len(groups)

		def test1(ndf):

			ndf['NDB Quantity'] = ndf[['Quantity','Units']].T.apply(lambda x : ndb.ndb_units(x['Quantity'],x['Units']))

			ndf['NDB Price'] = ndf['Price']/ndf['NDB Quantity']

			return ndf

		df = pd.read_csv(self.foods_spreadsheet)
		df['Quantity'] = [float(i) for i in df['Quantity']]
		df['Price'] = [float(i[1:]) for i in df['Price']]

		if 'price_delta' in kwargs:

			df['Price'] = df['Price'].where(df['Food'] != kwargs['price_delta'][0], df['Price']* (1 +  (kwargs['price_delta'][1])/100)  )



		df = test1(df)
		D = self.ndf



		df = df.dropna(how='any')
		Prices = df.groupby('Food')['NDB Price'].min()


		regx1 = "[fFmM]{1}"
		regx2 = r"[^\w ]+"

		tol = 1e-6 # Numbers in solution smaller than this (in absolute value) treated as zeros

		c = Prices.apply(lambda x:x.magnitude).dropna()

		# Compile list that we have both prices and nutritional info for; drop if either missing
		# Drop nutritional information for foods we don't know the price of,
		# and replace missing nutrients with zeros.
		Aall = D[c.index].fillna(0)


		# Drop rows of A that we don't have constraints for.
		Amin = Aall.loc[min_of_groups.index]
		Amax = Aall.loc[max_table.index]

		# Minimum requirements involve multiplying constraint by -1 to make <=.
		A = pd.concat([-Amin,Amax])
		b = pd.concat([-min_of_groups,max_of_groups]) # Note sign change for min constraints



		result = lp(c, A, b, method='interior-point', options={"presolve": False, "maxiter": 5000})

		# Put back into nice series
		diet = pd.Series(result.x,index=c.index)


		if 'price_delta' not in kwargs:
			self.ot = str(result.fun)

		self.result_comp = diet[diet >= tol]
		self.result_cost = str(result.fun)





		temp1 = 'the average of the (' + ', '.join(groups) + ')' if len(groups) > 1 else groups[0]

		print("Cost of diet for %s is $%4.2f per day." % (temp1,result.fun))
		print("\nYou'll be eating (in 100s of grams or milliliters):")
		print(diet[diet >= tol])  # Drop items with quantities less than precision of calculation

		tab = pd.DataFrame({"Outcome":np.abs(A).dot(diet),"Recommendation":np.abs(b)})

		print("\nWith the following nutritional outcomes of interest:")
		print(tab)
		print("\nConstraining nutrients are:")

		excess = tab.diff(axis=1).iloc[:,1]

		print(excess.loc[np.abs(excess) < tol].index.tolist())
Example No. 17
# Objective function coefficients
cFuncionMin = [1, 1] # 1x + 1y

# Constraint coefficients
aRestricciones = [[50, 24], [30, 33]] # [50x + 24y], [30x + 33y]
bRestricciones = [2400, 2100]

# Bounds
limitesX = (0, None)
limitesY = (0, None)

#%% PROGRAM
# Settings for running the optimization algorithm
cFuncionMax = -np.array(cFuncionMin) # Maximization instead of minimization
limites = (limitesX, limitesY) 

# Run the optimization
solucion = lp(cFuncionMax, A_ub = aRestricciones, b_ub = bRestricciones, bounds = limites)
funcionReal = - solucion.fun # A maximization was performed instead of a minimization

#%% PRINT RESULTS
print()
print('Execution result:')
print(solucion)
print()

print('Solution:')
print('X = {:.4f}'.format(solucion.x[0]))
print('Y = {:.4f}'.format(solucion.x[1]))
print('Objective function evaluated at the solution: {:.4f}'.format(funcionReal))
Example No. 18
# Drop nutritional information for foods we don't know the price of,
# and replace missing nutrients with zeros.
Aall = D[c.index].fillna(0)

# Drop rows of A that we don't have constraints for.
Amin = Aall.loc[bmin.index]

Amax = Aall.loc[bmax.index]

# Minimum requirements involve multiplying constraint by -1 to make <=.
A = pd.concat([-Amin, Amax])

b = pd.concat([-bmin, bmax])  # Note sign change for min constraints

# Now solve problem!
result = lp(c, A, b, method='interior-point')

# Put back into nice series
diet = pd.Series(result.x, index=c.index)
'''
 print("Cost of diet for %s is $%4.2f per day." % (group,result.fun))
 print("\nYou'll be eating (in 100s of grams or milliliters):")
 print(diet[diet >= tol])  # Drop items with quantities less than precision of calculation.'''

tab = pd.DataFrame({
    "Outcome": np.abs(A).dot(diet),
    "Recommendation": np.abs(b)
})
'''print("\nWith the following nutritional outcomes of interest:")
print(tab)
Example No. 19
def lp_solve(date, cur_est_rets, limit_factors, cur_benchmark_wt, industry_map, stocks_in_index_wei=1, num_multi=5):
    """
    线性规划计算函数:
    输入:截面预期收益,约束条件(风险因子),截面标的指数成分股权重,个股权重约束倍数
    输出:经优化后的组合内个股权重
    """

    '''
    scipy.optimize.linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None, method='interior-point',
                           callback=None, options=None, x0=None)
    minimize:
        c @ x
    such that:
        A_ub @ x <= b_ub
        A_eq @ x == b_eq
        lb <= x <= ub
    '''
    # b_eq is the benchmark's risk exposure; by forcing the portfolio's exposures to equal the benchmark's,
    # the portfolio is made neutral with respect to the risk factors and industries.
    # np.r_ stacks the rows into one matrix: factor exposures must equal the index's factor exposures, and the weights must sum to 1.

    # Industry dummy variables
    dummies = pd.get_dummies(industry_map[industry_map.columns[0]])
    # Merge
    data = pd.concat([cur_est_rets, limit_factors, cur_benchmark_wt, dummies], axis=1)

    # Select stocks only from the index constituents
    if stocks_in_index_wei == 1:
        # Drop all stocks that are not index constituents
        data = data.dropna(how='any', axis=0)
        cur_est_rets, limit_factors, cur_benchmark_wt, dummies = (data[cur_est_rets.columns], data[limit_factors.columns],
                                                                  data[cur_benchmark_wt.columns], data[dummies.columns])
        cur_benchmark_wt = cur_benchmark_wt / cur_benchmark_wt.sum()

        c = cur_est_rets.values.flatten()
        A_ub = None
        b_ub = None
        A_eq = np.r_[limit_factors.T.values, dummies.T.values, np.repeat(1, len(limit_factors)).reshape(1, -1)]
        b_eq = np.r_[np.dot(limit_factors.T, cur_benchmark_wt), np.dot(dummies.T, cur_benchmark_wt),
                     np.array([1]).reshape(-1, 1)]

        # Bounds on the stock weights: minimum 0, maximum num_multi (5) times the index weight.
        bounds = tuple([(0, num_multi * wt_in_index) for wt_in_index in cur_benchmark_wt.values])
        try:
            res = lp(-c, A_ub, b_ub, A_eq, b_eq, bounds)
        except Exception as e:
            res = lp(-c, A_ub, b_ub, A_eq, b_eq, bounds, method='interior-point')
            print('{}: the simple method could not find a solution'.format(date))
        cur_wt = pd.Series(res.x, index=cur_est_rets.index)

        # (cur_wt > 0).sum()
        # (cur_wt > 0.01).sum()

    # Some non-constituent stocks may also be selected
    else:
        # Set the NaN weights of non-constituent stocks to 0 so they are not dropped later
        data[cur_benchmark_wt.columns] = data[cur_benchmark_wt.columns].fillna(0)
        # Drop the remaining NaNs
        data = data.dropna(how='any', axis=0)
        cur_est_rets, limit_factors, cur_benchmark_wt, dummies = (data[cur_est_rets.columns], data[limit_factors.columns],
                                                                  data[cur_benchmark_wt.columns], data[dummies.columns])

        cur_benchmark_wt = cur_benchmark_wt / cur_benchmark_wt.sum()
        # Flag whether each stock is an index constituent
        not_in_benchmark = deepcopy(cur_benchmark_wt)
        not_in_benchmark[cur_benchmark_wt == 0.0] = 1
        not_in_benchmark[cur_benchmark_wt != 0.0] = 0

        c = cur_est_rets.values.flatten()
        A_ub = not_in_benchmark.T.values
        b_ub = np.array([1 - stocks_in_index_wei])

        A_eq = np.r_[limit_factors.T.values, dummies.T.values, np.repeat(1, len(limit_factors)).reshape(1, -1)]
        b_eq = np.r_[np.dot(limit_factors.T, cur_benchmark_wt), np.dot(dummies.T, cur_benchmark_wt),
                     np.array([1]).reshape(-1, 1)]

        # Compute the industry weights
        tmp = pd.concat([cur_benchmark_wt, industry_map], axis=1).fillna(0)
        grouped = tmp.groupby(tmp.columns[-1])
        tmp_indus_wei = pd.Series()
        for k, v in grouped:
            su = v[cur_benchmark_wt.columns].sum().values[0]
            tmp_indus_wei[k] = su
        tmp['indus_wei'] = None
        for i in range(0, len(tmp.index)):
            tmp.loc[tmp.index[i], 'indus_wei'] = tmp_indus_wei[tmp.loc[tmp.index[i], '申万一级行业']]
        bounds_tmp = []
        for v in tmp['indus_wei'].values:
            if v > 0:
                bounds_tmp.append([(0,  v/3)])
            else:
                bounds_tmp.append([(0, 0.0001)])
        bounds1 = tuple(bounds_tmp)

        bounds = tuple([(0, 1) for i in cur_benchmark_wt.values])

        res = lp(-c, A_ub, b_ub, A_eq, b_eq, bounds, method='interior-point')
        cur_wt11 = pd.Series(res.x, index=cur_est_rets.index)
        # Debug checks on the interior-point solution:
        # (cur_wt11 > 0).sum()
        # (cur_wt11 > 0.001).sum()
        # (cur_wt11 > 0.03).sum()
        # cur_wt11.sum()

        res = lp(-c, A_ub, b_ub, A_eq, b_eq, bounds)
        cur_wt = pd.Series(res.x, index=cur_est_rets.index)

    return cur_wt