import pandas as pd
import pulp
from scipy.spatial.distance import cdist
from sklearn.preprocessing import MinMaxScaler
from k_means_constrained import KMeansConstrained


def main(input_path="ubicaciones.csv",
         balance_deviations=[0.1, 0.15, 0.2, 0.3]):
    """
    Use different balance deviations to test complex lp function to minimize.
    Args:
        input_path : csv path with information regarding agencies, frequency, volume and coordinates
        balance_deviations = list with percentage of deviations, for calaculation refer to stops_gap and items_gap
    Returns:
        None
    """
    df = pd.read_csv(input_path)
    df.loc[df[df["Vol_Entrega"] == 0].index, "Vol_Entrega"] = 1

    zones = ["D1", "D2", "D3", "D4", "D5", "D6"]
    agencies = list("A" + df["Id_Cliente"].astype(str))
    vol_delivery = list(df["Vol_Entrega"])
    vol_stores = list(df["Vol_Entrega"] * df["Frecuencia"])
    frequency = list(df["Frecuencia"])
    stores_volume = dict(zip(agencies, vol_stores))
    stores_frequency = dict(zip(agencies, frequency))
    vol_delivery = dict(zip(agencies, vol_delivery))

    scaler = MinMaxScaler()
    fitted_scaler = scaler.fit(df[["lat", "lon"]])
    scaled_coordinates = fitted_scaler.transform(df[["lat", "lon"]])

    kmeans = KMeansConstrained(n_clusters=6,
                               size_min=604,
                               size_max=605,
                               random_state=12,
                               n_init=100,
                               max_iter=200,
                               n_jobs=-1)
    kmeans.fit(scaled_coordinates)
    df["kmeans"] = kmeans.predict(scaled_coordinates)

    vectorized_lat_lon = df[["lat", "lon"]].to_numpy()
    cluster_centers = fitted_scaler.inverse_transform(kmeans.cluster_centers_)
    distance_matrix = cdist(cluster_centers,
                            vectorized_lat_lon,
                            metric="cityblock")

    routes = [(z, a) for z in zones for a in agencies]
    distances = pulp.makeDict([zones, agencies], distance_matrix, 0)
    flow = pulp.LpVariable.dicts("Distribution", (zones, agencies), 0, None)
    using = pulp.LpVariable.dicts("BelongstoZone", (zones, agencies), 0, 1,
                                  pulp.LpInteger)

    for percentage in balance_deviations:
        prob = pulp.LpProblem("BrewingDataCup2020_" + str(percentage),
                              pulp.LpMinimize)
        prob += pulp.lpSum([
            distances[z][a] * flow[z][a] for (z, a) in routes
        ]) + pulp.lpSum([distances[z][a] * using[z][a]
                         for (z, a) in routes]), "totalCosts"
        stops_upper, stops_lower = stops_gap(percentage)
        distr_upper, distr_lower = items_gap(percentage)
        for z in zones:
            prob += pulp.lpSum([using[z][a] for a in agencies
                                ]) <= stops_upper, "SumStopsInZoneUpper %s" % z
            prob += pulp.lpSum([using[z][a] for a in agencies
                                ]) >= stops_lower, "SumStopsInZoneLower %s" % z
            prob += pulp.lpSum([flow[z][a] for a in agencies
                                ]) <= distr_upper, "SumDistrInZoneUpper %s" % z
            prob += pulp.lpSum([flow[z][a] for a in agencies
                                ]) >= distr_lower, "SumDistrInZoneLower %s" % z
        for z in zones:
            for a in agencies:
                # Big-M link: flow can only be positive if the agency is assigned to the zone
                prob += flow[z][a] - (100000 * using[z][a]) <= 0
                prob += flow[z][a] <= vol_delivery[a]
        for a in agencies:
            prob += pulp.lpSum([flow[z][a] for z in zones
                                ]) >= stores_volume[a], "Distribution %s" % a
            prob += pulp.lpSum([
                using[z][a] for z in zones
            ]) == stores_frequency[a], "FrequencyDistribution %s" % a

        prob.writeLP("lp_files/milp_brewing_" + str(percentage) + ".lp")
        solver = pulp.CPLEX_CMD(path=path_to_cplex)
        prob.solve(solver)
        print("Estado: ", pulp.LpStatus[prob.status])
        print("Total Cost: ", pulp.value(prob.objective))

        final_df = pd.DataFrame(columns=["D1", "D2", "D3", "D4", "D5", "D6"],
                                index=(range(1, 3626)))
        final_distr = dict()
        for v in prob.variables():
            # Assignment variables are named "BelongstoZone_<zone>_A<client id>"
            if v.name.startswith("BelongstoZone_") and v.varValue > 0:
                assignment = v.name[len("BelongstoZone_"):]
                zone = assignment[:2]
                id_cliente = int(assignment[4:])
                final_df.loc[id_cliente, zone] = 1

        final_df.fillna(0, inplace=True)
        final_df = final_df.astype(int).reset_index().rename(
            columns={"index": "Id_Cliente"})
        final_df.to_csv("lp_solutions/cplex_opt_" + str(percentage) + "_" +
                        str(pulp.value(prob.objective)) + ".csv",
                        header=True,
                        index=False)
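The function above calls two helpers, stops_gap and items_gap, and a path_to_cplex constant that are not included in this example. Below is a minimal, hypothetical sketch of what those helpers might compute, assuming each one returns (upper, lower) bounds equal to the per-zone average widened by the given deviation percentage; the totals are placeholders, not values from the original code.

# Hypothetical sketch only -- stops_gap / items_gap are not part of the original snippet.
# Assumption: bounds are the per-zone mean widened by +/- the deviation percentage.
N_ZONES = 6
TOTAL_STOPS = 0   # placeholder: e.g. df["Frecuencia"].sum() from the input CSV
TOTAL_ITEMS = 0   # placeholder: e.g. (df["Vol_Entrega"] * df["Frecuencia"]).sum()


def stops_gap(percentage):
    """(upper, lower) bounds on the number of stops assigned to one zone."""
    mean_stops = TOTAL_STOPS / N_ZONES
    return mean_stops * (1 + percentage), mean_stops * (1 - percentage)


def items_gap(percentage):
    """(upper, lower) bounds on the delivered volume assigned to one zone."""
    mean_items = TOTAL_ITEMS / N_ZONES
    return mean_items * (1 + percentage), mean_items * (1 - percentage)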
Example #2
def test_float_precision():
    km = KMeansConstrained(n_init=1, random_state=30)

    inertia = {}
    X_new = {}
    centers = {}

    for dtype in [np.float64, np.float32]:
        X_test = X.astype(dtype)
        km.fit(X_test)
        # dtype of cluster centers has to be the dtype of the input
        # data
        assert_equal(km.cluster_centers_.dtype, dtype)
        inertia[dtype] = km.inertia_
        X_new[dtype] = km.transform(X_test)
        centers[dtype] = km.cluster_centers_
        # ensure the extracted row is a 2d array
        assert_equal(km.predict(X_test[:1]),
                     km.labels_[0])
        if hasattr(km, 'partial_fit'):
            km.partial_fit(X_test[0:3])
            # dtype of cluster centers has to stay the same after
            # partial_fit
            assert_equal(km.cluster_centers_.dtype, dtype)

    # compare arrays with low precision since the difference between
    # 32 and 64 bit sometimes makes a difference up to the 4th decimal
    # place
    assert_array_almost_equal(inertia[np.float32], inertia[np.float64],
                              decimal=4)
    assert_array_almost_equal(X_new[np.float32], X_new[np.float64],
                              decimal=4)
    assert_array_almost_equal(centers[np.float32], centers[np.float64],
                              decimal=4)
Example #3
def test_predict():
    km = KMeansConstrained(n_clusters=n_clusters, random_state=42)

    km.fit(X)

    # sanity check: predict centroid labels
    pred = km.predict(km.cluster_centers_)
    assert_array_equal(pred, np.arange(n_clusters))

    # sanity check: re-predict labeling for training set samples
    pred = km.predict(X)
    assert_array_equal(pred, km.labels_)

    # re-predict labels for training set using fit_predict
    pred = km.fit_predict(X)
    assert_array_equal(pred, km.labels_)
Example #4
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from k_means_constrained import KMeansConstrained


df = pd.read_csv("https://raw.githubusercontent.com/JavierLilly/Proyecto_Eco/main/BDC_DATA.csv")

# Standardize the coordinates
data = df[['lat', 'lon']].values.astype('float32', copy=False)
scaler = StandardScaler().fit(data)
data_scal = scaler.transform(data)
df_ = df  # note: df_ aliases df, so the original DataFrame is modified in place
df_[['lat', 'lon']] = data_scal

# Build the min/max-size constrained clustering model
coor = df_[['lat', 'lon']]
model = KMeansConstrained(n_clusters=6, size_min=600, size_max=700, random_state=5565280).fit(coor)
y = model.predict(coor)  # predicted cluster labels
df_['cluster'] = y

# Plot all points with Frecuencia >= 1
cdict = {0: 'red', 1: 'black', 2: 'yellow', 3: 'green', 4: 'blue', 5: 'grey'}
plt.figure(figsize=(10, 10))
sns.set()
for g in np.unique(y):
    plt.scatter(coor['lat'][y == g], coor['lon'][y == g], c=cdict[g], label=g, s=60)
# plt.scatter(df['lat'][df['Frecuencia']==2], df['lon'][df['Frecuencia']==2], c='purple', s=80, alpha=.5)
# plt.scatter(df['lat'][df['Frecuencia']==3], df['lon'][df['Frecuencia']==3], c='brown', s=150)

plt.legend()

# Reduce the data
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 17 19:13:48 2020

@author: lcota
"""

from k_means_constrained import KMeansConstrained
import numpy as np

# Small example dataset (X is not defined in the original snippet)
X = np.array([[1, 2], [1, 4], [1, 0],
              [4, 2], [4, 4], [4, 0]])

clf = KMeansConstrained(n_clusters=2, size_min=2, size_max=5, random_state=0)
clf.fit(X)

clf.cluster_centers_
clf.predict([[0, 0], [4, 4]])
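As in Example #3 above, fitting and labeling the training data can also be done in one call; a brief usage note, assuming the same clf and X as in the snippet:

labels = clf.fit_predict(X)  # equivalent to clf.fit(X) followed by reading clf.labels_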