Example #1
def init_solution(C, df_items, df_orders, p_max):
    jobs = []
    sd_p = [0] * p_max
    for p in range(p_max):
        jobs.append(
            Picker(p, batches=[Batch(b=0, sd=0, pt=0, weight=0, orders=[])]))
    for row in df_orders.itertuples():
        order1, weight, pt = row.Index, row.weight, row.pt
        for picker in jobs:
            batch = picker.batches[-1]
            # If the order fits into this picker's last batch, sd_p is that batch's start date (sd)
            if batch.weight + weight <= C:
                # CASE 1
                sd_p[picker.p] = (batch.sd, 1)
            else:
                # CASE 2
                ct = batch.sd + batch.pt
                sd_p[picker.p] = (ct, 2)
        p_star = sd_p.index(min(sd_p))
        case = sd_p[p_star][1]
        if case == 2:
            prev = jobs[p_star].batches[-1]
            sd = prev.sd + prev.pt
            b = len(jobs[p_star].batches)
            jobs[p_star].batches.append(
                Batch(b=b, sd=sd, pt=0, weight=0, orders=[]))
        batch = jobs[p_star].batches[-1]
        batch.orders.append(order1)
        # The routing time of a batch must be recomputed over all of its orders together, not by simply summing the individual pt values!
        batch.routing_time(df_items)
        batch.weight += weight
    return jobs
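A minimal usage sketch, borrowing the data files and parameters that Example #3 loads; the Picker and Batch classes are assumed to be defined as in these examples:

import pandas as pd

df_items = pd.read_csv(r'./data/orders15.csv')
df_orders = pd.read_csv('./data/due_dates0.7.csv', index_col=0)
df_orders = df_orders.sort_values(by=['dt'], ascending=True)  # process orders by earliest due date

jobs = init_solution(C=7, df_items=df_items, df_orders=df_orders, p_max=2)
for picker in jobs:
    print(picker.p, [b.orders for b in picker.batches])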
Example #2
def init_solution(C, df_items, df_orders, p_max):
    jobs = []
    sd_p = [0] * p_max
    for p in range(p_max):
        jobs.append(
            Picker(p, batches=[Batch(b=0, sd=0, pt=0, weight=0, orders=[])]))
    for row in df_orders.itertuples():
        order1, weight, pt = row.Index, float(row.weight), row.pt
        for picker in jobs:
            batch = picker.batches[-1]
            # Each picker's priority is the completion time (ct) of its last batch
            ct = batch.sd + batch.pt
            sd_p[picker.p] = ct
        p_star = sd_p.index(min(sd_p))
        prev = jobs[p_star].batches[-1]
        sd = prev.sd + prev.pt
        b = len(jobs[p_star].batches)
        jobs[p_star].batches.append(
            Batch(b=b, sd=sd, pt=0, weight=0, orders=[]))
        batch = jobs[p_star].batches[-1]
        batch.orders.append(order1)
        # The routing time of a batch must be recomputed over all of its orders together, not by simply summing the individual pt values!
        batch.routing_time(df_items)
        batch.weight += weight
    for p in jobs:
        p.re_routing(df_items)
    order_lst = list(df_orders.index)
    s_inc = jobs
    while len(order_lst):
        order_min = order_lst[0]
        w1 = df_orders.loc[order_min, 'weight']
        order_lst.remove(order_min)
        pre_tard = tard(df_orders, s_inc)[0]
        p1, b1 = find_order(s_inc, order_min)
        pre_tard, n_star, s_inc = push_in(C, b1, df_items, df_orders, s_inc,
                                          order_lst, p1, pre_tard, w1)
        if n_star != -1:
            order_lst.remove(n_star)
        while s_inc[p1].batches[b1].weight < C and pre_tard > 0:
            pre_tard, n_star, s_inc = push_in(C, b1, df_items, df_orders,
                                              s_inc, order_lst, p1, pre_tard,
                                              s_inc[p1].batches[b1].weight)
            if n_star != -1:
                order_lst.remove(n_star)

    return s_inc
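The helpers tard, find_order, and push_in are not shown in these examples. Judging by the commented line tardy_jobs, tardiness = tard(df_orders, jobs) in Example #3 and the lexicographic comparison of its result, tard appears to return a (tardy count, total tardiness) pair. A minimal sketch under that assumption, taking a batch's completion time as sd + pt:

def tard(df_orders, jobs):
    # Assumed semantics: count tardy orders and sum their lateness, where a
    # batch completes at sd + pt and an order's due date is df_orders['dt'].
    tardy_count, total_tardiness = 0, 0.0
    for picker in jobs:
        for batch in picker.batches:
            ct = batch.sd + batch.pt
            for order in batch.orders:
                lateness = max(0.0, ct - df_orders.loc[order, 'dt'])
                if lateness > 0:
                    tardy_count += 1
                total_tardiness += lateness
    return tardy_count, total_tardiness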
Example #3
def run(p_max, N, C, mtcr):
    # NOTE: the arguments are immediately overridden with fixed experiment values
    N = 15
    C = 7
    p_max = 2
    mtcr = 0.7  # modified traffic congestion rate
    # df_items = prod_order(n=N)
    df_items = pd.read_csv(r'./data/orders15.csv')
    # # Different routing strategies produce different pick paths
    # # The S-shape strategy handles odd- and even-numbered aisles as two separate cases
    # df_orders = prod_due_dates(df_items, mtcr, p_max)
    df_orders = pd.read_csv('./data/due_dates0.7.csv', index_col=0)
    df_orders = df_orders.sort_values(by=['dt'], ascending=True)

    # Generate the initial solution with the Earliest Start Date rule,
    # then compute the resulting tardiness.
    jobs = []
    sd_p = [0] * p_max
    for p in range(p_max):
        jobs.append(
            Picker(p, batches=[Batch(b=0, sd=0, pt=0, weight=0, orders=[])]))
    for row in df_orders.itertuples():
        order1, weight, pt = row.Index, row.weight, row.pt
        for picker in jobs:
            batch = picker.batches[-1]
            # If the order fits into this picker's last batch, sd_p is that batch's start date (sd)
            if batch.weight + weight <= C:
                # CASE 1
                sd_p[picker.p] = (batch.sd, 1)
            else:
                # CASE 2
                ct = batch.sd + batch.pt
                sd_p[picker.p] = (ct, 2)
        p_star = sd_p.index(min(sd_p))
        case = sd_p[p_star][1]
        if case == 2:
            prev = jobs[p_star].batches[-1]
            sd = prev.sd + prev.pt
            b = len(jobs[p_star].batches)
            jobs[p_star].batches.append(
                Batch(b=b, sd=sd, pt=0, weight=0, orders=[]))
        batch = jobs[p_star].batches[-1]
        batch.orders.append(order1)
        # The routing time of a batch must be recomputed over all of its orders together, not by simply summing the individual pt values!
        batch.routing_time(df_items)
        batch.weight += weight
    # with open(r'./data/jobs.pkl', 'wb') as file:
    #     pickle.dump(jobs, file)
    # The initial solution is ready; next, generate neighborhood solutions, starting with bsw2.
    # This can be viewed as a Picker method: one Picker swaps a Batch with another Picker.
    # tardy_jobs, tardiness = tard(df_orders, jobs)

    s_inc = jobs
    l = 1  # index of the current neighborhood structure
    tardy_pair_inc = tard(df_orders, s_inc)
    while l <= 5:
        tardy_pair_inc = tard(df_orders, s_inc)
        print(p_max, N, C, mtcr, tardy_pair_inc)
        neighbors = neighbor_l(s_inc, df_items, df_orders, C, l=l)
        f_s = {}
        for solution in neighbors:
            tardy_pair = tard(df_orders, solution)
            f_s[tardy_pair] = solution
        if f_s:
            # lexicographic: fewer tardy orders first, then lower total tardiness
            tardy_pair_star = min(f_s)
            s_star = f_s[tardy_pair_star]
            if tardy_pair_star < tardy_pair_inc:
                s_inc = s_star
                l = 1
            else:
                l += 1
        else:
            l += 1
    return tardy_pair_inc, s_inc
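The loop over l is a variable neighborhood descent: a strictly improving neighbor resets the search to the first neighborhood (l = 1), otherwise the next neighborhood is tried, and the search stops after l = 5. A hedged usage sketch (the arguments are overridden inside run, so the values passed are placeholders):

tardy_pair, best_solution = run(p_max=2, N=15, C=7, mtcr=0.7)
print(tardy_pair)  # assumed to be a (tardy count, total tardiness) pair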
Example #4
# Optimize the model; add_ct is the lazy-constraint callback invoked during the solve
m._vars = vars
m.params.LazyConstraints = 1
m.optimize(add_ct)

solution = m.getAttr('x', vars)
selected = [(j, p, k) for p in range(p_max) for k in range(K) for j in range(n)
            if solution[j, p, k] > 0.5]
# assert len(subtour(selected)) == n
print('Optimal cost: %g' % m.objVal)

jobs = []
for p in range(p_max):
    jobs.append(Picker(p, batches=[]))
ans = {}
for j, p, k in selected:
    ans.setdefault((p, k), []).append(df_orders.index[j])
for (p, k), batch_orders in ans.items():
    w = 0
    for order in batch_orders:
        w += df_orders.loc[order, 'weight']
    # batch index b counts the batches already assigned to this picker
    jobs[p].batches.append(Batch(b=len(jobs[p].batches), weight=w, orders=batch_orders))

for job in jobs:
    job.re_routing(df_items)
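The add_ct callback handed to m.optimize is not shown. A minimal sketch of the standard Gurobi lazy-constraint callback pattern it presumably follows; the actual violation check is problem-specific and omitted here:

from gurobipy import GRB

def add_ct(model, where):
    # Lazy constraints may only be added when Gurobi reports a new
    # integer-feasible solution (MIPSOL).
    if where == GRB.Callback.MIPSOL:
        vals = model.cbGetSolution(model._vars)
        # Inspect vals for violated constraints, then cut them off, e.g.:
        # model.cbLazy(some_expr <= some_rhs)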