Example #1
def First_Add(VMs,T_k,T_m,CPU_kernel,CPU_memory,condition,N):
    if(condition==0):
        # rank VMs by (memory - kernels) ascending, i.e. kernel-heavy first
        index=[]
        for i in range(len(VMs)):
            index.append(VMs[i][2]-VMs[i][1])
        idx=utils.argsort(index)
        end=0
        # greedily add VMs until kernel demand would exceed
        # N CPUs' kernels plus a 10% headroom
        for i in range(len(VMs)):
            if((T_k+VMs[idx[i]][1])>(N*CPU_kernel+0.1*CPU_kernel)):
                break
            else:
                T_k=T_k+VMs[idx[i]][1]
                end=i
        TVM=copy.deepcopy(VMs)
        for j in range(end):
            VMs.append(TVM[idx[j]])
    else:
        # rank VMs by (4*kernels - memory) ascending, i.e. memory-heavy first
        index=[]
        for i in range(len(VMs)):
            index.append(VMs[i][1]*4-VMs[i][2])
        idx=utils.argsort(index)
        end=0
        for i in range(len(VMs)):
            if((T_m+VMs[idx[i]][2])>(N*CPU_memory+0.1*CPU_memory)):
                break
            else:
                T_m=T_m+VMs[idx[i]][2]
                end=i
        TVM=copy.deepcopy(VMs)
        for j in range(end):
            VMs.append(TVM[idx[j]])
    return VMs
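
Every example in this listing leans on a utils.argsort helper that is not shown. A minimal sketch of the assumed semantics (return the indices that would sort the sequence, mirroring numpy.argsort), including the key and reverse arguments some examples pass:

def argsort(seq, key=None, reverse=False):
    # Indices that would sort seq (assumed semantics; stable, like sorted())
    key = key if key is not None else (lambda x: x)
    return sorted(range(len(seq)), key=lambda i: key(seq[i]), reverse=reverse)
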
Example #2
def Reload(Total_S, P_SRate, P_SCondition, P_Servers, VMSC):
    BigM = []
    BigK = []
    for i in range(Total_S):
        if ((sum(P_SCondition[i]) != 0) and (min(P_SCondition[i]) == 0)):
            if (P_SCondition[i][0] > 0):
                BigK.append(i)
            else:
                BigM.append(i)
    for i in range(len(BigM)):
        for j in range(len(BigK)):
            P_SCondition[BigM[i]], P_Servers[BigM[i]], P_SCondition[
                BigK[j]], P_Servers[BigK[j]] = superEX(P_SCondition[BigM[i]],
                                                       P_Servers[BigM[i]],
                                                       P_SCondition[BigK[j]],
                                                       P_Servers[BigK[j]])

    for i in range(Total_S):
        if (sum(P_SCondition[i]) != 0):
            if (P_SCondition[i][0] == 0):
                P_SRate[i] = 0
            else:
                P_SRate[i] = (P_SCondition[i][1] / P_SCondition[i][0])

    LenV = len(VMSC)
    VMSC = Adjust_Order(LenV, VMSC)
    TVMS = copy.deepcopy(VMSC)
    for i in range(LenV):
        info = [VMSC[i][1], VMSC[i][2]]
        # select according to rate
        S_rate = [abs(P_SRate[t] - info[1] / info[0]) for t in range(Total_S)]
        idx = utils.argsort(S_rate)[-1]
        if ((P_SCondition[idx][0] >= info[0])
                and (P_SCondition[idx][1] >= info[1])):
            P_Servers[idx].append(VMSC[i])
            P_SCondition[idx][0] = P_SCondition[idx][0] - info[0]
            P_SCondition[idx][1] = P_SCondition[idx][1] - info[1]
            if (P_SCondition[idx][0] == 0):
                P_SRate[idx] = 0
            else:
                P_SRate[idx] = (P_SCondition[idx][1] / P_SCondition[idx][0])
            TVMS.remove(VMSC[i])
        else:
            for j in range((Total_S - 2), -1, -1):
                idx = utils.argsort(S_rate)[j]
                if ((P_SCondition[idx][0] >= info[0])
                        and (P_SCondition[idx][1] >= info[1])):
                    P_Servers[idx].append(VMSC[i])
                    P_SCondition[idx][0] = P_SCondition[idx][0] - info[0]
                    P_SCondition[idx][1] = P_SCondition[idx][1] - info[1]
                    if (P_SCondition[idx][0] == 0):
                        P_SRate[idx] = 0
                    else:
                        P_SRate[idx] = (P_SCondition[idx][1] /
                                        P_SCondition[idx][0])
                    TVMS.remove(VMSC[i])
                    break
    return P_SRate, P_SCondition, P_Servers, TVMS
Example #3
def Allocate(NEPvm,Pvm,N_Stype,Servers):
    VMs=[]
    N_Pvm=len(NEPvm) # The number of predicted VMs
    for i in range(N_Pvm):
        for j in range(int(NEPvm[i])):
            VMs.append(Pvm[i])
    Servers_N=Compute_Servers(VMs,N_Stype,Servers)
    Total_S=sum(Servers_N) #the total number of servers
    P_Servers=[]
    P_SCondition=[]
    P_SRate=[]
    # initialize predicted servers
    for i in range(Total_S):
        P_Servers.append([])
    # initialize condition of each server
    for i in range(N_Stype):
        for j in range(Servers_N[i]):
            P_SCondition.append([Servers[i][1],Servers[i][2]])
            P_SRate.append(Servers[i][2]/Servers[i][1])

    # allocate
    lenVMs=len(VMs)
    VMs=Adjust_Order(lenVMs,VMs)
    VMSC=copy.deepcopy(VMs)
    #print VMSC
    for i in range(lenVMs):
        info=[VMs[i][1],VMs[i][2]]
        # select according to rate
        S_rate=[abs(P_SRate[t]-info[1]/info[0]) for t in range(Total_S)]
        idx=utils.argsort(S_rate)[-1]
        if((P_SCondition[idx][0]>=info[0])and(P_SCondition[idx][1]>=info[1])):
            P_Servers[idx].append(VMs[i])
            P_SCondition[idx][0]=P_SCondition[idx][0]-info[0]
            P_SCondition[idx][1]=P_SCondition[idx][1]-info[1]
            if(P_SCondition[idx][0]==0):
               P_SRate[idx]=0
            else:
               P_SRate[idx]=(P_SCondition[idx][1]/P_SCondition[idx][0])
            VMSC.remove(VMs[i])
        else:
            for j in range((Total_S-2),-1,-1):
                idx=utils.argsort(S_rate)[j]
                if((P_SCondition[idx][0]>=info[0])and(P_SCondition[idx][1]>=info[1])):
                    P_Servers[idx].append(VMs[i])
                    P_SCondition[idx][0]=P_SCondition[idx][0]-info[0]
                    P_SCondition[idx][1]=P_SCondition[idx][1]-info[1]
                    if(P_SCondition[idx][0]==0):
                       P_SRate[idx]=0
                    else:
                       P_SRate[idx]=(P_SCondition[idx][1]/P_SCondition[idx][0])
                    VMSC.remove(VMs[i])
                    break
    P_SRate,P_SCondition,P_Servers,VMSC=Reload.SUPEREX(Total_S,P_SRate,P_SCondition,P_Servers,VMSC)
    P_Servers=EXchange(VMSC,P_SCondition,P_Servers)
    return Servers_N,P_Servers
Example #4
def Cond_Sort(VM, condition):
    T_VM = []
    if (condition == 0):
        for i in range(len(VM)):
            T_VM.append(VM[i][1])
        idx = utils.argsort(T_VM)
        return idx
    else:
        for i in range(len(VM)):
            T_VM.append(VM[i][2])
        idx = utils.argsort(T_VM)
        return idx
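
A hypothetical call, assuming each VM row is laid out as [id, kernels, memory] as in the surrounding examples:

VMs = [["vm1", 2, 4], ["vm2", 1, 8], ["vm3", 4, 4]]
order = Cond_Sort(VMs, condition=0)      # sort by kernels -> [1, 0, 2]
VMs_by_kernels = [VMs[i] for i in order]
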
Example #5
    def __hash__(self):
        s_hashes = [hash(s) for s in self.superitems_pool]
        c_hashes = [hash(c) for c in self.superitems_coords]
        strs = [
            f"{s_hashes[i]}/{c_hashes[i]}" for i in utils.argsort(s_hashes)
        ]
        return hash("-".join(strs))
Example #6
    def sort_by_densities(self, two_dims=False):
        """
        Sort layers in the pool by decreasing density
        """
        densities = self.get_densities(two_dims=two_dims)
        sorted_indices = utils.argsort(densities, reverse=True)
        self.layers = [self.layers[i] for i in sorted_indices]
Example #7
        def _gen_superitems_vertical_subgroup(superitems):
            """
            Vertically stack groups of >= 2 items or superitems with the
            same dimensions to form a taller superitem
            """
            # Add the "width * depth" column and sort superitems
            # in ascending order by that dimension
            wd = [s.width * s.depth for s in superitems]
            superitems = [superitems[i] for i in utils.argsort(wd)]

            # Extract candidate groups made up of >= 2 items or superitems
            slices = []
            for s in range(2, max_vstacked + 1):
                for i in range(0, len(superitems) - (s - 1), s):
                    good = True
                    for j in range(1, s, 1):
                        if (superitems[i + j].width * superitems[i + j].depth
                                >= superitems[i].width * superitems[i].depth
                            ) and (superitems[i].width * superitems[i].depth <=
                                   tol * superitems[i + j].width *
                                   superitems[i + j].depth):
                            good = False
                            break
                    if good:
                        slices += [tuple(superitems[i + j] for j in range(s))]

            # Generate vertical superitems
            subgroup_vertical = []
            for slice in slices:
                subgroup_vertical += [VerticalSuperitem(slice)]

            return subgroup_vertical
Example #8
def First_Delete(VMs, CPU_kernel, CPU_memory, condition):
    # total kernel and memory demand of the predicted VMs
    T_k = 0
    T_m = 0
    for i in range(len(VMs)):
        T_k = T_k + VMs[i][1]
        T_m = T_m + VMs[i][2]

    KN = float(T_k / CPU_kernel)
    MN = float(T_m / CPU_memory)
    if (KN >= MN):
        N = int(KN)
    else:
        N = int(MN)

    if (condition == 0):
        index = []
        for i in range(len(VMs)):
            index.append(VMs[i][2] - VMs[i][1])
        idx = utils.argsort(index)
        end = 0
        for i in range(len(VMs)):
            if ((T_k - VMs[idx[i]][1]) < (N * CPU_kernel + 0.1 * CPU_kernel)):
                break
            else:
                T_k = T_k - VMs[idx[i]][1]
                end = i
        TVM = copy.deepcopy(VMs)
        for j in range(end):
            VMs.remove(TVM[idx[j]])
    else:
        index = []
        for i in range(len(VMs)):
            index.append(VMs[i][1] * 4 - VMs[i][2])
        idx = utils.argsort(index)
        end = 0
        for i in range(len(VMs)):
            if ((T_m - VMs[idx[i]][2]) < (N * CPU_memory + 0.1 * CPU_memory)):
                break
            else:
                T_m = T_m - VMs[idx[i]][2]
                end = i
        TVM = copy.deepcopy(VMs)
        for j in range(end):
            VMs.remove(TVM[idx[j]])
    return VMs
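
A worked toy case for the target computation above: with T_k = 10 kernels, T_m = 36 GB, CPU_kernel = 4 and CPU_memory = 16, we get KN = 2.5 and MN = 2.25, so N = int(2.5) = 2; the loop then strips VMs while keeping at least N * CPU_kernel + 0.1 * CPU_kernel = 8.4 kernels' worth of demand.
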
Example #9
def Rebalance(Total_S, P_SRate, P_SCondition, P_Servers, VMSC):
    # rebalance
    for i in range(Total_S):
        if (sum(P_SCondition[i]) != 0):
            VMSC, P_Servers[i], P_SCondition[i] = D_EX(VMSC, P_Servers[i],
                                                       P_SCondition[i])
        if (P_SCondition[i][0] == 0):
            P_SRate[i] = 0
        else:
            P_SRate[i] = (P_SCondition[i][1] / P_SCondition[i][0])
    LenV = len(VMSC)
    VMSC = Adjust_Order(LenV, VMSC)
    TVMS = copy.deepcopy(VMSC)
    for i in range(LenV):
        info = [VMSC[i][1], VMSC[i][2]]
        # select according to rate
        S_rate = [abs(P_SRate[t] - info[1] / info[0]) for t in range(Total_S)]
        idx = utils.argsort(S_rate)[-1]
        if ((P_SCondition[idx][0] >= info[0])
                and (P_SCondition[idx][1] >= info[1])):
            P_Servers[idx].append(VMSC[i])
            P_SCondition[idx][0] = P_SCondition[idx][0] - info[0]
            P_SCondition[idx][1] = P_SCondition[idx][1] - info[1]
            if (P_SCondition[idx][0] == 0):
                P_SRate[idx] = 0
            else:
                P_SRate[idx] = (P_SCondition[idx][1] / P_SCondition[idx][0])
            TVMS.remove(VMSC[i])
        else:
            for j in range((Total_S - 2), -1, -1):
                idx = utils.argsort(S_rate)[j]
                if ((P_SCondition[idx][0] >= info[0])
                        and (P_SCondition[idx][1] >= info[1])):
                    P_Servers[idx].append(VMSC[i])
                    P_SCondition[idx][0] = P_SCondition[idx][0] - info[0]
                    P_SCondition[idx][1] = P_SCondition[idx][1] - info[1]
                    if (P_SCondition[idx][0] == 0):
                        P_SRate[idx] = 0
                    else:
                        P_SRate[idx] = (P_SCondition[idx][1] /
                                        P_SCondition[idx][0])
                    TVMS.remove(VMSC[i])
                    break
    return P_SRate, P_SCondition, P_Servers, TVMS
Example #10
def Compute_Servers(VMs, N_Stype, Servers):
    Servers_N = [0 for i in range(N_Stype)]
    minL = Limit(N_Stype, Servers)
    T_k, T_m = Total_R(VMs)
    print(T_k, T_m)
    L = []  # the upper limit on servers of each type
    N = []
    for i in range(N_Stype):
        L.append([
            i,
            int(
                max([
                    math.ceil(T_k / Servers[i][1]),
                    math.ceil(T_m / Servers[i][2])
                ]))
        ])
        N.append(L[i][1])
    # Sort low to high
    idx = utils.argsort(N)
    Min = (T_k + T_m)
    S_N = []
    if (N_Stype == 3):
        for t1 in range(N[idx[0]]):
            for t2 in range(N[idx[1]]):
                for t3 in range(N[idx[2]]):
                    M = abs(T_k - (Servers[L[idx[0]][0]][1] * t1) -
                            (Servers[L[idx[1]][0]][1] * t2) -
                            (Servers[L[idx[2]][0]][1] * t3))
                    M = M + abs(T_m - (Servers[L[idx[0]][0]][2] * t1) -
                                (Servers[L[idx[1]][0]][2] * t2) -
                                (Servers[L[idx[2]][0]][2] * t3))
                    if (M < Min):
                        Min = M
                        Servers_N[idx[0]] = t1
                        Servers_N[idx[1]] = t2
                        Servers_N[idx[2]] = t3
    elif (N_Stype == 2):
        for t1 in range(N[idx[0]]):
            for t2 in range(N[idx[1]]):
                M = abs(T_k - (Servers[L[idx[0]][0]][1] * t1) -
                        (Servers[L[idx[1]][0]][1] * t2))
                M = M + abs(T_m - (Servers[L[idx[0]][0]][2] * t1) -
                            (Servers[L[idx[1]][0]][2] * t2))
                if (M < Min):
                    Min = M
                    Servers_N[idx[0]] = t1
                    Servers_N[idx[1]] = t2
    else:
        for t1 in range(N[idx[0]]):
            M = abs(T_k - (Servers[L[idx[0]][0]][1] *
                           t1)) + abs(T_m - (Servers[L[idx[0]][0]][2] * t1))
            if (M < Min):
                Min = M
                Servers_N[idx[0]] = t1
    print(Servers_N)
    return Servers_N
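
The helpers Limit and Total_R are not shown. Example #8 accumulates the same totals inline, so Total_R plausibly sums the per-VM kernel and memory demand; a sketch under that assumption:

def Total_R(VMs):
    # Assumed semantics: total kernel and memory demand,
    # with each VM row laid out as [id, kernels, memory]
    T_k = sum(vm[1] for vm in VMs)
    T_m = sum(vm[2] for vm in VMs)
    return T_k, T_m
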
Example #11
def main(args):
    this_dir = osp.join(osp.dirname(__file__), '.')
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    data_loader = data.DataLoader(
        DataLayer(
            data_root=osp.join(args.data_root, 'Test'),
            phase='Test',
        ),
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.num_workers,
    )

    if osp.isfile(args.checkpoint):
        checkpoint = torch.load(args.checkpoint)
    else:
        raise RuntimeError('Cannot find the checkpoint {}'.format(
            args.checkpoint))
    model = Model().to(device)
    model.load_state_dict(checkpoint)
    model.train(False)
    softmax = nn.Softmax(dim=1).to(device)

    corrects = 0.0
    with torch.set_grad_enabled(False):
        for batch_idx, (spatial, temporal, length,
                        target) in enumerate(data_loader):
            spatial_input = torch.zeros(*spatial.shape)
            temporal_input = torch.zeros(*temporal.shape)
            target_input = []
            length_input = []

            index = utl.argsort(length)[::-1]
            for i, idx in enumerate(index):
                spatial_input[i] = spatial[idx]
                temporal_input[i] = temporal[idx]
                target_input.append(target[idx])
                length_input.append(length[idx])

            spatial_input = spatial_input.to(device)
            temporal_input = temporal_input.to(device)
            target_input = torch.LongTensor(target_input).to(device)
            pack1 = pack_padded_sequence(spatial_input,
                                         length_input,
                                         batch_first=True)
            pack2 = pack_padded_sequence(temporal_input,
                                         length_input,
                                         batch_first=True)

            score = model(pack1, pack2)
            pred = torch.max(softmax(score), 1)[1].cpu()
            corrects += torch.sum(pred == target_input.cpu()).item()

    print('The accuracy is {:.4f}'.format(corrects / len(data_loader.dataset)))
Example #12
    def _display_pixmaps_(self, files, pixmaps):
        from utils import argsort, reordered, creation_date

        if self._settings_.sort_images == 'name':
            sorting = argsort(files, key=lambda f: f.name.lower())
        else:
            sorting = argsort(files, key=lambda f: creation_date(f.full_path))

        self.visible_files = reordered(files, sorting)

        for f, img in zip(self.visible_files, reordered(pixmaps, sorting)):
            icon = QIcon()
            icon.addPixmap(img)
            item = QListWidgetItem(icon, f.name.split('.')[0])
            item.setTextAlignment(Qt.AlignBottom)
            item.setToolTip(f.local_path)
            item.setData(QListWidgetItem.UserType, f)
            # item.setSizeHint(QSize(120, 80))
            self.list.addItem(item)
Example #13
    def _place_new_layers(superitems_list, remaining_heights):
        """
        Try to place items in the bin with the least spare height
        and fall back to the other open bins if the layer doesn't fit
        """
        sorted_indices = utils.argsort(remaining_heights)
        working_index = 0
        while len(superitems_list) > 0 and working_index < len(self.bins):
            working_bin = self.bins[sorted_indices[working_index]]
            to_place = _get_placeable_items(superitems_list, working_bin)
            if len(to_place) > 0:
                layer = _get_new_layer(to_place)
                self.layer_pool.add(layer)
                working_bin.add(layer)
                for s in layer.superitems_pool:
                    superitems_list.remove(s)
            else:
                working_index = working_index + 1
        return superitems_list
Example #14
    def _predict_single_sample(self, sample):
        '''
        :param sample: predicting label for sample
        :return: the classification
        '''
        if self.X is None:
            raise Exception('KNN data was not initialized with the fit method.')
        if self.attributes_len != len(sample):
            raise ValueError('sample length does not match the fitted data')

        hamming_distances = []
        for x in self.X:
            hamming_distances.append(self.__distance(sample, x))
        argsort = utils.argsort(hamming_distances)[:self.k]
        predictions = [self.y[i] for i in argsort]
        predictions_class_counter = [0] * len(self.classes)

        for p in predictions:
            predictions_class_counter[self.classes_map.get(p)] += 1

        return self.classes[utils.argmax(predictions_class_counter)]
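
The private __distance helper is not shown; the variable name hamming_distances suggests it computes a Hamming distance. A sketch under that assumption:

def _hamming(a, b):
    # Count the positions at which two equal-length samples disagree
    return sum(x != y for x, y in zip(a, b))
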
Example #15
def solve_pieces(g, remaining_eqs, seen_asm, indent):
    graphs = list(connected_component_subgraphs(g))
    subeqs = [{n for n in subg if n in remaining_eqs} for subg in graphs]
    #print(subeqs)
    assert len(graphs) > 1
    # Recursion here:
    sols = list(solve_problem_impl(subg, seqs, seen_asm, indent=indent+1)
                                       for subg, seqs in zip(graphs, subeqs))
    # cost, rowp = sols[i]
    total_cost = sum(t[0] for t in sols)
    # We chain together the pieces again, we order the pieces in such a way that
    # the pieces are "pushed" towards the bottom left corner of the matrix.
    # c_minus_r = (number of columns) - (number of rows)
    # Ties broken as follows: easy first, then lower row label first.
    # Note that this sorting is not necessary for correctness; it is for
    # aesthetic reasons only. See notes Jan 07, 2016.
    c_minus_r = [len(subg)-2*len(seqs) for subg, seqs in zip(graphs, subeqs)]
    keys = [(c_m_r, cost, min(seqs)) for c_m_r, (cost, _), seqs in zip(c_minus_r, sols, subeqs)]
    elims = list(chain.from_iterable(sols[i][1] for i in argsort(keys)))
    # The t profile is the number of torn variables when the equation was 
    # eliminated, listed in the order of the elimination.
    # Compare also with apply_exclusion_rule_on_pieces
    return total_cost, elims, get_t_profiles(graphs, sols)
Example #16
def Cond_Sort(VM):
    T_VM=[]
    for i in range(len(VM)):
        T_VM.append(VM[i][1])
    idx=utils.argsort(T_VM)
    return idx
Example #17
def main(args):
    this_dir = osp.join(osp.dirname(__file__), '.')
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    datasets = {
        phase: DataLayer(
            data_root=osp.join(args.data_root, phase),
            phase=phase,
        )
        for phase in args.phases
    }

    data_loaders = {
        phase: data.DataLoader(
            datasets[phase],
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.num_workers,
        )
        for phase in args.phases
    }

    model = Model(
        input_size=args.input_size,
        hidden_size=args.hidden_size,
        bidirectional=args.bidirectional,
        num_classes=args.num_classes,
    ).apply(utl.weights_init).to(device)
    criterion = nn.CrossEntropyLoss().to(device)
    softmax = nn.Softmax(dim=1).to(device)
    optimizer = optim.RMSprop(model.parameters(), lr=args.lr)

    for epoch in range(args.start_epoch, args.start_epoch + args.epochs):
        losses = {phase: 0.0 for phase in args.phases}
        corrects = {phase: 0.0 for phase in args.phases}

        start = time.time()
        for phase in args.phases:
            training = 'Test' not in phase
            if training:
                model.train(True)
            else:
                if epoch in args.test_intervals:
                    model.train(False)
                else:
                    continue

            with torch.set_grad_enabled(training):
                for batch_idx, (spatial, temporal, length,
                                target) in enumerate(data_loaders[phase]):
                    spatial_input = torch.zeros(*spatial.shape)
                    temporal_input = torch.zeros(*temporal.shape)
                    target_input = []
                    length_input = []

                    index = utl.argsort(length)[::-1]
                    for i, idx in enumerate(index):
                        spatial_input[i] = spatial[idx]
                        temporal_input[i] = temporal[idx]
                        target_input.append(target[idx])
                        length_input.append(length[idx])

                    spatial_input = spatial_input.to(device)
                    temporal_input = temporal_input.to(device)
                    target_input = torch.LongTensor(target_input).to(device)
                    pack1 = pack_padded_sequence(spatial_input,
                                                 length_input,
                                                 batch_first=True)
                    pack2 = pack_padded_sequence(temporal_input,
                                                 length_input,
                                                 batch_first=True)

                    score = model(pack1, pack2)
                    loss = criterion(score, target_input)
                    losses[phase] += loss.item() * target_input.shape[0]
                    if args.debug:
                        print(loss.item())

                    if training:
                        optimizer.zero_grad()
                        loss.backward()
                        optimizer.step()
                    else:
                        pred = torch.max(softmax(score), 1)[1].cpu()
                        corrects[phase] += torch.sum(
                            pred == target_input.cpu()).item()
        end = time.time()

        print('Epoch {:2} | '
              'Train loss: {:.5f} Val loss: {:.5f} | '
              'Test loss: {:.5f} accuracy: {:.5f} | '
              'running time: {:.2f} sec'.format(
                  epoch,
                  losses['Train'] / len(data_loaders['Train'].dataset),
                  losses['Validation'] /
                  len(data_loaders['Validation'].dataset),
                  losses['Test'] / len(data_loaders['Test'].dataset),
                  corrects['Test'] / len(data_loaders['Test'].dataset),
                  end - start,
              ))

        if epoch in args.test_intervals:
            torch.save(
                model.state_dict(),
                osp.join(this_dir,
                         './state_dict-epoch-' + str(epoch) + '.pth'))
Example #18
def left_idx(CPULR):
    T_L = []
    for i in range(len(CPULR)):
        # total leftover resources (kernels + memory) of CPU i
        T_L.append(CPULR[i][0] + CPULR[i][1])
    idx = utils.argsort(T_L)
    return idx
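
A hypothetical call over per-CPU leftover [kernels, memory] pairs (the layout CPULR receives in Example #23), assuming the stable argsort sketched after Example #1:

CPULR = [[2, 4], [0, 1], [3, 3]]
print(left_idx(CPULR))   # CPUs by ascending total leftover -> [1, 0, 2]
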
Example #19
    def _place_not_covered(self, singles_removed=None, area_tol=1.0):
        """
        Place the remaining items (not superitems) either on top
        of existing bins or in a whole new bin, if they do not fit
        """
        def _get_unplaceable_items(superitems_list, max_spare_height):
            """
            Return items that must be placed in a new bin
            """
            index = len(superitems_list)
            for i, s in enumerate(superitems_list):
                if s.height > max_spare_height:
                    index = i
                    break
            return superitems_list[:index], superitems_list[index:]

        def _get_placeable_items(superitems_list, working_bin):
            """
            Return items that can be placed in a new layer
            in the given bin
            """
            to_place = []
            for s in superitems_list:
                last_layer_area = working_bin.layer_pool[-1].area
                max_area = np.clip(area_tol * last_layer_area, 0,
                                   self.pallet_dims.area)
                area = sum(s.area for s in to_place)
                if area < max_area and s.height < working_bin.remaining_height:
                    to_place += [s]
                else:
                    break
            return to_place

        def _get_new_layer(to_place):
            """
            Place the maximum amount of items that can fit in
            a new layer, starting from the given pool
            """
            assert len(
                to_place) > 0, "The number of superitems to place must be > 0"
            spool = superitems.SuperitemPool(superitems=to_place)
            layer = maxrects.maxrects_single_layer_online(
                spool, self.pallet_dims)
            return layer

        def _place_new_layers(superitems_list, remaining_heights):
            """
            Try to place items in the bin with the least spare height
            and fall back to the other open bins if the layer doesn't fit
            """
            sorted_indices = utils.argsort(remaining_heights)
            working_index = 0
            while len(superitems_list) > 0 and working_index < len(self.bins):
                working_bin = self.bins[sorted_indices[working_index]]
                to_place = _get_placeable_items(superitems_list, working_bin)
                if len(to_place) > 0:
                    layer = _get_new_layer(to_place)
                    self.layer_pool.add(layer)
                    working_bin.add(layer)
                    for s in layer.superitems_pool:
                        superitems_list.remove(s)
                else:
                    working_index = working_index + 1
            return superitems_list

        # Get single superitems that are not yet covered
        # (assuming that the superitems pool in the layer pool contains all single superitems)
        superitems_list = self.layer_pool.not_covered_single_superitems(
            singles_removed=singles_removed)

        # Sort superitems by ascending height
        superitems_list = [
            superitems_list[i]
            for i in utils.argsort([s.height for s in superitems_list])
        ]

        # Get placeable and unplaceable items
        remaining_heights = self.get_remaining_heights()
        max_remaining_height = 0 if len(remaining_heights) == 0 else max(
            remaining_heights)
        superitems_list, remaining_items = _get_unplaceable_items(
            superitems_list, max_remaining_height)
        superitems_list = _place_new_layers(superitems_list, remaining_heights)

        # Place unplaceable items in a new bin
        remaining_items += superitems_list
        if len(remaining_items) > 0:
            spool = superitems.SuperitemPool(superitems=remaining_items)
            lpool = maxrects.maxrects_multiple_layers(spool,
                                                      self.pallet_dims,
                                                      add_single=False)
            self.layer_pool.extend(lpool)
            self.bins += self._build(lpool)
Example #20
def maxrects_single_layer_online(superitems_pool,
                                 pallet_dims,
                                 superitems_duals=None):
    """
    Given a superitems pool and the maximum dimensions to pack them into, try to fit
    the greatest number of superitems in a single layer following the given order
    """
    logger.debug("MR-SL-Online starting")

    # If no duals are given use superitems' heights as a fallback
    ws, ds, hs = superitems_pool.get_superitems_dims()
    if superitems_duals is None:
        superitems_duals = np.array(hs)

    # Sort rectangles by duals
    indexes = utils.argsort(list(zip(superitems_duals, hs)), reverse=True)
    logger.debug(
        f"MR-SL-Online {sum(superitems_duals[i] > 0 for i in indexes)} non-zero duals to place"
    )

    # Iterate over each placement strategy
    generated_layers, num_duals = [], []
    for strategy in MAXRECTS_PACKING_STRATEGIES:
        # Create the maxrects packing algorithm
        packer = newPacker(
            mode=PackingMode.Online,
            pack_algo=strategy,
            rotation=False,
        )

        # Add one bin representing one layer
        packer.add_bin(pallet_dims.width, pallet_dims.depth, count=1)

        # Online packing procedure
        n_packed, non_zero_packed, layer_height = 0, 0, 0
        for i in indexes:
            if superitems_duals[i] > 0 or hs[i] <= layer_height:
                packer.add_rect(ws[i], ds[i], i)
                if len(packer[0]) > n_packed:
                    n_packed = len(packer[0])
                    if superitems_duals[i] > 0:
                        non_zero_packed += 1
                    if hs[i] > layer_height:
                        layer_height = hs[i]
        num_duals += [non_zero_packed]

        # Build layer after packing
        spool, coords = [], []
        for s in packer[0]:
            spool += [superitems_pool[s.rid]]
            coords += [utils.Coordinate(s.x, s.y)]
        layer = layers.Layer(superitems.SuperitemPool(spool), coords,
                             pallet_dims)
        generated_layers += [layer]

    # Find the best layer by taking into account the number of
    # placed superitems with non-zero duals and density
    layer_indexes = utils.argsort(
        [(duals, layer.get_density(two_dims=False))
         for duals, layer in zip(num_duals, generated_layers)],
        reverse=True,
    )
    layer = generated_layers[layer_indexes[0]]

    logger.debug(
        f"MR-SL-Online generated a new layer with {len(layer)} superitems "
        f"(of which {num_duals[layer_indexes[0]]} with non-zero dual) "
        f"and {layer.get_density(two_dims=False)} 3D density")
    return layer
Example #21
def maxrects_multiple_layers(superitems_pool, pallet_dims, add_single=True):
    """
    Given a superitems pool and the maximum dimensions to pack them into,
    return a layer pool with warm start placements
    """
    logger.debug("MR-ML-Offline starting")
    logger.debug(
        f"MR-ML-Offline {'used' if add_single else 'not_used'} as warm_start")
    logger.debug(f"MR-ML-Offline {len(superitems_pool)} superitems to place")

    # Return a layer with a single item if only one is present in the superitems pool
    if len(superitems_pool) == 1:
        layer_pool = layers.LayerPool(superitems_pool,
                                      pallet_dims,
                                      add_single=True)
        uncovered = 0
    else:
        generated_pools = []
        for strategy in MAXRECTS_PACKING_STRATEGIES:
            # Build initial layer pool
            layer_pool = layers.LayerPool(superitems_pool,
                                          pallet_dims,
                                          add_single=add_single)

            # Create the maxrects packing algorithm
            packer = newPacker(
                mode=PackingMode.Offline,
                bin_algo=PackingBin.Global,
                pack_algo=strategy,
                sort_algo=SORT_AREA,
                rotation=False,
            )

            # Add an infinite number of layers (no upper bound)
            packer.add_bin(pallet_dims.width,
                           pallet_dims.depth,
                           count=float("inf"))

            # Add superitems to be packed
            ws, ds, _ = superitems_pool.get_superitems_dims()
            for i, (w, d) in enumerate(zip(ws, ds)):
                packer.add_rect(w, d, rid=i)

            # Start the packing procedure
            packer.pack()

            # Build a layer pool
            for layer in packer:
                spool, scoords = [], []
                for superitem in layer:
                    spool += [superitems_pool[superitem.rid]]
                    scoords += [utils.Coordinate(superitem.x, superitem.y)]

                spool = superitems.SuperitemPool(superitems=spool)
                layer_pool.add(layers.Layer(spool, scoords, pallet_dims))
                layer_pool.sort_by_densities(two_dims=False)

            # Add the layer pool to the list of generated pools
            generated_pools += [layer_pool]

        # Find the best layer pool by considering the number of placed superitems,
        # the number of generated layers and the density of each layer
        uncovered = [
            len(pool.not_covered_superitems()) for pool in generated_pools
        ]
        n_layers = [len(pool) for pool in generated_pools]
        densities = [
            pool[0].get_density(two_dims=False) for pool in generated_pools
        ]
        pool_indexes = utils.argsort(list(zip(uncovered, n_layers, densities)),
                                     reverse=True)
        layer_pool = generated_pools[pool_indexes[0]]
        uncovered = uncovered[pool_indexes[0]]

    logger.debug(
        f"MR-ML-Offline generated {len(layer_pool)} layers with 3D densities {layer_pool.get_densities(two_dims=False)}"
    )
    logger.debug(
        f"MR-ML-Offline placed {len(superitems_pool) - uncovered}/{len(superitems_pool)} superitems"
    )
    return layer_pool
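
Both this function and MR-SL-Online above rank candidates by calling argsort on lists of tuples, which relies on Python's lexicographic tuple comparison. A toy illustration under the argsort semantics sketched after Example #1:

scores = [(2, 0.91), (2, 0.87), (3, 0.50)]
print(utils.argsort(scores, reverse=True))   # -> [2, 0, 1]
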
Example #22
def pricing_problem_placement_cp(
    superitems_pool, superitems_in_layer, pallet_dims, duals, tlim=None, enable_output=False
):
    """
    Solve the pricing subproblem placement using a CP approach
    """
    logger.info("SP-P-CP defining variables and constraints")

    # Utility
    ws, ds, _ = superitems_pool.get_superitems_dims()
    sduals = superitems_duals(superitems_pool, duals)

    # Model and Solver
    mdl = cp_model.CpModel()
    slv = cp_model.CpSolver()

    # Variables
    cblx = {
        s: mdl.NewIntVar(0, pallet_dims.width - ws[s], f"c_bl_{s}_x") for s in superitems_in_layer
    }
    cbly = {
        s: mdl.NewIntVar(0, pallet_dims.depth - ds[s], f"c_bl_{s}_y") for s in superitems_in_layer
    }
    ctrx = {s: mdl.NewIntVar(ws[s], pallet_dims.width, f"c_tr_{s}_x") for s in superitems_in_layer}
    ctry = {s: mdl.NewIntVar(ds[s], pallet_dims.depth, f"c_tr_{s}_y") for s in superitems_in_layer}
    xint = [
        mdl.NewIntervalVar(cblx[s], mdl.NewConstant(ws[s]), ctrx[s], f"xint_{s}")
        for s in superitems_in_layer
    ]
    yint = [
        mdl.NewIntervalVar(cbly[s], mdl.NewConstant(ds[s]), ctry[s], f"yint_{s}")
        for s in superitems_in_layer
    ]

    # Constraints
    mdl.AddNoOverlap2D(xint, yint)
    mdl.AddCumulative(
        xint, [mdl.NewConstant(ds[s]) for s in superitems_in_layer], pallet_dims.depth
    )
    mdl.AddCumulative(
        yint, [mdl.NewConstant(ws[s]) for s in superitems_in_layer], pallet_dims.width
    )

    # Symmetry Breaking
    areas = [ws[s] * ds[s] for s in superitems_in_layer]
    area_ind = utils.argsort(areas, reverse=True)
    biggest_ind = superitems_in_layer[area_ind[0]]
    second_ind = superitems_in_layer[area_ind[1]]
    mdl.Add(cblx[biggest_ind] <= mdl.NewConstant(pallet_dims.width // 2))
    mdl.Add(cbly[biggest_ind] <= mdl.NewConstant(pallet_dims.depth // 2))
    mdl.Add(cblx[biggest_ind] <= cblx[second_ind])
    mdl.Add(cbly[biggest_ind] <= cbly[second_ind])

    # Search strategy
    indexes = utils.argsort([sduals[s] for s in superitems_in_layer], reverse=True)
    mdl.AddDecisionStrategy(
        [xint[i] for i in indexes], cp_model.CHOOSE_FIRST, cp_model.SELECT_MIN_VALUE
    )
    mdl.AddDecisionStrategy(
        [yint[i] for i in indexes], cp_model.CHOOSE_FIRST, cp_model.SELECT_MIN_VALUE
    )

    # Set a time limit in seconds
    if tlim is not None:
        slv.parameters.max_time_in_seconds = tlim

    # Solve
    slv.parameters.num_search_workers = 4
    slv.parameters.log_search_progress = enable_output
    slv.parameters.search_branching = cp_model.FIXED_SEARCH
    status = slv.Solve(mdl)

    # Extract results
    layer = None
    if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
        logger.info(f"SP-P-CP solved")

        # Extract coordinates
        sol = dict()
        for s in superitems_in_layer:
            sol[f"c_{s}_x"] = slv.Value(cblx[s])
            sol[f"c_{s}_y"] = slv.Value(cbly[s])

        # Build layer
        layer = utils.build_layer_from_model_output(
            superitems_pool,
            superitems_in_layer,
            sol,
            pallet_dims,
        )
    else:
        logger.warning("SP-P-CP unfeasible")

    logger.debug(f"SP-P-CP time: {slv.WallTime()}")
    return layer
Example #23
def Boxing(NEPvm, Pvm, N_Pvm, CPU_kernel, CPU_memory, condition):
    # Add one extra VM to the type with the smallest predicted count
    T_idx = utils.argsort(NEPvm)
    NEPvm[(T_idx[0])] = NEPvm[(T_idx[0])] + 1

    VMs = []
    # all Predicted vms
    for i in range(N_Pvm):
        for j in range(int(NEPvm[i])):
            VMs.append(Pvm[i])

    VMs = First_Delete(VMs, CPU_kernel, CPU_memory, condition)
    lenVMs = len(VMs)  # The number of all predicted VMs
    # Split vms according to M/K
    VM1 = []
    VM2 = []
    VM4 = []
    for i in range(lenVMs):
        VMs_info = [VMs[i][1], VMs[i][2]]
        if (VMs_info[1] / VMs_info[0] == 1):
            VM1.append(VMs[i])
        elif (VMs_info[1] / VMs_info[0] == 2):
            VM2.append(VMs[i])
        else:
            VM4.append(VMs[i])
    VMs = []  #reset
    VM4 = Adjust(VM4, 0)
    VM2 = Adjust(VM2, 0)
    VM1 = Adjust(VM1, 0)
    T_VMs = [VM4, VM2, VM1]
    len4 = len(VM4)
    len2 = len(VM2)
    len1 = len(VM1)
    # a group may be empty; avoid division by zero below
    if (len4 == 0):
        len4 = 100
    if (len2 == 0):
        len2 = 100
    if (len1 == 0):
        len1 = 100

    for i in range(lenVMs):
        TEMP = [(len(T_VMs[0]) / len4), (len(T_VMs[1]) / len2),
                (len(T_VMs[2]) / len1)]
        M = TEMP.index(max(TEMP))
        if (len(T_VMs[M]) != 0):
            VMs.append(T_VMs[M][0])
            (T_VMs[M]).remove(T_VMs[M][0])

    CPU = []
    CPU.append([])
    CPULR = []  # resource left
    N_PCPU = 0  # default 0
    VMSC = copy.deepcopy(VMs)
    while (1):
        CPU_limit = [CPU_kernel, CPU_memory]
        lenVM = len(VMs)
        for j in range(lenVM):
            VMs_info = [VMs[j][1], VMs[j][2]]
            if (CPU_limit[0] >= VMs_info[0] and CPU_limit[1] >= VMs_info[1]):
                CPU[N_PCPU].append(VMs[j][0])
                CPU_limit[0] = CPU_limit[0] - VMs_info[0]
                CPU_limit[1] = CPU_limit[1] - VMs_info[1]
                VMSC.remove(VMs[j])
        CPULR.append(CPU_limit)
        if (utils.SUM_Judge(VMSC, CPU_kernel, CPU_memory)):
            #CPU=Optimize(CPU,CPULR,VMSC,Pvm,N_Pvm)
            break
        else:
            VMs = copy.deepcopy(VMSC)
            CPU.append([])
            N_PCPU = N_PCPU + 1
    return CPU, (N_PCPU + 1)
Example #24
def Boxing(N_PCPU, NEPvm, Pvm, N_Pvm, CPU_kernal, CPU_memory):
    VMs = []
    # all Predicted vms
    for i in range(N_Pvm):
        for j in range(int(NEPvm[i])):
            VMs.append(Pvm[i])
    lenVMs = len(VMs)
    # The number of all predicted VMs
    CPU = []
    for i in range(N_PCPU):
        CPU.append([])
    # initialize the CPU list
    VMs_info = [[0 for i in range(2)] for j in range(lenVMs)]
    for i in range(lenVMs):
        VMs_info[i] = utils.SelectVM(VMs[i][0], Pvm, N_Pvm)
    # get each cpu information
    index = [0 for i in range(lenVMs)]
    for j in range(lenVMs):
        index[j] = VMs_info[j][1] - VMs_info[j][0]
    idx = utils.argsort(index)
    # get the index (ascending)

    temp_vms = []
    temp_vminfo = []
    for i in range(lenVMs):
        temp_vms.append(VMs[idx[i]])
        temp_vminfo.append(VMs_info[idx[i]])
    VMs = temp_vms
    VMs_info = temp_vminfo
    # get the sorted array
    CPU_index = [(CPU_memory - CPU_kernal) for i in range(N_PCPU)]
    CPU_limit = [[CPU_kernal, CPU_memory] for i in range(N_PCPU)]
    for j in range(lenVMs):
        count = 0
        idx = utils.argsort(CPU_index)
        for z in range(N_PCPU):
            if (CPU_limit[idx[z]][0] >= VMs_info[j][0]
                    and CPU_limit[idx[z]][1] >= VMs_info[j][1]):
                CPU[idx[z]].append(VMs[j][0])
                CPU_limit[idx[z]][0] = CPU_limit[idx[z]][0] - VMs_info[j][0]
                CPU_limit[idx[z]][1] = CPU_limit[idx[z]][1] - VMs_info[j][1]
                CPU_index[idx[z]] = CPU_index[
                    idx[z]] - VMs_info[j][1] + VMs_info[j][0]
                break
            else:
                count = count + 1
        if (count == len(CPU)):
            N_PCPU = N_PCPU + 1
            CPU.append([])
            CPU_limit.append([CPU_kernal, CPU_memory])
            CPU_index.append((CPU_memory - CPU_kernal))
            CPU[(N_PCPU - 1)].append(VMs[j][0])
            CPU_limit[(N_PCPU -
                       1)][0] = CPU_limit[(N_PCPU - 1)][0] - VMs_info[j][0]
            CPU_limit[(N_PCPU -
                       1)][1] = CPU_limit[(N_PCPU - 1)][1] - VMs_info[j][1]
            CPU_index[(
                N_PCPU -
                1)] = CPU_index[(N_PCPU - 1)] - VMs_info[j][1] + VMs_info[j][0]

    return CPU, N_PCPU
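
utils.SelectVM is not shown; it apparently looks up a VM's resource requirements by id in the predicted-VM table. A sketch assuming each Pvm row is [id, kernels, memory]:

def SelectVM(vm_id, Pvm, N_Pvm):
    # Assumed semantics: return the [kernels, memory] pair
    # of the Pvm row whose id matches vm_id
    for i in range(N_Pvm):
        if Pvm[i][0] == vm_id:
            return [Pvm[i][1], Pvm[i][2]]
    return None
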
Example #25
    def remove_duplicated_items(self, min_density=0.5, two_dims=False):
        """
        Keep items that are covered multiple times only
        in the layers with the highest densities
        """
        assert min_density >= 0.0, "Density tolerance must be non-negative"
        selected_layers = copy.deepcopy(self)
        all_item_ids = selected_layers.get_unique_items_ids()
        item_coverage = dict(zip(all_item_ids, [False] * len(all_item_ids)))
        edited_layers, to_remove = set(), set()
        for l in range(len(selected_layers)):
            layer = selected_layers[l]
            item_ids = layer.get_unique_items_ids()
            for item in item_ids:
                duplicated_superitems, duplicated_indices = layer.get_superitems_containing_item(
                    item)
                # Remove superitems in different layers containing the same item
                # (remove the ones in less dense layers)
                if item_coverage[item]:
                    edited_layers.add(l)
                    layer = layer.difference(duplicated_indices)
                # Remove superitems in the same layer containing the same item
                # (remove the ones with less volume)
                elif len(duplicated_indices) > 1:
                    edited_layers.add(l)
                    duplicated_volumes = [
                        s.volume for s in duplicated_superitems
                    ]
                    layer = layer.difference([
                        duplicated_indices[i]
                        for i in utils.argsort(duplicated_volumes)[:-1]
                    ])

            if l in edited_layers:
                # Flag the layer if it doesn't respect the minimum density
                density = layer.get_density(two_dims=two_dims)
                if density < min_density or density == 0:
                    to_remove.add(l)
                # Replace the original layer with the edited one
                else:
                    selected_layers.replace(l, layer)

            # Update item coverage
            if l not in to_remove:
                item_ids = selected_layers[l].get_unique_items_ids()
                for item in item_ids:
                    item_coverage[item] = True

        # Rearrange layers in which at least one superitem was removed
        for l in edited_layers:
            if l not in to_remove:
                layer = selected_layers[l].rearrange()
                if layer is not None:
                    selected_layers[l] = layer
                else:
                    logger.error(
                        f"After removing duplicated items couldn't rearrange layer {l}"
                    )

        # Removing layers last to first to avoid indexing errors
        for l in sorted(to_remove, reverse=True):
            selected_layers.pop(l)

        return selected_layers
Example #26
def baseline_model(fsi, ws, ds, hs, pallet_dims, tlim=None, num_workers=4):
    """
    The baseline model directly assigns superitems to layers and positions
    them by taking into account overlap and layer height minimization.
    It reproduces model [SPPSI] of the referenced paper (beware that it
    might be very slow; we advise using it only for orders under 30 items).

    Samir Elhedhli, Fatma Gzara, Burak Yildiz,
    "Three-Dimensional Bin Packing and Mixed-Case Palletization",
    INFORMS Journal on Optimization, 2019.
    """
    # Model and solver declaration
    model = cp_model.CpModel()
    solver = cp_model.CpSolver()

    # Utility
    n_superitems, n_items = fsi.shape
    max_layers = n_items

    # Variables
    # Layer heights variables
    ol = {l: model.NewIntVar(0, max(hs), f"o_{l}") for l in range(max_layers)}
    zsl, cix, ciy, xsj, ysj = dict(), dict(), dict(), dict(), dict()
    for s in range(n_superitems):
        # Coordinate variables
        cix[s] = model.NewIntVar(0, int(pallet_dims.width - ws[s]), f"c_{s}_x")
        ciy[s] = model.NewIntVar(0, int(pallet_dims.depth - ds[s]), f"c_{s}_y")

        # Precedence variables
        for j in range(n_superitems):
            if j != s:
                xsj[s, j] = model.NewBoolVar(f"x_{s}_{j}")
                ysj[s, j] = model.NewBoolVar(f"y_{s}_{j}")

        # Superitems to layer assignment variables
        for l in range(max_layers):
            zsl[s, l] = model.NewBoolVar(f"z_{s}_{l}")

    # Channeling variables
    # same[s, j, l] = 1 iff superitems s and j are both in layer l
    same = dict()
    for l in range(max_layers):
        for s in range(n_superitems):
            for j in range(n_superitems):
                if j != s:
                    same[s, j, l] = model.NewBoolVar(f"s_{s}_{j}_{l}")
                    model.Add(same[s, j, l] == 1).OnlyEnforceIf([zsl[s, l], zsl[j, l]])
                    model.Add(same[s, j, l] == 0).OnlyEnforceIf([zsl[s, l].Not(), zsl[j, l]])
                    model.Add(same[s, j, l] == 0).OnlyEnforceIf([zsl[s, l], zsl[j, l].Not()])
                    model.Add(same[s, j, l] == 0).OnlyEnforceIf([zsl[s, l].Not(), zsl[j, l].Not()])

    # Constraints
    # Ensure that every item is included in exactly one layer
    for i in range(n_items):
        model.Add(
            cp_model.LinearExpr.Sum(
                fsi[s, i] * zsl[s, l] for s in range(n_superitems) for l in range(max_layers)
            )
            == 1
        )

    # Define the height of layer l
    for l in range(max_layers):
        for s in range(n_superitems):
            model.Add(ol[l] >= hs[s] * zsl[s, l])

    # Redundant valid cuts that force the area of
    # a layer to fit within the area of a bin
    model.Add(
        cp_model.LinearExpr.Sum(
            ws[s] * ds[s] * zsl[s, l] for l in range(max_layers) for s in range(n_superitems)
        )
        <= pallet_dims.area
    )

    # Enforce at least one relative positioning relationship
    # between each pair of items in a layer
    for l in range(max_layers):
        for s in range(n_superitems):
            for j in range(n_superitems):
                if j > s:
                    model.Add(xsj[s, j] + xsj[j, s] + ysj[s, j] + ysj[j, s] >= 1).OnlyEnforceIf(
                        [same[s, j, l]]
                    )

    # Ensure that there is at most one spatial relationship
    # between items i and j along the width and depth dimensions
    for l in range(max_layers):
        for s in range(n_superitems):
            for j in range(n_superitems):
                if j > s:
                    model.Add(xsj[s, j] + xsj[j, s] <= 1).OnlyEnforceIf([same[s, j, l]])
                    model.Add(ysj[s, j] + ysj[j, s] <= 1).OnlyEnforceIf([same[s, j, l]])

    # Non-overlapping constraints
    for l in range(max_layers):
        for s in range(n_superitems):
            for j in range(n_superitems):
                if j != s:
                    model.Add(
                        cix[s] + ws[s] <= cix[j] + pallet_dims.width * (1 - xsj[s, j])
                    ).OnlyEnforceIf([same[s, j, l]])
                    model.Add(
                        ciy[s] + ds[s] <= ciy[j] + pallet_dims.depth * (1 - ysj[s, j])
                    ).OnlyEnforceIf([same[s, j, l]])

    # Minimize the sum of layer heights
    obj = cp_model.LinearExpr.Sum(ol[l] for l in range(max_layers))
    model.Minimize(obj)

    # Search by biggest area first
    indices_by_area = utils.argsort([ws[s] * ds[s] for s in range(n_superitems)], reverse=True)
    model.AddDecisionStrategy(
        [cix[s] for s in indices_by_area],
        cp_model.CHOOSE_LOWEST_MIN,
        cp_model.SELECT_MIN_VALUE,
    )

    # Set a time limit
    if tlim is not None:
        solver.parameters.max_time_in_seconds = tlim

    # Set solver parameters
    solver.parameters.num_search_workers = num_workers
    solver.parameters.log_search_progress = True
    solver.parameters.search_branching = cp_model.FIXED_SEARCH

    # Solve
    status = solver.Solve(model)

    # Extract results
    sol = dict()
    if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
        for l in range(max_layers):
            sol[f"o_{l}"] = solver.Value(ol[l])
            for s in range(n_superitems):
                sol[f"z_{s}_{l}"] = solver.Value(zsl[s, l])
        for s in range(n_superitems):
            sol[f"c_{s}_x"] = solver.Value(cix[s])
            sol[f"c_{s}_y"] = solver.Value(ciy[s])
        sol["objective"] = solver.ObjectiveValue()

    # Return solution and solving time
    return sol, solver.WallTime()
Example #27
def pricing_problem_no_placement_cp(
    superitems_pool, pallet_dims, duals, feasibility=None, tlim=None, enable_output=False
):
    """
    Solve the pricing subproblem no-placement using a CP approach
    """
    logger.info("SP-NP-CP defining variables and constraints")

    # Model and solver
    mdl = cp_model.CpModel()
    slv = cp_model.CpSolver()

    # Utility
    fsi, _, _ = superitems_pool.get_fsi()
    ws, ds, hs = superitems_pool.get_superitems_dims()
    n_superitems, n_items = fsi.shape

    # Variables
    ol = mdl.NewIntVar(0, max(hs), "o_l")
    zsl = [mdl.NewBoolVar(f"z_{s}_l") for s in range(n_superitems)]

    # Constraints
    # Redundant valid cuts that force the area of
    # a layer to fit within the area of a bin
    mdl.Add(
        cp_model.LinearExpr.Sum(ws[s] * ds[s] * zsl[s] for s in range(n_superitems))
        <= pallet_dims.area
    )

    # Define the height of layer l
    for s in range(n_superitems):
        mdl.Add(ol >= hs[s] * zsl[s])

    # Enforce feasible placement
    if feasibility is not None:
        logger.info(f"SP-NP-MIP feasibility: max number of selected items <= {feasibility}")
        mdl.Add(cp_model.LinearExpr.Sum(zsl[s] for s in range(n_superitems)) <= feasibility)

    # No item repetition constraint
    for i in range(n_items):
        mdl.Add(cp_model.LinearExpr.Sum([fsi[s, i] * zsl[s] for s in range(n_superitems)]) <= 1)

    # Objective
    obj = ol - cp_model.LinearExpr.Sum(
        int(np.ceil(duals[i])) * fsi[s, i] * zsl[s]
        for i in range(n_items)
        for s in range(n_superitems)
    )
    mdl.Minimize(obj)

    # Search strategy
    duals_sort_index = utils.argsort(
        [sum([fsi[s, i] * duals[i] for i in range(n_items)]) for s in range(n_superitems)]
    )
    mdl.AddDecisionStrategy([ol], cp_model.CHOOSE_FIRST, cp_model.SELECT_MIN_VALUE)
    mdl.AddDecisionStrategy(
        [zsl[s] for s in duals_sort_index],
        cp_model.CHOOSE_FIRST,
        cp_model.SELECT_MAX_VALUE,
    )

    # Set a time limit in seconds
    if tlim is not None:
        slv.parameters.max_time_in_seconds = tlim

    # Solve
    slv.parameters.num_search_workers = 4
    slv.parameters.log_search_progress = enable_output
    slv.parameters.search_branching = cp_model.FIXED_SEARCH
    status = slv.Solve(mdl)

    # Extract results
    objective = float("inf")
    superitems_in_layer = None
    if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
        logger.info(f"SP-NP-CP solved")

        # Extract objective value
        objective = slv.ObjectiveValue()
        logger.debug(f"SP-NP-CP objective: {objective}")

        # Extract selected superitems
        superitems_in_layer = [s for s in range(n_superitems) if slv.Value(zsl[s]) == 1]
        logger.debug(f"SP-NP-CP selected {len(superitems_in_layer)}/{n_superitems} superitems")

        logger.debug(f"SP-NP-CP computed layer height: {slv.Value(ol)}")
    else:
        logger.warning("SP-NP-CP unfeasible")

    logger.debug(f"SP-NP-CP time: {slv.WallTime()}")
    return objective, superitems_in_layer