Example #1
def _refine_solution(sol, D, d, C, L, minimize_K):
    # refine until stuck at a local optimum
    local_optima_reached = False
    while not local_optima_reached:
        sol = without_empty_routes(sol)
        if not minimize_K:
            sol.append(0)  # make sure there is an empty route to move the point to

        # improve with relocation and keep 2-optimal
        sol = do_local_search([do_1point_move, do_2opt_move], sol, D, d, C, L,
                              LSOPT.BEST_ACCEPT)

        # try to redistribute the route with smallest demand
        sol = without_empty_routes(sol)
        routes = RouteData.from_solution(sol, D, d)
        min_rd = min(routes, key=lambda rd: rd.demand)
        routes.remove(min_rd)

        if not minimize_K:
            routes.append(RouteData())

        if __debug__:
            log(
                DEBUG, "Applying do_redistribute_move on %s (%.2f)" %
                (str(sol), objf(sol, D)))

        redistribute_result = do_redistribute_move(
            min_rd,
            routes,
            D,
            d,
            C,
            L,
            strategy=LSOPT.FIRST_ACCEPT,
            # Note: Mole and Jameson do not specify exactly
            # how the redistribution is done (how many
            # different combinations are tried).
            # Increase the recombination_level for a more
            # aggressive and time consuming search for
            # redistributing the customers to other
            # routes.
            recombination_level=0)
        redistribute_delta = redistribute_result[-1]

        if (redistribute_delta is not None) and\
           (minimize_K or redistribute_delta<0.0):

            updated_sol = RouteData.to_solution(redistribute_result[:-1])
            if __debug__:
                log(DEBUG - 1,
                    ("Improved from %s (%.2f) to %s (%.2f)" %
                     (sol, objf(sol, D), updated_sol, objf(updated_sol, D))) +
                    "using inter route heuristic do_redistribute_move\n")
            sol = updated_sol
        else:
            local_optima_reached = True
            if __debug__:
                log(DEBUG - 1, "No move with do_redistribute_move\n")
    return sol
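
The control pattern above (keep applying local search and redistribution moves until none of them improves the solution) can be illustrated with a minimal, self-contained sketch. It uses a toy objective and a single adjacent-swap operator instead of the do_1point_move/do_2opt_move/do_redistribute_move operators, so it only shows the refine-until-local-optimum loop:

def toy_objective(sol):
    # sum of absolute differences between neighboring elements
    return sum(abs(a - b) for a, b in zip(sol, sol[1:]))

def refine_until_local_optimum(sol):
    stuck = False
    while not stuck:
        stuck = True
        for i in range(len(sol) - 1):
            # a single toy "move": swap two adjacent elements
            candidate = sol[:i] + [sol[i + 1], sol[i]] + sol[i + 2:]
            if toy_objective(candidate) < toy_objective(sol):
                sol = candidate
                stuck = False  # an improving move was found, keep refining
                break
    return sol

print(refine_until_local_optimum([3, 1, 4, 1, 5, 9, 2, 6]))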
Example #2
def call_init(points, D, d, C, L, st, wtt, single, minimize_K):
    if minimize_K:
        # TODO: remove this once supported (see TODO notes in algo desc)
        raise NotImplementedError("Nearest neighbor algorithm does "+
                                  "not support minimizing the number"+
                                  " of vehicles")

    sol_snn = nearest_neighbor_init(D, d, C, L, emerging_route_count=1)
    if single:
        return sol_snn

    auto_route_count = sol_snn.count(0)-1

    # NN is so fast we can try with several K and take the best
    best_sol = sol_snn
    best_f = objf(sol_snn,D)
    best_K = auto_route_count
    for k in range(2,auto_route_count+1):
        sol = nearest_neighbor_init(D, d, C, L, emerging_route_count=k)
        sol = without_empty_routes(sol)
        sol_f = objf(sol,D)
        sol_K = sol.count(0)-1

        if is_better_sol(best_f, best_K, sol_f, sol_K, minimize_K):
            best_sol = sol
            best_f = sol_f
            best_K = sol_K

    return best_sol
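
The is_better_sol comparison used here (and in the later examples) is not shown in these snippets. Below is a plausible sketch of its behaviour, inferred from how the docstrings describe the minimize_K objective; it is an assumption, not the library's actual implementation:

def is_better_sol_sketch(best_f, best_K, sol_f, sol_K, minimize_K):
    # Assumed semantics: with minimize_K the route count is the primary
    # objective and the cost is the tie-breaker; otherwise only the cost matters.
    if sol_f is None:
        return False
    if best_f is None:
        return True
    if minimize_K and sol_K != best_K:
        return sol_K < best_K
    return sol_f < best_f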
Example #3
def wren_holliday_init(points,
                       D,
                       d,
                       C,
                       L=None,
                       minimize_K=False,
                       seed_node=BEST_OF_FOUR,
                       direction='both',
                       full_convergence=True):
    """ This implements the Wren and Holliday improvement heuristic. The
    initial solution is generated using the generic sweep procedure of
    `sweep.py`, and the improvement procedure works as specified in 
    Wren & Holliday (1972) Fig 1 (Flowchart of program). Basically, the 
    refining processes, also known as local search improvement /
    post-optimization phase, repeadedly applies local search operators to 
    improve the initial solutions and returns the best one.
    
    * D is a numpy ndarray (or equivalent) of the full 2D distance matrix.
    * d is a list of demands. d[0] should be 0.0 as it is the depot.
    * C is the capacity constraint limit for the identical vehicles.
    * L is the optional constraint for the maximum route cost/duration/length.
    
    * seed_node sets how many different seed nodes are tried for the Sweep
       based initial solution generation. If LEAST_DENSE, the sweep is started
       from the direction from the depot that has lowest customer density. The
       same applies to BEST_OF_FOUR (default), but also 3 other directions
       spaced by ~90deg are considered. Also a complete (but costly) generation
       of all possible initial solutions with BEST_ALTERNATIVE can be used. 
    * direction can be 'cw' for clockwise Sweep, 'ccw' for counter-clockwise,
       or 'both' (default) for trying both.
    * full_convergence determines if the improvement is stopped after the
       "delete" operation fails the first time (False) or if the local search
       continues until no operation is capable of finding an improving move 
       (True, default).
       
    Returns the solution.
    
    Wren, A., & Holliday, A. (1972). Computer scheduling of vehicles from one
    or more depots to a number of delivery points. Journal of the Operational
    Research Society, 23(3), 333-344.
    """

    if not points:
        raise ValueError(
            "The algorithm requires 2D coordinates for the points")
    N = len(D)

    # Calculate the sweep coordinates
    # this is 99% the same as _get_sweep..., but we also need the
    #  node_phis for later use, so some code is duplicated here.
    np_pts = points if isinstance(points, np.ndarray) else np.asarray(points)
    depot_x, depot_y = points[0]
    node_rhos, node_phis = cart2pol(np_pts[:, 0] - depot_x,
                                    np_pts[:, 1] - depot_y)
    sweep = get_sweep_from_polar_coordinates(node_rhos, node_phis)
    sweep_phis = sweep[0]

    directions = ['cw', 'ccw'] if direction == 'both' else [direction]
    sweeps = []

    for cur_dir in directions:
        if seed_node == BEST_OF_FOUR or seed_node == LEAST_DENSE:
            # Wren & Holliday method of selecting starting customers for the
            #  Sweep initialization from the "least dense direction".
            # Turn the polar coordinates so that the smallest angles are to the
            #  direction of the smallest density, weighted by their demands.
            o = np.array([depot_x, depot_y])
            if d:
                weighted_xy_from_origin = np.multiply(
                    np_pts[1:] - o, np.transpose(np.array([d[1:], d[1:]])))
                avgx, avgy = np.average(weighted_xy_from_origin, axis=0)
            else:
                avgx, avgy = np.average(np_pts[1:] - o, axis=0)
            avg_rho, avg_phi = cart2pol(np.array([avgx]), np.array([avgy]))
            # to range [-pi, pi]
            if avg_phi > 0:
                least_dense_phi = avg_phi[0] - pi
            else:
                least_dense_phi = avg_phi[0] + pi

            # evenly spaced by pi/2
            for i in range(4):
                angle_tgt = least_dense_phi + i * pi / 2
                if angle_tgt > pi:
                    angle_tgt -= pi * 2

                # take the first satisfying the condition
                start_from_here = np.argmax(sweep_phis > angle_tgt)
                start_node = start_from_here-1 if cur_dir=="cw" \
                                                 else start_from_here
                if start_node == -1:
                    start_node = N - 2
                sweeps.append((start_node, cur_dir))

                if seed_node == LEAST_DENSE:
                    break  # do not take the ones at 90 deg intervals

        elif seed_node == BEST_ALTERNATIVE:
            nodes = list(range(N - 1))
            sweeps.extend(zip(nodes, [cur_dir] * len(nodes)))
        elif type(seed_node) == int:
            sweeps.append([seed_node, cur_dir])

    ## PHASE 1 : "Generate ... initial solutions and choose the best"
    initial_sols = []

    try:
        for start_node, cur_dir in sweeps:
            isol = sweep_init(sweep,
                              D,
                              d,
                              C,
                              L,
                              seed_node=[start_node],
                              direction=cur_dir,
                              routing_algo=None)
            initial_sols.append(isol)

    except KeyboardInterrupt as e:  # or SIGINT
        # if interrupted on initial sol gen, return the best of those
        if len(e.args) > 0 and type(e.args[0]) is list:
            initial_sols.append(e.args[0])
        if not initial_sols:
            raise e
        else:
            best_isol, best_if, best_iK = None, float('inf'), float('inf')
            for isol in initial_sols:
                isol_f = objf(isol, D)
                isol_K = isol.count(0) - 1
                if is_better_sol(best_if, best_iK, isol_f, isol_K, minimize_K):
                    best_isol = isol
                    best_if = isol_f
                    best_iK = isol_K
            raise KeyboardInterrupt(best_isol)

    best_sol, best_f, best_K = None, float('inf'), float('inf')
    interrupted = False
    for sol in initial_sols:

        # Construct an array of RouteData objects for local search improvement
        #  heuristics to use
        routes = RouteData.from_solution(sol, D, d)

        if __debug__:
            log(DEBUG, "Improving solution %s (%.2f)" % (sol, objf(sol, D)))
            _log_after_ls_op("SWEEP(S)", False, routes, D)

        ## PHASE 2 : Improvement phase (see Wren & Holliday 1974, Figure 1)

        # Variables storing the state
        converging = False
        deleted = True
        omitted_nodes = set()
        prev_Q_point_sol_f = None
        prev_iteration_sol_f = None
        changed = False

        try:
            while True:
                _remove_empty_in_place(routes)
                if not minimize_K:
                    # +1 empty route can allow local search to find an improvement
                    routes.append(RouteData())

                changed = False

                ## "INSPECT, SINGLE" ##
                # run 2opt on each route to remove any crossing edges
                inspect_improved = inspect_heuristic(routes, D, C, d, L)
                if inspect_improved and minimize_K:
                    _remove_empty_in_place(routes)
                changed |= inspect_improved
                if __debug__: _log_after_ls_op("INSPECT", changed, routes, D)

                # move a node to a better position on the route or other routes
                single_improved = single_heuristic(routes, D, C, d, L)
                if single_improved and minimize_K:
                    _remove_empty_in_place(routes)
                changed |= single_improved
                if __debug__: _log_after_ls_op("SINGLE", changed, routes, D)

                ## "Are customers omitted?" ##
                omitted_were_assigned = False
                if omitted_nodes:
                    inserted_nodes = set()
                    ## "Take omitted ... in order and try to fit into existing routes" ##
                    for node in sorted(list(omitted_nodes)):
                        for rdi, rd in enumerate(routes):
                            _, new_rd, delta = do_insert_move(
                                node, rd, D, d, C, L, LSOPT.BEST_ACCEPT)
                            if delta is not None:
                                routes[rdi] = new_rd
                                inserted_nodes.add(node)
                                omitted_were_assigned = True
                    omitted_nodes -= inserted_nodes

                    if omitted_were_assigned and minimize_K:
                        _remove_empty_in_place(routes)
                    changed |= omitted_were_assigned
                    if __debug__:
                        _log_after_ls_op("INSERT", changed, routes, D)

                ## "Are customers omitted still?" ##
                if omitted_nodes:
                    omitted_were_assigned |= complain_heuristic(
                        routes, omitted_nodes, D, C, d, L)
                    if omitted_were_assigned and minimize_K:
                        _remove_empty_in_place(routes)
                    changed |= omitted_were_assigned
                    if __debug__:
                        _log_after_ls_op("COMPLAIN", changed, routes, D)

                sol_f = 0
                for rd in routes:
                    sol_f += rd.cost

                ## Q-point : "Has distance been reduced by more that 5% OR
                # has a previously omitted customer been assigned?" ##
                if (prev_Q_point_sol_f is None) or\
                   (sol_f<prev_Q_point_sol_f*0.95) or \
                   omitted_were_assigned:
                    prev_Q_point_sol_f = sol_f
                    converging = False
                    continue
                else:
                    prev_Q_point_sol_f = sol_f
                    converging = True

                ## "Is problem small?" -> PAIR ##
                if len(D) <= 80:
                    pair_improved = pair_heuristic(routes, D, C, d, L)
                    if pair_improved and minimize_K:
                        _remove_empty_in_place(routes)
                    changed |= pair_improved
                    if __debug__: _log_after_ls_op("PAIR", changed, routes, D)

                ## "Is deleted true?" ##
                if deleted:
                    # "DELETE" -> "Was delete succesful?" ##
                    deleted = delete_heuristic(routes, D, C, d, L)
                    if deleted and minimize_K:
                        _remove_empty_in_place(routes)
                    changed |= deleted
                    if __debug__:
                        _log_after_ls_op("DELETE", changed, routes, D)

                ## DISENTANGLE ##
                disentangle_improved = disentangle_heuristic(
                    routes, sweep, node_phis, D, C, d, L)
                if disentangle_improved and minimize_K:
                    _remove_empty_in_place(routes)
                changed |= disentangle_improved
                if __debug__:
                    _log_after_ls_op("DISENTANGLE", changed, routes, D)

                ## "Has situation changed in interation?" ##
                solution_changed_between_iterations = True
                if prev_iteration_sol_f:
                    if prev_iteration_sol_f == sol_f:
                        solution_changed_between_iterations = False
                prev_iteration_sol_f = sol_f

                if converging and ((full_convergence and not changed) or
                                   (not full_convergence and not deleted) or
                                   (not solution_changed_between_iterations)):
                    ## STOP ##
                    break

        except KeyboardInterrupt:
            interrupted = True

        # collect the optimized solution from the routes
        sol = [0] + [n for r in routes for n in r.route[1:]]
        # LS may cause empty routes
        sol = without_empty_routes(sol)
        sol_K = sol.count(0) - 1
        sol_f = objf(sol, D)

        if __debug__:
            log(DEBUG, "Improved solution %s (%.2f)" % (sol, sol_f))

        if is_better_sol(best_f, best_K, sol_f, sol_K, minimize_K):
            best_sol = sol
            best_f = sol_f
            best_K = sol_K

        if interrupted:
            raise KeyboardInterrupt(best_sol)

    return best_sol
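
A hypothetical usage sketch of the function above. The helper imports (numpy, scipy) and the small instance are illustrative; only the signature and the solution format (a list of node indices with 0 separating the routes) are taken from the code above, and wren_holliday_init with the BEST_OF_FOUR constant are assumed to be in scope:

import numpy as np
from scipy.spatial.distance import cdist

points = [(0.0, 0.0),                       # index 0 is the depot
          (1.0, 3.0), (4.0, 1.0), (5.0, 5.0), (2.0, 6.0)]
D = cdist(points, points)                   # full symmetric distance matrix
d = [0.0, 2.0, 3.0, 1.0, 2.0]               # demands, d[0] is the depot
C = 5.0                                     # vehicle capacity

sol = wren_holliday_init(points, D, d, C, seed_node=BEST_OF_FOUR,
                         direction='both')
# e.g. sol == [0, 1, 4, 0, 3, 2, 0] would encode two routes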
Example #4
def cmt_2phase_init(D, d, C, L=None, minimize_K=False,
                    lambda_multiplier=2.0, mu_multiplier=1.0,
                    phase1_seed_selection_method = "farthest",
                    phase2_choose_most_associated_route = True,
                    phase2_repeated_association_with_n_routes = 1,
                    number_of_randomized_retries = None):
    
    """ Implementation of the Christofides, Mingozzi & Toth (1979) two phase
    heuristic. In the first phase a customer is selected to act as a seed node 
    and initialize a route. Then, a savings criteria parametrized with
    lambda_multiplier is used to determine which customers to insert.
    Insertions are done until a constraint is violated and then a new seed is
    selected and the insertions continue. This is repeated until no unrouted
    customers remain or we run out of route seeds. Finally, the routes are made
    r-optimal with 3-opt. 
    
    The seed customers are carried over to the second phase of the algorithm.
    Here, each customer is associated with a seed customer based on a second 
    savings criterion parametrized with mu_multiplier. Also the next closest 
    seed customer has an effect on the score used when associating the nodes.
    Then, a route is built around each seed customer with the nodes associated
    to that route taking care not to violate feasibility of the route. Finally,
    if a feasible solution was generated, the routes from the second phase 
    are made r-optimal with 3-opt. 
    
    The better of the solutions from the first and second phases is selected 
    and returned.
    
    Note that the default parameters are for a deterministic variant of the 
    stochastic algorithm described in (Christofides et al 1979).
    
    Basic parameters:
    * D is a numpy ndarray (or equivalent) of the full 2D distance matrix.
    * d is a list of demands. d[0] should be 0.0 as it is the depot.
    * C is the capacity constraint limit for the identical vehicles.
    * L is the optional constraint for the maximum route cost/length/duration.
    
    Objective parameter:  
    * minimize_K sets the primary optimization objective. If set to True, it is
       the minimum number of routes and the current best is always replaced
       with a solution with smaller K. If set to False (default) the algorithm 
       optimizes only for the minimum solution/routing cost. 
    
    Route shape parameters:
    * lambda_multiplier   specifies how closely the customer is associated to 
                           the emerging route seed customer in the first phase.
    * mu_multiplier       specifies how closely the customer is associated to
                           route seed customers in the second phase.

    The implementation includes some improvements to the CMT (1979) algorithm
    to improve the chance of the second phase producing feasible solutions:
        
    * phase1_seed_selection_method
                          instead of selecting a seed customer for emerging 
                           route at random in the first phase, select the 
                           "farthest" or "closest" to the depot or the one with 
                           the "biggest" demand. Can also be "first", which 
                           will be random if number_of_randomized_retries is set.
                           
    * phase2_choose_most_associated_route   
                           instead of building the routes in random order in 
                           phase 2, start from the route with most associated
                           customers. If set to False implements the original
                           behaviour of (CMT 1979).
    
    * phase2_repeated_association_with_n_routes           
                           if set to None, the original behaviour of (CMT 1979)
                           is used. That is, phase 2 is terminated without a
                           feasible solution if the first route building pass
                           over the route seed customers leaves unrouted
                           customers when S=0. If this is set to 1, the
                           procedure is repeated until a) all customers are
                           routed or b) no feasible insertions can be made.
                           If this parameter is set to >1, the 2nd best (and
                           so on) alternative seed customers to associate a
                           customer with are also tried. Can also be "K", in
                           which case the number of routes generated in the
                           first phase is used as the value of this parameter.
    * number_of_randomized_retries  
                           If None, the algorithm is deterministic. If set to
                           an integer value, the first phase can generate this
                           many seed customer configurations for the second
                           phase in case the second phase is unable to produce
                           feasible solutions.
    """
    
    if phase1_seed_selection_method=="first":
        seed_f = _first_seed
    elif phase1_seed_selection_method=="farthest":
        seed_f = _farthest_seed
    elif phase1_seed_selection_method=="closest":
        seed_f = _closest_seed
    elif phase1_seed_selection_method=="biggest":
        seed_f = _biggest_seed
    
    rr = number_of_randomized_retries 

    best_sol = None
    best_f = None
    best_K = None
    interrupted = False
    
    while (rr is None) or (rr>0):
        
        phase1_sol, phase1_f, phase1_K = None, float("inf"), float("inf")
        phase2_sol, phase2_f, phase2_K = None, float("inf"), float("inf")
        
        try:
            phase1_seeds, phase1_sol, phase1_f, rr = \
                _phase_one(lambda_multiplier,D,d,C,L, seed_f, rr)
            phase1_K = len(phase1_seeds)
            
            # extension to CMT, option to associate customers multiple times 
            #  (to other routes, starting from the route with minimal eps).
            associate_routes = phase2_repeated_association_with_n_routes
            if phase2_repeated_association_with_n_routes=="K":
                associate_routes = phase1_K
                
            phase2_K, phase2_sol, phase2_f, rr = \
                _phase_two(mu_multiplier,phase1_seeds,D,d,C,L, rr,
                    phase2_choose_most_associated_route, associate_routes)
        
        except KeyboardInterrupt as e: #or SIGINT
            # Phase 1 OR phase 2 was interrupted. 
            if len(e.args)>0 and type(e.args[0]) is list:
                if phase1_sol is None:
                    phase1_sol = without_empty_routes(e.args[0])
                    phase1_f = objf(phase1_sol, D)
                    phase1_K = phase1_sol.count(0)-1
                    
                elif phase2_sol is None:
                    phase2_sol = without_empty_routes(e.args[0])
                    phase2_f = objf(phase2_sol, D)
                    phase2_K = phase2_sol.count(0)-1
            interrupted = True
        
        # Pick the better out of the two
        p1_better_than_p2 = is_better_sol(phase2_f, phase2_K,
                                          phase1_f, phase1_K, minimize_K)
        p1_best_so_far    = is_better_sol(best_f, best_K,
                                          phase1_f, phase1_K, minimize_K)
        p2_best_so_far    = is_better_sol(best_f, best_K,
                                          phase2_f, phase2_K, minimize_K)
        if p1_better_than_p2 and p1_best_so_far:
            best_sol = phase1_sol
            best_f = phase1_f
            best_K = phase1_K
        if not p1_better_than_p2 and p2_best_so_far:
            best_sol = phase2_sol
            best_f = phase2_f
            best_K = phase2_K
        
        if interrupted:
            # pass on the current best solution
            raise KeyboardInterrupt(best_sol)
        
        # deterministic version, no retries
        # stochastic version terminates as soon as phase2 succeeds
        if (rr is None) or (phase2_sol is not None):
            break

        
    return best_sol
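
A hedged usage sketch of the parameters documented above (the tiny instance is illustrative and cmt_2phase_init is assumed to be in scope). The first call uses the deterministic defaults; the second approximates the stochastic behaviour of the original CMT (1979) procedure as described in the docstring:

import numpy as np

D = np.array([[0., 4., 5., 3.],
              [4., 0., 2., 6.],
              [5., 2., 0., 4.],
              [3., 6., 4., 0.]])
d = [0.0, 2.0, 2.0, 1.0]
C = 3.0

# deterministic variant (the documented defaults)
det_sol = cmt_2phase_init(D, d, C, L=None)

# closer to the stochastic original: random seeds, original route build order
# and original phase 2 termination rule, with up to 10 randomized retries
sto_sol = cmt_2phase_init(
    D, d, C, L=None,
    phase1_seed_selection_method="first",
    phase2_choose_most_associated_route=False,
    phase2_repeated_association_with_n_routes=None,
    number_of_randomized_retries=10)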
Example #5
def lr3opt_init(D,
                d,
                C,
                L,
                initial_lambda1_C=None,
                initial_lambda1_L=None,
                initialization_algorithm=_init_with_tsp,
                postoptimize_with_3optstar=True,
                max_concecutive_lamba_incs=None):
    """ An implementation of the Stewart & Golden [1]_ 3-opt* heuristic
    with Lagrangean relaxation.
    
    The algorithm starts from a solution that can be either feasible or
    infeasible and uses local search to move towards better and feasible 
    solutions. More specifically, it works by replacing the constraint checks 
    of the 3-opt* with a penalty that depends on how much the constraint was 
    violated. A 3-opt* that operates on the entire solution, that is, checks 
    for both intra- and inter-route moves in one pass, is used. The penalties
    are iteratively doubled as the local search progresses, and it is assumed
    that this eventually forces the solutions into the feasible region.

    .. [1] Stewart, W. R. and Golden, B. L. (1984). A lagrangean relaxation 
           heuristic for vehicle routing. European Journal of Operational
           Research, 15(1):84–88.
    
    Parameters
    ----------
    D : numpy.ndarray
        is the full 2D distance matrix.
    d : list
        is a list of demands. d[0] should be 0.0 as it is the depot.
    C : float
        is the capacity constraint limit for the identical vehicles.
    L : float
        is the optional constraint for the maximum route length/duration/cost.
    
    initial_lambda1_C : float
        is the initial Lagrange multiplier value for the capacity constraint C.
        If left empty (None) the formula ``l1_C=average(d)/(20*max(D))`` is used.
        The alternative value suggested by Stewart & Golden (1984) was 0.05.
    initial_lambda1_L : float
        is the initial Lagrange multiplier value for the maximum route cost/
        duration/length constraint. If left empty (None) the formula
        ``l1_L=average(distance to nearest neighbor)/(10*max(D))`` is used.
    initialization_algorithm (function): is a function that returns a TSP or VRP 
        solution and its objective function value. The default is to use the LKH 
        TSP solution, but the function _init_with_random can be used to replicate
        the results of Stewart & Golden (1984) where a random solution is used.
    
    Returns
    -------
    list
        The solution as a list of node indices to visit.
    
    .. todo:: due to how the algorithm works, introducing minimize_K would require
       balancing between penalizing constraint violations and penalizing new 
       routes with an additional multiplier. This was not implemented.
    """

    sol = None
    try:
        ## STEP 1: Generate an initial solution
        sol, initial_f = initialization_algorithm(D, d, C, L)

        max_D = None
        lambdas = [initial_lambda1_C, initial_lambda1_L]
        if C and lambdas[0] is None:
            max_D = _get_max(D, sol)
            lambdas[0] = np.average(d) / (20 * max_D)
        if L and lambdas[1] is None:
            # Stewart & Golden (1984) did not propose an extension for the maximum
            #  route duration/length/cost constraint, but here we use something
            #  similar for L to what they used for the C constraint relaxation.
            max_D = _get_max(D, sol) if (max_D is None) else max_D
            closest_neighbor_D = D.copy()
            np.fill_diagonal(closest_neighbor_D, max_D)
            lambdas[1] = np.average(
                closest_neighbor_D.min(axis=0)) / (10 * max_D)

        if __debug__:
            log(
                DEBUG,
                "Start from initial solution %s (%.2f), and with l1=%.2f, l2=%.2f"
                % (sol, calculate_objective(sol, D),
                   (0 if lambdas[0] is None else lambdas[0]),
                   (0 if lambdas[1] is None else lambdas[1])))

        checker_function = partial(_check_lr3opt_move, lambdas=lambdas)

        # STEP 2: Solve the relaxed problem using 3-opt*
        c_lambda_incs = 0
        while True:
            # Make sure there is an empty route (for giving the 3-opt* procedure
            #  the option of adding vehicles)
            while not (sol[-1] == 0 and sol[-2] == 0):
                sol += [0]

            if __debug__:
                log(
                    DEBUG - 2, "Finding a LR3OPT move for %s (%.2f)" %
                    (sol, calculate_objective(sol, D)))
            new_sol, delta = do_3optstar_move(sol,
                                              D,
                                              d,
                                              C,
                                              L,
                                              strategy=LSOPT.FIRST_ACCEPT,
                                              move_checker=checker_function)

            # local optima reached, tighten the relaxation
            # TODO: it should not happen that the sol==new_sol. However it happens and as a quickfix check for it.
            if delta is None or sol == new_sol:
                # return the first feasible solution (note: does not check for covering)
                if fast_constraint_check(sol, D, d, C, L):
                    if __debug__:
                        log(
                            DEBUG, "Reached feasible solution %s (%.2f)" %
                            (sol, calculate_objective(sol, D)))
                    while postoptimize_with_3optstar:
                        opt_sol, delta = do_3optstar_move(
                            sol, D, d, C, L, strategy=LSOPT.FIRST_ACCEPT)
                        if delta is None:
                            return normalize_solution(
                                sol)  # remove any [0,0]'s
                        else:
                            sol = opt_sol
                            #print("REMOVEME improved with post-optimization 3-opt*")
                            log(
                                DEBUG,
                                "Found improving 3-opt* move leading to %s (%.2f)"
                                % (sol, calculate_objective(sol, D)))

                    return normalize_solution(sol)  # remove any [0,0]'s
                else:
                    # STEP 3: Update lambdas
                    lambda_at_inf = False
                    if lambdas[0] is not None:
                        lambdas[0] = lambdas[0] * 2
                        lambda_at_inf = lambdas[0] == float('inf')
                    if lambdas[1] is not None:
                        lambdas[1] = lambdas[1] * 2
                        lambda_at_inf = lambda_at_inf or lambdas[1] == float(
                            'inf')
                    if __debug__:
                        log(
                            DEBUG - 1,
                            "No improving moves left, increasing lambda to l1=%.2f, l2=%.2f"
                            % ((0 if lambdas[0] is None else lambdas[0]),
                               (0 if lambdas[1] is None else lambdas[1])))
                    #print("No improving moves left, increasing lambda to l1=%.2f, l2=%.2f"%
                    #        ((0 if lambdas[0] is None else lambdas[0]),
                    #         (0 if lambdas[1] is None else lambdas[1])))

                    #TODO: if penalty >> cost, break (stuck on a infeasible region)
                    # how much bigger can be determined by finding such a
                    # pathological problem instance?

                    # safeguard for getting stuck
                    c_lambda_incs += 1
                    #print("REMOVEME: c_lambda_incs", c_lambda_incs)
                    if lambda_at_inf or (
                            max_concecutive_lamba_incs is not None
                            and c_lambda_incs > max_concecutive_lamba_incs):
                        return _force_feasible(sol, D, d, C, L)

            else:
                if __debug__:
                    log(
                        DEBUG,
                        "Found improving LR3OPT move leading to %s (%.2f)" %
                        (new_sol, calculate_objective(new_sol, D)))
                    log(
                        DEBUG - 2, "However, routes %s remain infeasible." % [
                            r for r in sol2routes(new_sol)
                            if not fast_constraint_check(r, D, d, C, L)
                        ])

                sol = new_sol
                c_lambda_incs = 0

    except KeyboardInterrupt:  # or SIGINT
        # Pass on the current solution forced feasible by splitting routes
        #  according to the constraints.
        raise KeyboardInterrupt(_force_feasible(sol, D, d, C, L))

    return without_empty_routes(sol)
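
The default multiplier initialization described in the docstring can be reproduced with a few lines of numpy. This is an illustrative sketch only: the real code takes max(D) over the arcs of the initial solution via _get_max, whereas here the plain matrix maximum is used:

import numpy as np

D = np.array([[0., 4., 5., 3.],
              [4., 0., 2., 6.],
              [5., 2., 0., 4.],
              [3., 6., 4., 0.]])
d = [0.0, 1.0, 2.0, 1.5]

max_D = D.max()
l1_C = np.average(d) / (20 * max_D)           # l1_C = average(d)/(20*max(D))

closest_neighbor_D = D.copy()
np.fill_diagonal(closest_neighbor_D, max_D)   # hide the zero diagonal
# l1_L = average(distance to nearest neighbor)/(10*max(D))
l1_L = np.average(closest_neighbor_D.min(axis=0)) / (10 * max_D)
print(l1_C, l1_L)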
Example #6
def mole_jameson_insertion_init(D,
                                d,
                                C,
                                L=None,
                                minimize_K=False,
                                strain_criterion='all'):
    """ This is the implementation of Mole and Jameson (1976) cheapest
    insertion algorithm. The emerging route is first initialized according to
    which strain criterion (insertion cost calculation method) is used,
    On each step an unrouted customer for which the insertion cost is lowest
    (between any two nodes on the emerging route) is searched and the insertion
    made until no feasible insertions remain. For details see the insertion 
    implementation in cheapest_insertion.py:cheapest_insertion_init
    
    * strain_criterion can be one of 
        - 'proximity_ranking', 'min_strain', and 'clarke_wright',
          each implementing a slightly different insertion criterion,
        - 'gaskell' and 'augumented_min_strain', which try several
          parameter values, and
        - 'all' (default), which tries all of the above.
        
    For 'clarke_wright' and 'gaskell', and when lambda is 2.0 in
    'augumented_min_strain', the routes are initialized according to the 
    primary value of the current strain criterion. For 'proximity_ranking',
    'min_strain', and the rest of 'augumented_min_strain', the emerging route
    is initialized with the farthest unrouted customer.
    
    Mole, R. and Jameson, S. (1976). A sequential route-building algorithm 
      employing a generalised savings criterion. Journal of the Operational
      Research Society, 27(2):503-511.
    """

    callback_configurations = []
    if strain_criterion == 'proximity_ranking' or strain_criterion == 'all':
        callback_configurations.append(
            (_create_new_criteria_function(lm=0.0, mm=0.0), "farthest"))

    if strain_criterion == 'min_strain':
        #or strain_criterion=='all': # <- this is already in 'augumented_min_strain'
        callback_configurations.append(
            (_create_new_criteria_function(lm=0.0, mm=1.0), "farthest"))

    if strain_criterion == 'clarke_wright':
        #or strain_criterion=='all': # <- this is already in 'augumented_min_strain'
        callback_configurations.append(
            # when mu = lambda-1,  initiate route with savings criteria
            (_create_new_criteria_function(lm=2.0, mm=1.0), "strain"))

    if strain_criterion == 'gaskell' or strain_criterion == 'all':
        lambda_mults = [1.25, 1.5, 1.75, 2.0]
        for glm in lambda_mults:
            callback_configurations.append(
                # when mu = lambda-1,  initiate route with savings criteria
                (_create_new_criteria_function(lm=glm,
                                               mm=glm - 1.0), "strain"))

    if strain_criterion == 'augumented_min_strain' or strain_criterion == 'all':
        # the lm=2.0, mm=1.0 is already in 'gaskell'
        lambda_mults = [0, 0.5, 1, 1.5] if strain_criterion=='all' else\
                       [0, 0.5, 1, 1.5, 2.0]
        for alm in lambda_mults:
            callback_configurations.append(
                (_create_new_criteria_function(lm=alm, mm=1.0),
                 "strain" if alm - 1.0 == 1.0 else "farthest"))

    ## Find the best solution among the active strain criterions
    best_sol = None
    best_f = None
    best_K = None
    interrupted = False
    for strain_function, init_method in callback_configurations:

        sol, sol_f, sol_K = None, float('inf'), float('inf')
        try:
            sol = cheapest_insertion_init(
                D,
                d,
                C,
                L,
                minimize_K=False,
                emerging_route_count=1,
                initialize_routes_with=init_method,
                insertion_strain_callback=strain_function,
                insert_callback=_try_insert_2opt_and_update)
            sol = _refine_solution(sol, D, d, C, L, minimize_K)
            # LS may make some of the routes empty
            sol = without_empty_routes(sol)

        except KeyboardInterrupt as e:  #or SIGINT
            # some of the strain function insertion runs was interrupted
            if len(e.args) > 0 and type(e.args[0]) is list:
                sol = e.args[0]
                interrupted = True

        if sol:
            sol_f = objf(sol, D)
            sol_K = sol.count(0) - 1
        if is_better_sol(best_f, best_K, sol_f, sol_K, minimize_K):
            best_sol = sol
            best_f = sol_f
            best_K = sol_K

        if interrupted:
            raise KeyboardInterrupt(best_sol)

    return best_sol
Example #7
    def solve_problems(self,
                       algo_name,
                       instance_idx='all',
                       round_D_func=None,
                       round_f_func=None,
                       cost_compare=True,
                       require_K=False,
                       suppress_constraint_check=False):
        """
        Solves the problems set up in the setUp using the algo_name. The other
        arguments are:
            
        instance_idx: if 'all' (default), solve all problems. If int, solve the
         problem with that index. If an iterable of ints, solve those problems.
            
        round_D_func: nxn matrix -> nxn matrix, operation that can be used to
         modify the costs of the distance matrix. Usually e.g. np.int_,
         np.around etc.
        
        round_f_func: float -> int/float, the function used to round the result
         (i.e. solution quality).
        
        cost_compare: compare the solution against the target in self.target by
         calculating the quality with the distance or cost matrix (cost matrix
         includes service times).
         
        require_K: gives the required number of vehicles K as a parameter for
         the algorithm.
         
        suppress_constraint_check: can be used to disable constraint checking.
        """

        algo_idx = next(i for i, al in enumerate(self.algorithms)
                        if al[0] == algo_name)
        assert self.algorithms[algo_idx][0] == algo_name

        algo_targets = self.targets[algo_idx]

        diffs = np.zeros(len(algo_targets))

        active_targets = list(enumerate(algo_targets))
        if instance_idx != 'all':
            if hasattr(type(instance_idx), '__iter__'):
                active_targets = [active_targets[i] for i in instance_idx]
            elif type(instance_idx) is int:
                active_targets = [active_targets[instance_idx]]
            else:
                raise ValueError("Invalid problem index value")

        for problem_idx, target in active_targets:
            if target is None:
                continue
            elif (type(target) is int) or (len(target) == 1):
                target_c = target
                target_k = None
            else:
                target_k, target_c = target

            problem_name = self.problem_names[problem_idx]
            pfn = path.join(BENCHMARKS_BASEPATH, self.problem_path,
                            problem_name)

            #print(pfn)

            sol, sol_f, sol_c, elapsed_t = self._solve_instance(
                self.algorithms[algo_idx][1],
                pfn,
                round_D_func=round_D_func,
                require_K=require_K,
                predefined_k=target_k,
                suppress_constraint_check=suppress_constraint_check)

            sol = without_empty_routes(sol)
            sol_k = sol.count(0) - 1

            if round_f_func:
                sol_f = round_f_func(sol_f + 0.5)
                sol_c = round_f_func(sol_c + 0.5)

            if cost_compare:
                gap = 100.0 * sol_c / target_c - 100.0
                print(algo_name, problem_name, sol_c, "VS.", target_c,
                      "(gap %.2f %%)" % gap, "in %.2f s" % elapsed_t)
            else:
                gap = 100.0 * sol_f / target_c - 100.0
                print(algo_name, problem_name, sol_f, "VS.", target_c,
                      "(gap %.2f %%)" % gap, "in %.2f s" % elapsed_t)

            if ReplicationBase.result_file:
                ReplicationBase.result_file.write(";".join([
                    algo_name, problem_name,
                    str(sol_c) if cost_compare else str(sol_f),
                    str(sol_k),
                    str(target_c),
                    str(target_k),
                    str(gap),
                    str(elapsed_t)
                ]))
                ReplicationBase.result_file.write("\n")
                ReplicationBase.result_file.flush()

            diffs[problem_idx] = gap

        avg_diff = np.average(diffs)
        sd_diff = np.std(diffs)
        print("On average", algo_name, "is within",
              "%.2f %% (SD %.2f %%) of the target.\n" % (avg_diff, sd_diff),
              "\n")

        return avg_diff, sd_diff, np.min(diffs), np.max(diffs)
Example #8
def _solve_set_covering(N,
                        active_ptls,
                        relaxed_ptls,
                        forbidden_combos,
                        allow_infeasible=True,
                        D=None,
                        K=None):
    """A helper function that solves a set covering problem. The inputs are
    a list of node sets and the corresponding cost of each set.
    --
    Fisher, M. L. and Jaikumar, R. (1981), A generalized assignment 
    heuristic for vehicle routing. Networks, 11: 109-124.
    """

    m = Model("SCPCVRP")
    nactive = len(active_ptls.routes)
    nrelaxed = len(relaxed_ptls.routes)
    nforbidden = len(forbidden_combos)

    # the order of the keys is important when we interpret the results
    X_j_keys = range(nactive + nrelaxed)
    # variables and the objective
    X_j_costs = active_ptls.costs + relaxed_ptls.costs
    X_j_node_sets = active_ptls.nodes + relaxed_ptls.nodes
    #update forbidden indices to match the current active petal set
    X_j_forbidden_combos = []
    for fc in forbidden_combos:
        if all(i < nactive for i in fc):
            X_j_forbidden_combos.append(
                [i if i >= 0 else -i - 1 + nactive for i in fc])

    #print("REMOVEME: Solving with K=%d, %d node sets, and %d solutions forbidden" % (K, nactive+nrelaxed,nforbidden))

    if __debug__:
        log(
            DEBUG, "Solving over-constrained VRP as a set covering problem " +
            "with %d petals, where %d of the possible configurations are forbidden."
            % (nactive + nrelaxed, nforbidden))
        if nforbidden > 0:
            log(DEBUG - 2, " and with following solutions forbidden:")
            log(DEBUG - 3, "(petal indices = %s)" % str(X_j_forbidden_combos))
            for fc in X_j_forbidden_combos:
                fc_sol = [0]
                for i in fc:
                    if i < nactive:
                        fc_sol.extend(active_ptls.routes[i][1:])
                    else:
                        fc_sol.extend(relaxed_ptls.routes[i - nactive][1:])
                fc_sol = without_empty_routes(fc_sol)
                log(DEBUG - 2, "%s (%.2f)" % (fc_sol, objf(fc_sol, D)))

    X_j = m.addVars(X_j_keys, obj=X_j_costs, vtype=GRB.BINARY, name='x')

    ## constraints
    c1_constrs, c2_constrs, c3_constrs = _add_set_covering_constraints(
        m, N, K, X_j, X_j_keys, X_j_node_sets, X_j_forbidden_combos)

    ## update the model and solve
    m._vars = X_j
    m.modelSense = GRB.MINIMIZE
    m.update()
    # disable output
    m.setParam('OutputFlag', 0)
    m.setParam('TimeLimit', MAX_MIP_SOLVER_RUNTIME)
    m.setParam('Threads', MIP_SOLVER_THREADS)
    #m.write("petalout.lp")
    m.optimize()

    # restore SIGINT callback handler which is changed by gurobipy
    signal(SIGINT, default_int_handler)

    if __debug__:
        log(DEBUG - 2, "Gurobi runtime = %.2f" % m.Runtime)
        if m.Status == GRB.OPTIMAL:
            log(DEBUG - 3,
                "Gurobi objective = %.2f" % m.getObjective().getValue())

    if m.Status == GRB.OPTIMAL:
        return _decision_variables_to_petals(m.X, active_ptls,
                                             relaxed_ptls), True
    elif m.Status == GRB.TIME_LIMIT:
        raise GurobiError(
            10023, "Gurobi timeout reached when attempting to solve SCPCVRP")
    elif m.Status == GRB.INTERRUPTED:
        raise KeyboardInterrupt()
    # Sometimes the solution is infeasible, try to relax it a little.
    elif m.Status == GRB.INFEASIBLE and allow_infeasible:
        return _relax_customer_constraints_with_feasRelax(
            m, c1_constrs, active_ptls, relaxed_ptls)
    return None, False
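
For readers unfamiliar with the model, a minimal, generic set covering sketch in gurobipy is given below. It is illustrative only: the actual constraints of _solve_set_covering are built in _add_set_covering_constraints and additionally handle the route count K and the forbidden petal combinations:

from gurobipy import Model, GRB, quicksum

costs     = [10.0, 14.0, 9.0]            # cost of each petal (route)
node_sets = [{1, 2}, {2, 3}, {1, 3}]     # customers covered by each petal
N         = 4                            # nodes 0..3, node 0 is the depot

m = Model("toy_scp")
x = m.addVars(range(len(costs)), obj=costs, vtype=GRB.BINARY, name="x")
for node in range(1, N):
    # every customer has to be covered by at least one selected petal
    m.addConstr(quicksum(x[j] for j, s in enumerate(node_sets) if node in s) >= 1)
m.modelSense = GRB.MINIMIZE
m.setParam("OutputFlag", 0)
m.optimize()
print([j for j in range(len(costs)) if x[j].X > 0.5])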
Example #9
def sweep_init(coordinates, D, d, C, L=None, minimize_K=False,
               direction="both", seed_node=BEST_ALTERNATIVE,
               routing_algo=None, **callbacks):
    """
    This algorithm was proposed in Wren (1971) and in Wren & Holliday
    (1972). Sweep was also proposed in Gillett and Miller (1974) who
    gave the algorithm its name. The proposed variants differ in how many
    starting locations (seeds) for the sweep are considered: four in Wren &
    Holliday (1972) and all possible in both directions in Gillett and Miller
    (1974). Also, the improvement procedures differ. The version in this file
    is barebones, as it does not include any route improvement heuristics.
    For implementations of the Gillett and Miller (1974) or Wren & Holliday (1972)
    algorithms, please see their Python files (gillet_miller_sweep.py and
    wren_holliday_sweep.py).
    
    The basic principle of the Sweep algorithm is simple: The algorithm assumes
    that the distances of the CVRP are symmetric, and, furthermore, that the
    points are located on a plane. The cartesian coordinates of these points
    in relation to the depot are converted to polar coordinates (rho, phi), and
    then sorted by phi. Starting from an arbitrary node (in this implementation
    the default is the one closest to the depot), create a new route and add the 
    next adjacent unrouted node according to its angular coordinate. Repeat 
    as long as the capacity is not exceeded. When this happens, start a new 
    route and repeat the procedure until all nodes are routed. Finally, the 
    routes can optionally be optimized using a TSP algorithm. 
       
    Note that the algorithm gives different results depending on the direction 
    the nodes are inserted. The direction parameter can be "cw" for clockwise 
    insertion order and "ccw" for counterclockwise. As the algorithm is quite
    fast, it is recommended to run it in both directions.
    
    Please note that the actual implementation of the sweep procedure is in the
     do_one_sweep function.
    
    * coordinates can be either 
        a) a list/array of cartesian coordinates (x,y)
        b) three lists/arrays of polar coordinates WITH node indexes (i.e. a 
            numpy stack of phi,rho,idx)
    * D is a numpy ndarray (or equvalent) of the full 2D distance matrix.
    * d is a list of demands. d[0] should be 0.0 as it is the depot.
    * C is the capacity constraint limit for the identical vehicles.
    * L is the optional constraint for the maximum route length/cost/duration.
    * direction is either "cw" or "ccw" depending on the direction the nodes
       are to be processed
    * seed_node is an optional parameter that specifies how the first node of the 
       sweep is determined. This can be one of CLOSEST_TO_DEPOT (0),
       SMALLEST_ANGLE (-1), BEST_ALTERNATIVE (-2), which tries every possible 
       starting id, or a positive integer explicitly specifying the node id to
       start from. Also, a list of indexes can be given. These are explicit
       sweep indexes, and it is advisable to also give the sweep parameter.
    
    Wren, A. (1971), "Computers in Transport Planning and Operation", Ian 
      Allan, London.
    Wren, A., and Holliday, A. (1972), "Computer scheduling of vehicles from
      one or more depots to a number of delivery points", Operations Research
      Quarterly 23, 333-344.
    Gillett, B., and Miller, L., (1974). "A heuristic algorithm for the vehicle
      dispatch problem". Operations Research 22, 340-349.
    """
    
    N = len(D)
    if len(coordinates[0])==2:
        sweep = get_sweep_from_cartesian_coordinates(coordinates)
    elif len(coordinates)==3 and (len(coordinates[0])==len(coordinates[1])==len(coordinates[2])):
        # it is not necessary for the sweep to contain all nodes in D and d
        sweep = coordinates
    else:
        raise ValueError("The coordinates need to be (x,y) or (phi,rho,node_index,sweep_index_for_node-1). Not "+str(coordinates))
        
    
    ## specify the direction
    if direction == "ccw":
        step_incs = [1]    
    elif direction == "cw":
        step_incs = [-1]        
    elif direction == "both":
        step_incs = [1,-1]        
    else:
        raise ValueError("""Only "cw", "ccw", and "both" are valid values for the direction parameter""")

    ## specify where to start
    if seed_node==CLOSEST_TO_DEPOT:
        starts = [np.argmin(sweep[1])]
    elif seed_node==SMALLEST_ANGLE:
        starts = [0]
    elif seed_node==BEST_ALTERNATIVE:
        starts = list(range(0,N-1))
    elif type(seed_node) is int:
        # we interpret it as a node idx
        starts = [np.where(sweep[2]==abs(seed_node)%N)[0][0]]
    elif type(seed_node) is list:
        # we interpret it as a node idx
        starts = seed_node
     
    ## Make sure there is a valid route improvement method
    if routing_algo is None:
        # Default generates the route from the list of nodes in the order they
        #  were swept. Assume that depot (0) is the first of node_set.
        routing_algo = lambda D, node_set: (list(node_set)+[0],
                                            objf(list(node_set)+[0],D))
        
    ## for extending Sweep with improvement heuristics
    callback_data = None
    intra_route_callback = None
    inter_route_callback = None
    if 'prepare_callback_datastructures' in callbacks:
        pcds_callback = callbacks['prepare_callback_datastructures']
        callback_data = pcds_callback(D,d,C,L,sweep)
    if 'intra_route_improvement' in callbacks:        
        intra_route_callback = callbacks['intra_route_improvement']
    if 'inter_route_improvement' in callbacks:        
        inter_route_callback = callbacks['inter_route_improvement']
        
    ## Do the search with the parameter specified above
    best_sol = None
    best_f = None  
    best_K = None
    
    try:
        for step_inc in step_incs:
            for start in starts:
                if __debug__:
                    log(DEBUG, "\nDo a sweep from position %d (n%d) by steps of %d"%
                                 (start,sweep[2][start],step_inc))
                
                ## This does one sweep from one start location to one direction
                routes = do_one_sweep(N, D, d, C, L, routing_algo,
                                           sweep, start, step_inc,
                                           False,
                                           intra_route_callback,
                                           inter_route_callback,
                                           callback_data)            
                    
                sol = [n for rd in routes for n in rd.route[:-1]]+[0]
                # LS of the callbacks may cause empty routes
                sol = without_empty_routes(sol)
                sol_f = objf( sol, D )   
                sol_K = sol.count(0)-1
        
                if __debug__:
                    log(DEBUG, "Previous sweep produced solution %s (%.2f)\n\n" %
                                 (str(sol),sol_f))
                    
                if is_better_sol(best_f, best_K, sol_f, sol_K, minimize_K):
                    best_sol = sol
                    best_f = sol_f
                    best_K = sol_K
    except KeyboardInterrupt: # or SIGINT
        raise KeyboardInterrupt(best_sol)
        
    return best_sol
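
The polar-coordinate preprocessing the docstring describes can be sketched in a few lines of numpy. This is a self-contained illustration of the (phi, rho, node index) sweep stack; the actual preparation is done by get_sweep_from_cartesian_coordinates and may differ in details:

import numpy as np

pts = np.array([[0.0, 0.0],                  # index 0 is the depot
                [2.0, 1.0], [-1.0, 2.0], [1.0, -2.0], [-2.0, -1.0]])
rel = pts[1:] - pts[0]                       # customer positions relative to the depot
rho = np.hypot(rel[:, 0], rel[:, 1])         # radial distance from the depot
phi = np.arctan2(rel[:, 1], rel[:, 0])       # angle in [-pi, pi]
order = np.argsort(phi)                      # ccw sweep order by increasing phi
sweep = np.stack((phi[order], rho[order], np.arange(1, len(pts))[order]))
print(sweep)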
Example #10
def paessens_savings_init(D,
                          d,
                          C,
                          L,
                          minimize_K=False,
                          strategy="M4",
                          do_3opt=True):
    """
    This implements the Paessens (1988) variant of the parallel savings
     algorithm of Clarke and Wright (1964). The savings function of
     (Paessens 1988) is parametrized with multipliers g and f:
         
         S_ij  = d_0i + d_0j - g * d_ij + f * | d_0i - d_0j |
    
    If two merges have the same savings value, the one where i and j are closer
     to one another takes precedence. Otherwise the implementation details can be 
     read from parallel_savings.py as it contains the actual code implementing 
     the parallel savings procedure. The variant specific parameters are:
     
    * strategy which can be:
        - "M1" for 143 runs of the savings algorithm with all combinations of
           g = np.linspace(0.8, 2.0, num=13)
           f = np.linspace(0.0, 1.0, num=11)            
        - "M4" for 8 runs (g,f) = (1.0,0.1), (1.0,0.5), (1.4,0.0), (1.4,0.5)
           with a parameter combinations +/- 0.1 around the best of these four. 
        - or a list of (g,f) value tuples.
    * do_3opt (default True) optimizes the resulting routes to 3-optimality
    
    Note: Due to the use of modern computers, and the low priority given to the
     computational efficiency of this implementation, not all of the techniques
     specified in "reduction of computer requirements" (Paessens 1988) were employed.
    """

    parameters = []
    if strategy == "M1":
        parameters.extend(
            _cartesian_product(np.linspace(0.8, 2.0, num=13),
                               np.linspace(0.0, 1.0, num=11)))
    elif strategy == "M4":
        parameters.extend([(1.0, 0.1), (1.0, 0.5), (1.4, 0.0), (1.4, 0.5)])
    else:
        parameters.extend(strategy)

    best_params = None
    best_sol = None
    best_f = None
    best_K = None
    interrupted = False

    params_idx = 0
    while params_idx < len(parameters):
        g, f = parameters[params_idx]

        # Note: this is not a proper closure. Variables g and f are shared
        #  over all iterations. It is OK like this, but do not use/store the
        #  lambda after this loop.
        gf_savings = lambda D: paessens_savings_function(D, g, f)

        sol, sol_f, sol_K = None, float('inf'), float('inf')
        try:
            sol = parallel_savings_init(D, d, C, L, minimize_K, gf_savings)
            if do_3opt:
                sol = do_local_search([do_3opt_move], sol, D, d, C, L,
                                      LSOPT.BEST_ACCEPT)
            # 3-opt may make some of the routes empty
            sol = without_empty_routes(sol)
        except KeyboardInterrupt as e:  # or SIGINT
            # some parameter combination was interrupted
            if len(e.args) > 0 and type(e.args[0]) is list:
                sol = e.args[0]
                interrupted = True
        if sol:
            sol_f = objf(sol, D)
            sol_K = sol.count(0) - 1
        if is_better_sol(best_f, best_K, sol_f, sol_K, minimize_K):
            best_sol = sol
            best_f = sol_f
            best_K = sol_K
            best_params = (g, f)

        if interrupted:
            raise KeyboardInterrupt(best_sol)

        params_idx += 1
        # after the best of 4 for the M4 is found, check 4 more around it
        if params_idx == 4 and strategy == "M4":
            g_prime, f_prime = best_params
            parameters.extend([(g_prime - M4_FINETUNE_STEP, f_prime),
                               (g_prime + M4_FINETUNE_STEP, f_prime),
                               (g_prime, f_prime - M4_FINETUNE_STEP),
                               (g_prime, f_prime + M4_FINETUNE_STEP)])
    return best_sol
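
The parametrized savings value from the docstring, S_ij = d_0i + d_0j - g*d_ij + f*|d_0i - d_0j|, can be illustrated with a small self-contained function. This is a sketch only; the actual paessens_savings_function used above may order and package the values differently (for example, the documented tie-breaking on the i-j distance is omitted here):

import numpy as np

def toy_paessens_savings(D, g, f):
    savings = []
    n = len(D)
    for i in range(1, n):
        for j in range(i + 1, n):
            s_ij = D[0, i] + D[0, j] - g * D[i, j] + f * abs(D[0, i] - D[0, j])
            savings.append((s_ij, i, j))
    # merges with the largest savings are tried first
    savings.sort(reverse=True)
    return savings

D = np.array([[0., 4., 5., 3.],
              [4., 0., 2., 6.],
              [5., 2., 0., 4.],
              [3., 6., 4., 0.]])
print(toy_paessens_savings(D, g=1.0, f=0.1))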