Example #1
def RandomSearch(kp: Knapsack, items: List[Item], stopCriteria=10):
    # Random Search implementation

    #  Initialize the variables
    mh = Metaheuristic()
    heuristics = list(heuristicComparison.keys())
    countNone = 0

    while countNone < stopCriteria:
        # Randomly choose the next heuristic
        nextHeuristic = np.random.choice(heuristics)
        kp_candidate = kp.copy()
        items_candidate = items.copy()
        nextItem = SimpleHeuristic(nextHeuristic).apply(
            kp_candidate, items_candidate)

        if nextItem is None or kp_candidate.getValue() <= kp.getValue():
            # Reject the heuristic
            countNone += 1
            continue
        countNone = 0

        # Accept the heuristic
        kp = kp_candidate
        items = items_candidate
        mh.addHeuristic(nextHeuristic)
    return kp, mh
def solver(method: str, kp: Knapsack, items: List[Item], additionalArgs=None):
    if method == 'Heuristic':
        # Execute the heuristic method
        simple_heuristic = SimpleHeuristic(additionalArgs)
        return ConstructiveSolution(kp, items, simple_heuristic).getValue()
    elif method == 'SimulatedAnnealing':
        # Execute the Simulated Annealing metaheuristic
        return solveMetaheuristic(method, kp, items, additionalArgs)
    elif method == 'RandomSearch':
        # Execute the Random Search metaheuristic
        return solveMetaheuristic(method, kp, items, additionalArgs)
    elif method == 'HyperheuristicNaive':
        # Execute the Hyper-heuristic naive model
        hh = HyperheuristicNaive(additionalArgs)
        return ConstructiveSolution(kp, items, hh).getValue()
    elif method == 'Hyperheuristic':
        # Execute the Hyper-heuristic based on LSTM model
        return hyperheuristicSolver(kp, items, additionalArgs).getValue()
    elif method == 'Backtracking':
        # Execute the recursive backtracking method
        return kpBacktracking(kp.getCapacity(), items).getValue()
    elif method == 'DP':
        # Execute the dynamic programming method
        return kpDP(kp.getCapacity(), items).getValue()
    elif method == 'MILP':
        # Execute the mixed integer linear programming method
        return kpMILP(kp.getCapacity(), items).getValue()
    elif method in list(heuristicComparison.keys()):
        # The method name is itself a heuristic; execute the constructive heuristic method
        simple_heuristic = SimpleHeuristic(method)
        return ConstructiveSolution(kp, items, simple_heuristic).getValue()
    else:
        return 0
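
For orientation, a minimal usage sketch of the dispatcher above. The Knapsack and Item constructor signatures used below are assumptions made for illustration only; the repository's actual constructors may differ.

# Hypothetical usage sketch: Knapsack(capacity) and Item(id, weight, profit)
# are assumed constructor signatures, not confirmed by this snippet.
items = [Item(i, w, p) for i, (w, p) in enumerate([(2, 3), (3, 4), (4, 6)])]
kp = Knapsack(5)

exact_value = solver('DP', kp.copy(), items.copy())             # exact dynamic programming
search_value = solver('RandomSearch', kp.copy(), items.copy())  # metaheuristic baseline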
Example #3
 def stats(self):
     # Count how many times each heuristic appears in the recorded sequence
     mhstats = dict()
     for heuristic in list(heuristicComparison.keys()):
         mhstats[heuristic] = 0
     for heuristic in self.sequenceHeuristics:
         mhstats[heuristic] += 1
     return mhstats
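
A small sketch of how this counter can be inspected after running RandomSearch from Example #1; kp and items are assumed to be built as in the earlier sketch.

# Hypothetical usage; kp and items are constructed as in the sketch under Example #1
kp_solved, mh = RandomSearch(kp, items)
print(kp_solved.getValue())  # value reached by the accepted sequence of heuristics
print(mh.stats())            # dict mapping each heuristic name to how often it was applied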
Example #4
def SimulatedAnnealing(kp: Knapsack,
                       items: List[Item],
                       n_iterations=100,
                       temp=200,
                       stopCriteria=10):
    # Simulated Annealing implementation
    #  Initialization of the variables
    mh = Metaheuristic()
    heuristics = list(heuristicComparison.keys())
    countNone = 0

    kp_best = kp.copy()
    mh_best = Metaheuristic()
    n_iterations = max(n_iterations, 2 * len(items))
    for i in range(n_iterations):
        if countNone == stopCriteria:
            # Stop criteria met
            break

        # Randomly choose the next heuristic
        nextHeuristic = np.random.choice(heuristics)
        kp_candidate = kp.copy()
        items_candidate = items.copy()
        nextItem = SimpleHeuristic(nextHeuristic).apply(
            kp_candidate, items_candidate)
        if nextItem is None:
            # Heuristic does not change the instance
            countNone += 1
            continue
        countNone = 0

        if kp_best.getValue() < kp_candidate.getValue():
            # Candidate improves the best solution found so far
            kp_best = kp_candidate.copy()
            mh_best = mh.copy()
            mh_best.addHeuristic(nextHeuristic)

        # Calculate the metropolis variable
        diff = kp.getValue() - kp_candidate.getValue()
        t = temp / (i + 1)
        if -10 <= -diff / t <= 0:
            metropolis = np.exp(-diff / t)
        elif -diff / t <= -10:
            metropolis = 0
        else:
            metropolis = 1
        # Acceptance criteria
        if diff < 0 or np.random.rand() <= metropolis:
            kp = kp_candidate
            items = items_candidate
            mh.addHeuristic(nextHeuristic)
        else:
            countNone += 1
    # Return the best solution reached
    return kp_best, mh_best
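
The acceptance test above is the usual Metropolis criterion: a candidate that lowers the knapsack value by diff is still accepted with probability exp(-diff / t), where t = temp / (i + 1) is the cooling schedule. A self-contained restatement of that rule, with purely illustrative numbers:

import numpy as np

def metropolis_probability(diff: float, temp: float, iteration: int) -> float:
    # Same clamping as in SimulatedAnnealing above: exponents below -10 are treated as 0
    t = temp / (iteration + 1)
    if diff <= 0:           # candidate is at least as good: always accept
        return 1.0
    if -diff / t <= -10:    # practically impossible move
        return 0.0
    return float(np.exp(-diff / t))

# Losing 5 units of value early in the run vs. late in the run
print(metropolis_probability(5, temp=200, iteration=0))   # ~0.975: very likely accepted
print(metropolis_probability(5, temp=200, iteration=99))  # ~0.082: rarely accepted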
Example #5
    def getHeuristic(self, items: List[Item]):
        # Prepare the characterization of the current state
        newState = np.pad(getAllFeatures(items), (0, self.n_id), 'constant')
        self.previousStates.append(newState)
        
        # Prepare the states considered as input for the LSTM model
        diffStates = self.numPrevStates - len(self.previousStates)
        if diffStates == -1:
            self.previousStates.pop(0)
            diffStates += 1
        while diffStates > 0:
            self.previousStates.append(newState)
            diffStates -= 1

        inputModel = self.previousStates.copy()
        inputModel = np.array(inputModel)
        inputModel = inputModel.reshape((1, self.numPrevStates, len(inputModel[0])))
        # Use the LSTM model
        outputModel = self.model.predict(inputModel)[0]

        # Check the history of the HH
        inputModel = str(self.previousStates)
        if inputModel not in self.timeline:
            self.timeline[inputModel] = set()
        for pastMoves in self.timeline[inputModel]:
            outputModel[pastMoves] = 0
        if sum(outputModel) == 0:
            return None
        outputModel /= sum(outputModel)

        if self.choiceSelector == 'probability':
            # Choose the heuristic by interpreting the LSTM output as a probability distribution
            nextMove = np.random.choice(range(len(outputModel)), p = outputModel)
        else:
            # Choose the heuristic with the maximum value in the output
            nextMove = np.argmax(outputModel)
        # Save the heuristic in the history
        self.timeline[inputModel].add(nextMove)
        # Return the chosen heuristic
        return list(heuristicComparison.keys())[nextMove]
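
getHeuristic feeds the model one window of shape (1, numPrevStates, number of features) and reads back one score per heuristic. Below is a minimal sketch of an LSTM with a compatible input contract; the use of tensorflow.keras, the layer width of 32, and the feature length of 8 are illustrative assumptions, since the code above only calls self.model.predict and does not show the real architecture.

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense

num_prev_states, num_features, num_heuristics = 2, 8, 4   # illustrative sizes

model = Sequential([
    LSTM(32, input_shape=(num_prev_states, num_features)),
    Dense(num_heuristics, activation='softmax'),
])

# getHeuristic builds exactly this shape: a batch of one window of past states
window = np.zeros((1, num_prev_states, num_features))
scores = model.predict(window)[0].astype(float)   # one score per heuristic
scores /= scores.sum()                            # renormalize, as getHeuristic does
next_move = np.random.choice(len(scores), p=scores)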
Example #6
    modelPath = 'Cache/hh_lstm.h5'
    resultPath = 'Cache/Performance.csv'

    # First phase
    #  Prepare the training dataset for the hyper-heuristic models
    generateTrainDataset(trainPath, True, trainDataset)

    # Second phase
    #  Train the ten hyper-heuristics based on LSTM
    HH = []
    for i in range(10):
        HH.append(Hyperheuristic('probability', trainModel = True, modelFilename = modelPath, trainFilename = trainPath, prevStates = 2))

    # Third phase
    #  Determine the methods to compare
    heuristics = list(heuristicComparison.keys())[:4]
    methods = heuristics + ['SimulatedAnnealing', 'RandomSearch', 'Hyperheuristic', 'MILP']
    #  Determine the number of times that each method will be repeated
    methodIterations = [1, 1, 1, 1, 10, 10, 10, 1]
    #  Prepare the dict to save the data
    resultsTestDict = dict()  
    for method in methods:
        resultsTestDict[method] = []
        resultsTestDict[f'{method}_time'] = []
    
    #  Obtain the path to each test instance 
    instances = obtainFilenames(tapia_path, testDataset)
    for instance in instances:
        # Read the instance
        n, W, weights, profits = loadInstance(instance)