Example #1
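These snippets come from one project and assume the imports below. `AL_BF` (providing `writeData`) and `print_fun` are project-specific helpers not shown here; minimal sketches of `seqEval`, `check_feasibility`, and a `take_step` class follow the examples that use them.

import numpy as np
import scipy.optimize as spy
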
def MonotonicBasinHopping_batch(f, x, take_step, *args, **kwargs):
    """
    Step for jump is small, as minimum cluster together.
    Jump from current min until n_no improve reaches the lim, then the jump is random again.
    """
    f_batch = kwargs.get('f_batch', False)  # Function allows batch evaluation
    f_opt = kwargs.get('f_opt', None)  # Function to optimize locally
    nind = kwargs.get('nind', 100)
    niter = kwargs.get('niter', 100)
    niter_success = kwargs.get('niter_success', 50)
    bnds = kwargs.get('bnds', None)
    jumpMagnitude_default = kwargs.get(
        'jumpMagnitude', 0.1)  # Small jumps to search around the minimum
    tolGlobal = kwargs.get('tolGlobal', 1e-5)

    niter_local = kwargs.get('niter_local', 50)
    tolLocal = kwargs.get('tolLocal', 1e2)

    n_itercounter = 1
    n_noimprove = np.zeros(nind)

    Best = np.copy(x)  # Copy to avoid aliasing: Best is updated element-wise later
    bestMin = seqEval(f, x)
    previousMin = np.copy(bestMin)
    jumpMagnitude = np.ones(nind) * jumpMagnitude_default
    accepted = np.zeros(nind)
    feasibility = np.zeros(nind)

    while n_itercounter < niter:
        n_itercounter += 1

        # Perturb the decision vector until one is found within the bounds
        x_test = np.zeros(np.shape(x))
        for ind in range(nind):
            feasible = False
            while not feasible:
                x_test[ind] = take_step.call(x[ind], jumpMagnitude[ind])
                feasible = bnds is None or check_feasibility(x_test[ind], bnds)

        # Local optimization
        if f_opt is not None:
            currentMin = f_opt(x_test)
            solutionLocal = x_test
        else:
            currentMin = np.zeros((nind))
            solutionLocal = np.zeros(np.shape(x))
            for ind in range(nind):
                solLocal = spy.minimize(f, x_test[ind], method='SLSQP',
                                        tol=tolLocal, bounds=bnds,
                                        options={'maxiter': niter_local})
                solutionLocal[ind] = solLocal.x

                feasibility[ind] = check_feasibility(solLocal.x, bnds)
                if not feasibility[ind]:
                    print("Warning: point out of bounds")

            currentMin = seqEval(f, solutionLocal)

        for ind in range(nind):

            # Update the current point from which to jump: after a long jump or
            # when the solution improved
            if jumpMagnitude[ind] == 1 or currentMin[ind] < previousMin[ind]:
                x[ind] = solutionLocal[ind]

            # Check improvement
            if currentMin[ind] < bestMin[ind] and feasibility[ind]:  # Improvement
                Best[ind] = x[ind]
                bestMin[ind] = currentMin[ind]
                accepted[ind] = True
                # If the improvement is small, count it as no improvement
                if (previousMin[ind] - currentMin[ind]) < tolGlobal:
                    n_noimprove[ind] += 1
                else:
                    n_noimprove[ind] = 0
                jumpMagnitude[ind] = jumpMagnitude_default

            elif n_noimprove[ind] == niter_success:  # Switch to a big jump
                accepted[ind] = False
                jumpMagnitude[ind] = 1
                n_noimprove[ind] = 0  # Restart count so it jumps around the new point
            else:
                accepted[ind] = False
                jumpMagnitude[ind] = jumpMagnitude_default
                n_noimprove[ind] += 1

            previousMin[ind] = currentMin[ind]

        # Save results every 20 iterations
        if n_itercounter % 20 == 0:
            AL_BF.writeData(Best, 'w', './OptSol/allMBH_batch.txt')

        print("iter", n_itercounter)
        print("Current min vs best one", min(currentMin), min(bestMin))
        print_fun(min(currentMin), min(bestMin), np.count_nonzero(accepted))

    # Print solution
    # print_sol(Best, bestMin, n_itercounter, niter_success)
    return Best, bestMin
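
The example relies on the project helpers `seqEval` and `check_feasibility` (as well as `print_fun` and `AL_BF.writeData`). A minimal sketch of the first two, inferred from how they are called and assuming `bnds` is a sequence of (low, high) pairs; the project's actual implementations may differ:

import numpy as np

def seqEval(f, x):
    # Evaluate f sequentially on each row of x (one individual per row)
    return np.array([f(x[i]) for i in range(len(x))])

def check_feasibility(x, bnds):
    # True if every component of x lies within its (low, high) bounds
    return all(low <= xi <= high for xi, (low, high) in zip(x, bnds))
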
Example #2
def EvolAlgorithm_cons(f, bounds, *args, **kwargs):
    """
    EvolAlgorithm: evolutionary algorithm
    INPUTS:
        f: function to be analyzed
        x: decision variables
        bounds: bounds of x to initialize the random function
        x_add: additional parameters for the function. As a vector
        ind: number of individuals. 
        cuts: number of cuts to the variable
        tol: tolerance for convergence
        max_iter: maximum number of iterations (generations)
        max_iter_success
        elitism: percentage of population elitism
        mut: mutation rate
        immig: migration rate (new individuals)
        cons: list of functions to constrain a function. Function should return a vector. return[0] = 0 if unfeasible, return[1] returns penalty
    """
    x_add = kwargs.get('x_add', False)
    ind = kwargs.get('ind', 100)
    cuts = kwargs.get('cuts', 1)
    tol = kwargs.get('tol', 1e-4)
    max_iter = int(kwargs.get('max_iter', 1e3))
    max_iter_success = int(kwargs.get('max_iter_success', 1e2))
    elitism = kwargs.get('elitism', 0.1)
    mut = kwargs.get('mutation', 0.01)
    immig = kwargs.get('immig', 0.01)
    cons = kwargs.get('cons', [])  # Default: no constraints

    ###############################################
    ###### GENERATION OF INITIAL POPULATION #######
    ###############################################
    pop_0 = np.zeros([ind, len(bounds) + 1])
    for i in range(len(bounds)):
        pop_0[:, i + 1] = np.random.rand(ind) * (bounds[i][1] -
                                                 bounds[i][0]) + bounds[i][0]

    ###############################################
    ###### FITNESS EVALUATION               #######
    ###############################################
    if x_add is False:  # No additional arguments needed
        for i in range(ind):
            pop_0[i, 0] = f(pop_0[i, 1:])
    else:
        for i in range(ind):
            pop_0[i, 0] = f(pop_0[i, 1:], x_add)

    for j in range(ind):
        for i in range(len(cons)):
            feas = cons[i](pop_0[j, 1:])  # Evaluate constraint function
            if feas[0] == 0:  # Infeasible
                pop_0[j, 0] += feas[1]  # Add penalty to the infeasible individual

    Sol = pop_0[pop_0[:, 0].argsort()]
    minVal = min(Sol[:, 0])
    x_minVal = Sol[0, 1:]

    ###############################################
    ###### NEXT GENERATION                  #######
    ###############################################
    noImprove = 0
    counter = 0
    lastMin = minVal

    Best = np.zeros([max_iter + 1, len(bounds) + 1])
    while noImprove <= max_iter_success and counter <= max_iter:

        ###############################################
        #Generate descendents

        #Elitism
        ind_elit = int(round(elitism * ind))

        children = np.zeros(np.shape(pop_0))
        children[:, 1:] = Sol[:, 1:]

        #Separate into the number of parents
        pop = np.zeros(np.shape(pop_0))
        pop[:ind_elit, :] = children[:ind_elit, :]  #Keep best ones
        np.random.shuffle(children)  # Shuffle for random pairing

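        # Single-point crossover: pair consecutive shuffled parents and swap
        # their tails at a random cut position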
        for j in range((len(children) - ind_elit) // 2):
            if len(bounds) == 2:
                cut = 1
            else:
                cut = np.random.randint(1, len(bounds) - 1)

            pop[ind_elit + 2 * j, 1:] = np.concatenate(
                (children[2 * j, 1:cut + 1], children[2 * j + 1, cut + 1:]),
                axis=0)
            pop[ind_elit + 2 * j + 1, 1:] = np.concatenate(
                (children[2 * j + 1, 1:cut + 1], children[2 * j, cut + 1:]),
                axis=0)
        if (len(children) - ind_elit) % 2 != 0:
            pop[-1, :] = children[-1, :]  # Odd count: carry over one shuffled child

        #Mutation
        for i in range(ind):
            for j in range(len(bounds)):
                if np.random.rand() < mut:  # Probability of mutation
                    pop[i, j + 1] = np.random.rand() * (bounds[j][1] -
                                                        bounds[j][0]) + bounds[j][0]

        #Immigration: replace the last individuals with new random ones
        ind_immig = int(round(immig * ind))
        if ind_immig > 0:  # Guard: pop[-0:] would select the whole population
            for i in range(len(bounds)):
                pop[-ind_immig:, i + 1] = np.random.rand(ind_immig) * (
                    bounds[i][1] - bounds[i][0]) + bounds[i][0]

        ###############################################
        # Fitness
        if x_add is False:  # No additional arguments needed
            for i in range(ind):
                pop[i, 0] = f(pop[i, 1:])
        else:
            for i in range(ind):
                pop[i, 0] = f(pop[i, 1:], x_add)

        for j in range(ind):
            for i in range(len(cons)):
                feas = cons[i](pop[j, 1:])  # Evaluate constraint function
                if feas[0] == 0:  # Infeasible
                    pop[j, 0] += feas[1]  # Add penalty to the infeasible individual

        Sol = pop[pop[:, 0].argsort()]
        minVal = min(Sol[:, 0])

        ###############################################
        #Check convergence
        if minVal >= lastMin:
            noImprove += 1

        else:
            lastMin = minVal
            x_minVal = Sol[0, 1:]
            noImprove = 0
            Best[counter, :] = Sol[0, :]

        print(counter, "Minimum: ", minVal)
        counter += 1  #Count generations
        if counter % 20 == 0:
            AL_BF.writeData(x_minVal, 'w', 'SolutionEA.txt')

        # print(counter)
    print("minimum:", lastMin)
    print("Iterations:", counter)
    print("Iterations with no improvement:", noImprove)

    return x_minVal, lastMin
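
A minimal usage sketch with a toy objective and one constraint written in the format the `cons` docstring describes (return[0] == 0 flags an infeasible point, return[1] is the penalty); all names and values here are illustrative:

import numpy as np

def sphere(x):
    return np.sum(x**2)

def con_sum(x):
    # Require x[0] + x[1] >= 1; penalize proportionally to the violation
    viol = 1.0 - (x[0] + x[1])
    if viol > 0:
        return [0, 10.0 * viol]  # Infeasible: flag 0 plus a penalty
    return [1, 0.0]              # Feasible: no penalty

bounds = [(-5, 5), (-5, 5)]
x_best, f_best = EvolAlgorithm_cons(sphere, bounds, ind=50, max_iter=200,
                                    cons=[con_sum])
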
Example #3
def MonotonicBasinHopping(f, x, take_step, *args, **kwargs):
    """
    Step for jump is small, as minimum cluster together.
    Jump from current min until n_no improve reaches the lim, then the jump is random again.
    """

    niter = kwargs.get('niter', 100)
    niter_success = kwargs.get('niter_success', 50)
    niter_local = kwargs.get('niter_local', 50)
    bnds = kwargs.get('bnds', None)
    cons = kwargs.get('cons', 0)
    jumpMagnitude_default = kwargs.get(
        'jumpMagnitude', 0.1)  # Small jumps to search around the minimum
    tolLocal = kwargs.get('tolLocal', 1e2)
    tolGlobal = kwargs.get('tolGlobal', 1e-5)

    n_itercounter = 1
    n_noimprove = 0

    Best = x
    bestMin = f(x)
    previousMin = f(x)
    jumpMagnitude = jumpMagnitude_default

    while n_itercounter < niter:
        n_itercounter += 1

        # Perturb the decision vector until one is found within the bounds
        feasible = False
        while not feasible:
            x_test = take_step.call(x, jumpMagnitude)
            feasible = bnds is None or check_feasibility(x_test, bnds)
            # if feasible:
            #     feasible = check_constraints(x, cons)

        # Local optimization
        # solutionLocal = spy.minimize(f, x, method='COBYLA', constraints=cons,
        #                              options={'maxiter': niter_local})
        if isinstance(cons, int):  # Default cons=0 means no constraints
            solutionLocal = spy.minimize(f, x_test, method='SLSQP',
                                         tol=tolLocal, bounds=bnds,
                                         options={'maxiter': niter_local})
        else:
            solutionLocal = spy.minimize(f, x_test, method='SLSQP',
                                         tol=tolLocal, bounds=bnds,
                                         options={'maxiter': niter_local},
                                         constraints=cons)
        currentMin = f(solutionLocal.x)
        feasible = check_feasibility(solutionLocal.x, bnds)

        # if feasible == True: # jump from current point even if it is not optimum
        #     x = solutionLocal.x

        # Update the current point from which to jump: after a long jump or
        # when the solution improved
        if jumpMagnitude == 1 or currentMin < previousMin:
            x = solutionLocal.x

        # Check improvement
        if currentMin < bestMin and feasible:  # Improvement
            Best = x
            bestMin = currentMin
            accepted = True

            # If the improvement is small, count it as no improvement
            if (previousMin - currentMin) < tolGlobal:
                n_noimprove += 1
            else:
                n_noimprove = 0
            jumpMagnitude = jumpMagnitude_default

        elif n_noimprove == niter_success:  # Not much improvement
            accepted = False
            jumpMagnitude = 1
            n_noimprove = 0  # Restart count so that it performs jumps around a point
        else:
            accepted = False
            jumpMagnitude = jumpMagnitude_default
            n_noimprove += 1

        previousMin = currentMin

        # Save results every 20 iterations
        if n_itercounter % 20 == 0:
            AL_BF.writeData(Best, 'w', 'SolutionMBH_self.txt')

        print("iter", n_itercounter)
        print("Current min vs best one", currentMin, bestMin)
        print_fun(f, x, accepted)

    # Print solution
    # print_sol(Best, bestMin, n_itercounter, niter_success)
    return Best, bestMin
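
The `take_step` argument is expected to expose a call(x, magnitude) method that returns a perturbed copy of x. A minimal sketch of such a class plus a usage example, assuming uniform perturbations scaled by the bound widths; the project's real step class may differ:

import numpy as np

class RandomStep:
    """Uniform random step scaled by the width of each bound."""
    def __init__(self, bnds):
        self.width = np.array([b[1] - b[0] for b in bnds])

    def call(self, x, magnitude):
        # magnitude ~0.1 for local jumps around a minimum, 1 for global jumps
        return x + magnitude * self.width * np.random.uniform(-1, 1, len(x))

bnds = [(-5, 5)] * 4
step = RandomStep(bnds)
x0 = np.random.uniform(-5, 5, 4)
best_x, best_f = MonotonicBasinHopping(lambda x: np.sum(x**2), x0, step,
                                       bnds=bnds, niter=50)
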
Example #4
def EvolAlgorithm_integerinput(f, bounds, *args, **kwargs):
    """
    EvolAlgorithm_integerinput: evolutionary algorithm, some inputs will be fixed to integers if decided
    INPUTS:
        f: function to be analyzed
        x: decision variables
        bounds: bounds of x to initialize the random function
        x_add: additional parameters for the function. As a vector
        ind: number of individuals. 
        cuts: number of cuts to the variable
        tol: tolerance for convergence
        max_iter: maximum number of iterations (generations)
        max_iter_success
        elitism: percentage of population elitism
        bulk_fitness: if True, the data has to be passed to the function all 
                    at once as a matrix with each row being an individual
        int_input: force some inputs to be integers. Vector with zero (not integer) or one if integer
    """
    x_add = kwargs.get('x_add', False)
    ind = kwargs.get('ind', 100)
    cuts = kwargs.get('cuts', 1)
    tol = kwargs.get('tol', 1e-4)
    max_iter = int(kwargs.get('max_iter', 1e3))
    max_iter_success = int(kwargs.get('max_iter_success', 1e2))
    elitism = kwargs.get('elitism', 0.1)
    mut = kwargs.get('mutation', 0.01)
    bulk = kwargs.get('bulk_fitness', False)
    int_input = kwargs.get('int_input', np.zeros(len(bounds)))

    def f_evaluate(pop_0):
        if bulk:
            if x_add is False:
                pop_0[:, 0] = f(pop_0[:, 1:])
            else:
                pop_0[:, 0] = f(pop_0[:, 1:], x_add)
        else:
            if x_add is False:  # No additional arguments needed
                for i in range(ind):
                    pop_0[i, 0] = f(pop_0[i, 1:])
            else:
                for i in range(ind):
                    pop_0[i, 0] = f(pop_0[i, 1:], x_add)

        return pop_0

    ###############################################
    ###### GENERATION OF INITIAL POPULATION #######
    ###############################################
    pop_0 = np.zeros([ind, len(bounds) + 1])
    for i in range(len(bounds)):
        pop_0[:, i + 1] = np.random.uniform(low=bounds[i][0],
                                            high=bounds[i][1],
                                            size=ind)
        if int_input[i] != 0:  # Force it to be an integer
            pop_0[:, i + 1] = [int(pop_0[indx, i + 1]) for indx in range(ind)]

    # print(pop_0)  # Debug: inspect the initial population
    ###############################################
    ###### FITNESS EVALUATION               #######
    ###############################################
    pop_0 = f_evaluate(pop_0)

    Sol = pop_0[pop_0[:, 0].argsort()]
    minVal = min(Sol[:, 0])
    x_minVal = Sol[0, 1:]

    ###############################################
    ###### NEXT GENERATION                  #######
    ###############################################
    noImprove = 0
    counter = 0
    lastMin = minVal

    Best = np.zeros([max_iter + 1, len(bounds) + 1])
    while noImprove <= max_iter_success and counter <= max_iter:

        ###############################################
        #Generate descendents

        #Elitism
        ind_elit = int(round(elitism * ind))

        children = np.zeros(np.shape(pop_0))
        children[:, 1:] = Sol[:, 1:]

        #Separate into the number of parents
        pop = np.zeros(np.shape(pop_0))
        pop[:ind_elit, :] = children[:ind_elit, :]  #Keep best ones
        np.random.shuffle(children)  # Shuffle for random pairing

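        # Single-point crossover: pair consecutive shuffled parents and swap
        # their tails at a random cut position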
        for j in range((len(children) - ind_elit) // 2):
            if len(bounds) == 2:
                cut = 1
            else:
                cut = np.random.randint(1, len(bounds) - 1)

            pop[ind_elit + 2 * j, 1:] = np.concatenate(
                (children[2 * j, 1:cut + 1], children[2 * j + 1, cut + 1:]),
                axis=0)
            pop[ind_elit + 2 * j + 1, 1:] = np.concatenate(
                (children[2 * j + 1, 1:cut + 1], children[2 * j, cut + 1:]),
                axis=0)

        if (len(children) - ind_elit) % 2 != 0:
            pop[-1, :] = children[-1, :]  # Odd count: carry over one shuffled child

        #Mutation
        for i in range(ind):
            for j in range(len(bounds)):
                if np.random.rand() < mut:  # Probability of mutation
                    pop[i, j + 1] = np.random.rand() * (bounds[j][1] -
                                                        bounds[j][0]) + bounds[j][0]
                    if int_input[j] != 0:  # Force the mutated gene to an integer
                        pop[i, j + 1] = int(pop[i, j + 1])
        ###############################################
        # Fitness
        pop = f_evaluate(pop)

        Sol = pop[pop[:, 0].argsort()]
        minVal = min(Sol[:, 0])

        ###############################################
        #Check convergence
        if minVal >= lastMin:
            noImprove += 1
#         elif abs(lastMin-minVal)/lastMin > tol:
#             noImprove += 1
        else:
            lastMin = minVal
            x_minVal = Sol[0, 1:]
            noImprove = 0
            Best[counter, :] = Sol[0, :]

        print(counter, "Minimum: ", minVal)
        counter += 1  #Count generations
        if counter % 20 == 0:
            AL_BF.writeData(x_minVal, 'w', 'SolutionEA.txt')

        # print(counter)
    print("minimum:", lastMin)
    print("Iterations:", counter)
    print("Iterations with no improvement:", noImprove)

    return x_minVal, lastMin
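
A minimal usage sketch, assuming a mixed integer/continuous problem; the objective, bounds, and settings are illustrative only. int_input=[1, 0] forces the first variable to integer values:

import numpy as np

def mixed_obj(x):
    # x[0] is treated as an integer level, x[1] as a continuous variable
    return (x[0] - 3)**2 + (x[1] - 0.5)**2

bounds = [(0, 10), (-2, 2)]
x_best, f_best = EvolAlgorithm_integerinput(mixed_obj, bounds, ind=40,
                                            max_iter=100, int_input=[1, 0])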