Example #1
        def __init__(self, num_particles: int = 0, dims: int = 0, n: int = 0):

            self.logger = get_my_logger(__file__ + '.log')
            self.num_particles = num_particles
            self.dims = dims
            self.n = n
            self.swarm = [Particle(n, dims) for _ in range(num_particles)]
            self.optimum = 0.0
            self.error_thres = 0.0
            self.best_global_solution = 0.0

            self.c1 = self.c2 = 1.494  # acceleration coefficients for the velocity update

            self.start_global_update = 0
            self.stop_global_update = 0
            self.v_brake = 0.0

            self.func_name = ""
            self.func = None

            self.error_rates = []
            self.evaluations = []
            self.iteration = 0
            self.lowest_speed = 0.01
            self.best_global_solution = sys.maxsize
            self.best_global_point = np.zeros(self.dims)
            self.max_vel = np.zeros(self.dims)
            # random speed-up factor (not actually random yet, @todo)
            self.rand_speed_factor = self.dims**2
            self.v_brake = self.n / 2  # to slow down the swarm
            self.chaos_flag = False
            self.brake_flag = False
Example #2
    def run(self):
        if not self.is_setup:
            self.setup()

        iteration = 0
        while iteration < self.MaxIteration:
            # print(f'Iteration: {iteration}',end='\r')
            for p in self.particles:
                self.velocityUpdate.update(p,
                                           self.gbest.position,
                                           iteration=iteration)
                self.positionUpdate.update(p)
                # print(p,p.fitness,p.pbest_fitness)
                self.fitness(p)

            for p in self.particles:
                if p.fitness < p.pbest_fitness:
                    p.pbest_position = p.position
                    p.pbest_fitness = p.fitness
                    # print(f'Updating pbest: {p} -> {p.pbest_fitness}')

                    if p.fitness < self.gbest.fitness:
                        self.gbest = Particle(position=p.position,
                                              velocity=p.velocity,
                                              fitness=p.fitness)
                        # print(f'Updating gbest: {self.gbest} -> {self.gbest.fitness}')

            iteration += 1

        return self.gbest
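
For reference, the loop above follows the standard gbest PSO pattern: evaluate each particle, update the personal bests, update the global best, then move the swarm. A self-contained sketch of that pattern on the sphere function is shown below; the function and parameter names are illustrative and are not taken from the example above.

import numpy as np


def sphere(x):
    return float(np.sum(x ** 2))


def gbest_pso(fitness, dims=2, num_particles=20, max_iter=100,
              w=0.7, c1=1.494, c2=1.494, bounds=(-5.0, 5.0)):
    rng = np.random.default_rng(0)
    lo, hi = bounds
    x = rng.uniform(lo, hi, size=(num_particles, dims))      # positions
    v = rng.uniform(-1.0, 1.0, size=(num_particles, dims))   # velocities
    pbest_x = x.copy()
    pbest_f = np.array([fitness(p) for p in x])
    g = int(np.argmin(pbest_f))                               # index of the global best
    for _ in range(max_iter):
        r1 = rng.random((num_particles, dims))
        r2 = rng.random((num_particles, dims))
        v = w * v + c1 * r1 * (pbest_x - x) + c2 * r2 * (pbest_x[g] - x)
        x = np.clip(x + v, lo, hi)                            # keep particles in bounds
        f = np.array([fitness(p) for p in x])
        improved = f < pbest_f                                # update personal bests
        pbest_x[improved] = x[improved]
        pbest_f[improved] = f[improved]
        g = int(np.argmin(pbest_f))                           # update the global best
    return pbest_x[g], pbest_f[g]


best_x, best_f = gbest_pso(sphere)
print(best_x, best_f)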
Example #3
 def chaos_for_velocity(self, particle: Particle, i: int):
     r1 = np.random.ranf(self.dims)
     if np.sum(r1) < np.sum(particle.v):
         particle.v *= r1 * self.rand_speed_factor
         ri = np.random.randint(particle.dims)
         # constant factor to keep the chaos realistic
         particle.x[ri] = r1[0] * (
             (2 * self.n) - self.n) * self.ws[i] * 0.45
Example #4
    def __init__(self, func_fitness, dimensions, num_particles, max_iterations, inertia_weight, cognitive_constant,
                 social_constant, initial_position=None):
        self.convergence_time = -1
        self.best_position = []
        self.best_error = -1
        self.error_history = []
        self.dimensions = dimensions
        self.num_particles = num_particles
        self.max_iterations = max_iterations
        self.particle_positions = []
        self.best_position_history = []

        err_best_g = -1  # best error for group
        pos_best_g = []  # best position for group
        err_best_g_list = []

        start_time = time.time()

        for i in range(self.max_iterations):
            self.particle_positions.append([])

        # establish the swarm
        swarm = []
        for i in range(0, num_particles):
            swarm.append(Particle(self.dimensions, inertia_weight, cognitive_constant, social_constant))

        # begin optimization loop
        i = 0
        while i < self.max_iterations:
            # cycle through particles in swarm and evaluate fitness
            for j in range(0, self.num_particles):
                swarm[j].evaluate(func_fitness)

                # determine if current particle is the best (globally)
                if swarm[j].err < err_best_g or err_best_g == -1:
                    pos_best_g = list(swarm[j].position)
                    err_best_g = float(swarm[j].err)

            # Saving the best error values
            err_best_g_list.append(err_best_g)
            self.error_history.append(err_best_g)

            # cycle through swarm and update velocities and position
            for j in range(0, self.num_particles):
                x, y, z = swarm[j].position
                self.particle_positions[i].append([x, y, z])
                # swarm[j].update_velocity_intertia(pos_best_g)
                swarm[j].update_velocity_clerc(pos_best_g)
                swarm[j].update_position()
            i += 1
            a, b, c = pos_best_g
            self.best_position_history.append([a, b, c])

        self.convergence_time = time.time() - start_time
        self.best_error = err_best_g

        # record the final results
        self.best_position = pos_best_g
Example #5
    def initialize_particles(self):

        bounds = self.bounds
        del self.particles
        self.particles = list()
        self.gbest = None

        for _ in range(0, self.NroParticles):
            position = np.random.uniform(*bounds, size=self.dimensions)
            velocity = np.random.uniform(*bounds, size=self.dimensions)
            fitness = self.costFunction(position)

            particle = Particle(position, velocity, fitness=fitness)

            if self.gbest is None:
                self.gbest = Particle(position, velocity, fitness=fitness)
            elif fitness < self.gbest.fitness:
                self.gbest = Particle(position, velocity, fitness=fitness)

            self.particles.append(particle)
Example #6
 def update_velocity(self, particle: Particle, i: int):
     r = np.random.ranf(self.dims)
     particle.v = self.ws[i] * particle.v + r[0] * self.c1 \
                  * (particle.best_point - particle.x)
     if i > self.start_global_update:
         # start global update not in the beginning, but after a
         # set interval to increase diversity
         particle.v += self.c2 * r[1] * (self.best_global_point -
                                         particle.x)
     p_vel_abs = np.sum(np.abs(particle.v))
     if p_vel_abs < self.lowest_speed:
         # don't fall asleep...
         particle.v *= r
     # slow down particles and multiply for extra rand
     if self.brake_flag:
         particle.v = np.clip(particle.v, -self.v_brake, self.v_brake) * r
     # update highest velocity
     if np.sum(np.abs(self.max_vel)) < p_vel_abs:
         self.max_vel = particle.v
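
The update above delays the social (global-best) term until a warm-up interval has passed and then clamps the result. A standalone sketch of that idea follows; the parameter names are illustrative and are not the attributes of the class above.

import numpy as np


def delayed_social_velocity(v, x, pbest, gbest, w, c1, c2, iteration,
                            start_global_update, v_max,
                            rng=np.random.default_rng()):
    """Velocity update with a delayed social term and symmetric clamping."""
    r1, r2 = rng.random(2)
    v = w * v + c1 * r1 * (pbest - x)
    if iteration > start_global_update:
        # the social term is only switched on after a warm-up interval,
        # which keeps early exploration more diverse
        v += c2 * r2 * (gbest - x)
    return np.clip(v, -v_max, v_max)


v = delayed_social_velocity(v=np.zeros(3), x=np.ones(3), pbest=np.zeros(3),
                            gbest=np.full(3, 2.0), w=0.7, c1=1.494, c2=1.494,
                            iteration=10, start_global_update=5, v_max=1.0)
print(v)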
Example #7
def main():
    dimensions = 10
    generations = 100
    max_speed_percentage = 0.1
    neighborhood = 10
    particles = [Particle() for i in range(0, 20)]
    phi_one_max = 2.05
    phi_two_max = 2.05

    pso_algorithm_helper = PSOAlgorithm(neighborhood, generations, particles,
                                        phi_one_max, phi_two_max,
                                        max_speed_percentage, dimensions)

    pso_algorithm_helper.do_algorithm()
Example #8
    def __init__(self, parent, level, n=10, dim=2):
        """
            Simple Implementation of a Tree Node. The Nodes are supposed
            to stay at their level, only the particle is exchanged for simplicity.

        :param data:
        :param parent:
        :param level:
        :param: n feldausweitung
        """
        self.level = level
        self.children = []
        self.parent = parent
        self.particle = Particle(n, dim)
        self.weight = 1 # to adjust weight vHPSO, ^HPSO
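
As a rough illustration of how such level-bound nodes can be assembled into a tree, here is a stripped-down sketch; the Node below is a stand-in that carries a dummy particle, and the branching factor and depth are assumptions, not values from the project.

import random


class Node:
    """Stripped-down stand-in for the tree node above."""

    def __init__(self, parent, level):
        self.parent = parent
        self.level = level
        self.children = []
        self.particle = random.random()  # placeholder for a Particle
        self.weight = 1


def build_tree(parent, level, max_level, branching=3):
    """Recursively attach children until max_level is reached."""
    node = Node(parent, level)
    if level < max_level:
        node.children = [build_tree(node, level + 1, max_level, branching)
                         for _ in range(branching)]
    return node


root = build_tree(parent=None, level=1, max_level=3)
print(len(root.children), root.children[0].level)  # 3 2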
Example #9
    def update_ph_pso(self, node: Node):
        """"
        todo

        ph-pso: reset leafs, randomize local-best
        in this step, the tree is "one"

        """

        if node:
            if node.level <= 1:
                pass
            elif node.level == 2:
                r = np.random.ranf(self.tree.dims)
                # 're-randomize'; this may not be enough.
                node.particle.x *= 0.479 * r
            elif node.level == 3:
                node.particle = Particle(n=self.tree.n, dims=self.tree.dims)
            else:
                return
            for child in node.children:
                self.update_ph_pso(child)
Example #10
    def run(self, function_optimization: AbstractFunctionOptimization) -> list:
        search_space = Space(target=self.target,
                             target_error=self.target_error,
                             n_particles=self.n_particles,
                             function_optimization=function_optimization)
        particles_vector = [
            Particle(lim_min=function_optimization.lim_min,
                     lim_max=function_optimization.lim_max,
                     length=function_optimization.number_of_inputs)
            for _ in range(search_space.n_particles)
        ]
        search_space.particles = particles_vector
        search_space.print_particles()

        iteration = 0
        while iteration < self.n_iterations:
            print("Iteration = %s" % iteration)
            search_space.set_pbest()
            search_space.set_gbest()

            if abs(search_space.gbest_value - search_space.target) <= search_space.target_error:
                break

            search_space.move_particles(W=self.W, c1=self.c1, c2=self.c2)
            iteration += 1

        return search_space.gbest_position
Example #11
File: ipso.py  Project: pwalan/battery
def pso(input):
    """
    Algorithm entry point.
    :param input: {'soc': cur_soc, 't': [], 'voltage': [], 'current': []}
    :return: best position found
    """
    start_time = time.perf_counter()
    # algorithm parameters
    N = 250  # number of particles
    iter_num = 2000  # number of iterations
    upper_limit = [600, 100, 600, 300]  # upper bounds of the parameter search
    lower_limit = [400, 1, 100, 10]  # lower bounds of the parameter search
    particles = []  # particle data, stored in Particle instances
    c1 = 2
    c2 = 2  # c1 and c2 are the acceleration (learning) coefficients
    gbest = [-10000000000.0, []]  # best value and position over all particles
    # w = 0.3  # inertia weight; controls how aggressively the search space is explored

    # initialization
    vmax = [(upper_limit[i] - lower_limit[i]) / (N * 1000)
            for i in range(len(upper_limit))]  # maximum velocity
    for i in range(N):
        w = 0.9 - (0.9 - 0.1) / N * i  # dynamic inertia weight
        particle = Particle([], [], 0.0, [], 0.0)
        p = []
        v = []
        for k in range(len(upper_limit)):
            # update each component of the position
            pk = random.random() * upper_limit[k]
            if pk >= upper_limit[k]:
                pk = upper_limit[k] - vmax[k] / 100
            elif pk <= lower_limit[k]:
                pk = lower_limit[k] + vmax[k] / 100
            p.append(pk)
            # update each component of the velocity
            vk = random.random() * vmax[k]
            if vk >= vmax[k]:
                vk = vmax[k] - 0.1
            v.append(vk)
        particle.position = p
        particle.velocity = v
        particle.value = obj_func(particle.position, input)
        particle.best_value = particle.value
        particle.best_postion = particle.position
        # track the best value over all particles
        if gbest[0] < particle.best_value:
            gbest = [particle.best_value, particle.position]
        particles.append(particle)

    # print("初始化后最优值:", gbest)

    # save the computation progress to a file
    now = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
    filename = '/Users/alanp/Downloads/param/' + now + ".csv"
    # f = open(filename, 'a')
    # f.write("iteration,fitness,elapsed_time,aggregation\n")

    # main optimization loop
    for i in range(iter_num):
        for_start = time.perf_counter()
        # position of the previous global best
        old_best = gbest[1][:]
        aggregation = 0
        for j in range(N):
            p = particles[j].position
            v = particles[j].velocity
            pbest = particles[j].best_postion
            newV = []
            newP = []
            for k in range(len(p)):
                # update each component of the velocity
                vk = w * v[k] + c1 * random.random() * (
                    pbest[k] - p[k]) + c2 * random.random() * (gbest[1][k] -
                                                               p[k])
                if math.fabs(vk) >= vmax[k]:
                    if vk < 0:
                        vk = -vmax[k]
                    else:
                        vk = vmax[k]
                newV.append(vk)
                # update each component of the position
                pk = p[k] + vk
                if pk >= upper_limit[k]:
                    pk = upper_limit[k] - vmax[k] / 100
                elif pk <= lower_limit[k]:
                    pk = lower_limit[k] + vmax[k] / 100
                newP.append(pk)
            particles[j].velocity = newV
            particles[j].position = newP
            # evaluate the objective function
            particles[j].value = obj_func(particles[j].position, input)
            # update the personal best
            if particles[j].best_value < particles[j].value:
                particles[j].best_value = particles[j].value
                particles[j].best_postion = particles[j].position
            # track the best value over all particles
            if gbest[0] < particles[j].best_value:
                gbest = [particles[j].best_value, particles[j].position]
            # accumulate the aggregation (distance to the previous best position)
            v1 = numpy.array(particles[j].position)
            v2 = numpy.array(old_best)
            aggregation += numpy.sqrt(numpy.sum(numpy.square(v1 - v2)))

        # print("迭代次数:" + str(i + 1))
        # print("最佳值:" + str(gbest[0]))
        # print("最佳点:", gbest[1])
        # print("聚集度", aggregation / N)
        # f.write(str(i + 1) + "," + str(-gbest[0]) + "," + str(time.clock() - for_start) + "," + str(aggregation / N) + "\n")
    #f.close()
    time_consume = time.clock() - start_time
    print(input["soc"], ': 计算耗时:', time_consume, "s")
    return gbest[1]
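
The aggregation accumulated above is, once divided by N, the mean Euclidean distance between the current particle positions and the previous global best. A small standalone helper with the same effect is sketched here; the function name is mine, not from ipso.py.

import numpy


def mean_aggregation(positions, old_best):
    """Mean Euclidean distance of the particles to the previous global best."""
    positions = numpy.asarray(positions, dtype=float)
    old_best = numpy.asarray(old_best, dtype=float)
    return float(numpy.mean(numpy.linalg.norm(positions - old_best, axis=1)))


print(mean_aggregation([[1.0, 2.0], [3.0, 4.0]], [0.0, 0.0]))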
Example #12
 def update_position(self, particle: Particle):
     # move the particle, then clamp it so it cannot leave the search area
     particle.x = np.clip(particle.x + particle.v, -self.n, self.n)
Example #13
def kpso(input):
    """
    Algorithm entry point.
    :param input: {'soc': cur_soc, 't': [], 'voltage': [], 'current': []}
    :return: best position found and the elapsed time
    """
    start_time = time.perf_counter()
    # algorithm parameters
    N = 200  # number of particles
    iter_num = 1000  # number of iterations
    upper_limit = [600, 0.1, 500, 0.1]  # upper bounds of the parameter search
    lower_limit = [400, 0, 0, 0]  # lower bounds of the parameter search
    particles = []  # particle data, stored in Particle instances
    c1 = 2
    c2 = 2  # c1 and c2 are the acceleration (learning) coefficients
    gbest = [-10000000000.0, []]  # best value and position over all particles
    w = 0.5  # inertia weight; controls how aggressively the search space is explored
    aggregation_threshold = 100  # threshold on the aggregation
    first_Ath = False  # whether the aggregation threshold has been reached for the first time
    start_kmeans = False  # start operating on the clusters (sub-swarm based updates)
    K = 3  # number of clusters
    gbest_k1 = [-10000000000.0, []]  # best value and position in sub-swarm 1 (closest)
    gbest_k2 = [-10000000000.0, []]  # best value and position in sub-swarm 2 (intermediate)
    gbest_k3 = [-10000000000.0, []]  # best value and position in sub-swarm 3 (farthest)

    # initialization
    vmax = [(upper_limit[i] - lower_limit[i]) / 500.0
            for i in range(len(upper_limit))]  # maximum velocity
    for i in range(N):
        particle = Particle([], [], 0.0, [], 0.0)
        p = []
        v = []
        for k in range(len(upper_limit)):
            # update each component of the position
            pk = random.random() * upper_limit[k]
            if pk >= upper_limit[k]:
                pk = upper_limit[k] - vmax[k] / 100
            elif pk <= lower_limit[k]:
                pk = lower_limit[k] + vmax[k] / 100
            p.append(pk)
            # update each component of the velocity
            vk = random.random() * vmax[k]
            if vk >= vmax[k]:
                vk = vmax[k] - 0.1
            v.append(vk)
        particle.position = p
        particle.velocity = v
        particle.value = obj_func(particle.position, input)
        particle.best_value = particle.value
        particle.best_postion = particle.position
        # track the best value over all particles
        if gbest[0] < particle.best_value:
            gbest = [particle.best_value, particle.position]
        particles.append(particle)

    print("初始化后最优值:", gbest)

    # save the computation progress to a file
    now = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
    filename = '/Users/alanp/Downloads/param/' + now + ".csv"
    f = open(filename, 'a')
    f.write("iteration,fitness,elapsed_time,aggregation\n")

    # main optimization loop
    for i in range(iter_num):
        for_start = time.perf_counter()
        # position of the previous global best
        old_best = gbest[1][:]
        aggregation = 500
        for j in range(N):
            p = particles[j].position
            v = particles[j].velocity
            pbest = particles[j].best_postion
            newV = []
            newP = []
            for k in range(len(p)):
                # update each component of the velocity
                if not start_kmeans:
                    vk = w * v[k] + c1 * random.random() * (
                        pbest[k] -
                        p[k]) + c2 * random.random() * (gbest[1][k] - p[k])
                else:
                    # once the aggregation is below the threshold, switch the update strategy
                    vk = w * v[k] + c1 * random.random() * (
                        pbest[k] - p[k]) + c2 * random.random() * (
                            0.5 * (gbest_k1[1][k] - p[k]) + 0.3 *
                            (gbest_k2[1][k] - p[k]) + 0.2 *
                            (gbest_k3[1][k] - p[k]))
                if math.fabs(vk) >= vmax[k]:
                    if vk < 0:
                        vk = -vmax[k]
                    else:
                        vk = vmax[k]
                newV.append(vk)
                # update each component of the position
                pk = p[k] + vk
                if pk >= upper_limit[k]:
                    pk = upper_limit[k] - vmax[k] / 100
                elif pk <= lower_limit[k]:
                    pk = lower_limit[k] + vmax[k] / 100
                newP.append(pk)
            particles[j].velocity = newV
            particles[j].position = newP
            # evaluate the objective function
            particles[j].value = obj_func(particles[j].position, input)
            # update the personal best
            if particles[j].best_value < particles[j].value:
                particles[j].best_value = particles[j].value
                particles[j].best_postion = particles[j].position
            # track the best value over all particles
            if gbest[0] < particles[j].best_value:
                gbest = [particles[j].best_value, particles[j].position]
            # accumulate the aggregation (distance to the previous best position)
            v1 = numpy.array(particles[j].position)
            v2 = numpy.array(old_best)
            aggregation += numpy.sqrt(numpy.sum(numpy.square(v1 - v2)))

        # check whether the aggregation has dropped below the threshold
        if aggregation / N < aggregation_threshold:
            if not first_Ath:
                first_Ath = True
                start_kmeans = True
            if start_kmeans:
                start_kmeans = False
                # run k-means once to split the swarm
                print("Starting clustering")
                dataSet = []
                for k in range(N):
                    dataSet.append(particles[k].position)
                centroids, clusterAssment = kmeans(numpy.mat(dataSet), K)
                clusterAssment = clusterAssment.tolist()
                k_mark = []
                obj_tmps = []
                for l in range(K):
                    obj_tmps.append(abs(obj_func(centroids[l], input)))
                for l in range(K):
                    if obj_tmps[l] == min(obj_tmps):
                        k_mark.append(1)
                    elif obj_tmps[l] == max(obj_tmps):
                        k_mark.append(3)
                    else:
                        k_mark.append(2)
                print(obj_tmps)
                print(k_mark)
            # compute gbest_k1, gbest_k2 and gbest_k3 from the clustering result
            for m in range(N):
                tmp = int(clusterAssment[m][0])
                if tmp == 0:
                    if gbest_k1[0] < particles[m].best_value:
                        gbest_k1 = [
                            particles[m].best_value, particles[m].position
                        ]
                elif tmp == 1:
                    if gbest_k2[0] < particles[m].best_value:
                        gbest_k2 = [
                            particles[m].best_value, particles[m].position
                        ]
                elif tmp == 2:
                    if gbest_k3[0] < particles[m].best_value:
                        gbest_k3 = [
                            particles[m].best_value, particles[m].position
                        ]
            print(gbest_k1)
            print(gbest_k2)
            print(gbest_k3)
            print("...")

        print("迭代次数:" + str(i + 1))
        print("最佳值:" + str(gbest[0]))
        print("最佳点:", gbest[1])
        print("聚集度", aggregation / N)
        f.write(
            str(i + 1) + "," + str(-gbest[0]) + "," +
            str(time.clock() - for_start) + "," + str(aggregation / N) + "\n")

    f.close()
    time_consume = time.perf_counter() - start_time
    print('Elapsed time:', time_consume, "s")
    return gbest[1], time_consume
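
kpso relies on a project-local kmeans helper for the clustering step. A rough equivalent of the split-and-pick-the-best-per-cluster logic, sketched with scikit-learn's KMeans (an assumption; the project itself does not use scikit-learn) and hypothetical names:

import numpy as np
from sklearn.cluster import KMeans


def cluster_bests(positions, values, K=3, seed=0):
    """Split particles into K clusters and return [best value, position] per cluster."""
    positions = np.asarray(positions, dtype=float)
    labels = KMeans(n_clusters=K, n_init=10, random_state=seed).fit_predict(positions)
    bests = []
    for k in range(K):
        idx = np.flatnonzero(labels == k)
        j = idx[np.argmax(np.asarray(values)[idx])]  # maximization, as in kpso
        bests.append([values[j], positions[j].tolist()])
    return bests


rng = np.random.default_rng(0)
pos = rng.uniform(0.0, 1.0, size=(20, 4))
vals = [-float(np.sum(p ** 2)) for p in pos]
print(cluster_bests(pos, vals))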