def PSO_purana(costFunc, bounds, maxiter, swarm_init=None):
    """Plain sequential PSO: minimize costFunc over bounds for maxiter iterations.

    swarm_init is required despite its default; it supplies the initial particle
    positions and implicitly fixes the swarm size and dimensionality.
    """
    num_dimensions = len(swarm_init[0])
    err_best_g = -1  # best error for the group (-1 = not yet set)
    pos_best_g = []  # best position for the group
    num_particles = len(swarm_init)

    # establish the swarm
    swarm = create_n_particles(num_particles, num_dimensions, swarm_init)

    # begin optimization loop
    i = 0
    while i < maxiter:
        # cycle through particles in the swarm and evaluate fitness
        for j in range(num_particles):
            swarm[j]['pos_best_i'], swarm[j]['err_best_i'] = evaluate(
                costFunc, swarm[j])

            # determine if the current particle is the best (globally)
            if swarm[j]['err_i'] < err_best_g or err_best_g == -1:
                pos_best_g = list(swarm[j]['position_i'])
                err_best_g = float(swarm[j]['err_i'])

        # cycle through the swarm and update velocities and positions
        for j in range(num_particles):
            swarm[j]['velocity_i'] = update_velocity(pos_best_g, swarm[j])
            swarm[j]['position_i'] = update_position(bounds, swarm[j])
        i += 1

    return pos_best_g[0], err_best_g
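
All three examples rely on helpers (create_n_particles, evaluate, update_velocity, update_position) that are not shown in this listing. Below is a minimal sketch of what they could look like, assuming a dict-per-particle layout with numpy positions, the -1 "not yet set" sentinel used by the callers, and a standard inertia/cognitive/social velocity update; the coefficients W, C1, C2 and the clipping scheme are illustrative assumptions, not taken from the original code.

import random
import numpy as np

W, C1, C2 = 0.5, 1.0, 2.0  # assumed inertia and acceleration coefficients


def create_n_particles(num_particles, num_dimensions, swarm_init):
    # One dict per particle: current position/velocity, personal best, and errors.
    return [{
        'position_i': np.array(swarm_init[k], dtype=float),
        'velocity_i': np.random.uniform(-1, 1, num_dimensions),
        'pos_best_i': [],   # personal best position (empty until first evaluation)
        'err_best_i': -1,   # personal best error (-1 = not yet set)
        'err_i': -1,        # current error
    } for k in range(num_particles)]


def evaluate(costFunc, particle):
    # Score the current position, store the error on the particle, and return
    # the (possibly updated) personal best position and error.
    particle['err_i'] = costFunc(particle['position_i'])
    if particle['err_i'] < particle['err_best_i'] or particle['err_best_i'] == -1:
        return list(particle['position_i']), float(particle['err_i'])
    return particle['pos_best_i'], particle['err_best_i']


def update_velocity(pos_best_g, particle):
    # Inertia term plus cognitive pull (personal best) and social pull (global best).
    r1, r2 = random.random(), random.random()
    cognitive = C1 * r1 * (np.asarray(particle['pos_best_i']) - particle['position_i'])
    social = C2 * r2 * (np.asarray(pos_best_g) - particle['position_i'])
    return W * particle['velocity_i'] + cognitive + social


def update_position(bounds, particle):
    # Move by the velocity and clip each coordinate to its [lower, upper] bound.
    lower = np.array([b[0] for b in bounds], dtype=float)
    upper = np.array([b[1] for b in bounds], dtype=float)
    return np.clip(particle['position_i'] + particle['velocity_i'], lower, upper)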
Example #2
def PSO(costFunc,
        bounds,
        maxiter,
        shared_list,
        return_list,
        l,
        num_particles=None,
        swarm_init=None,
        log=True,
        t_list=None):
    """Parallel PSO worker: one swarm per process, synchronized through shared_list.

    shared_list holds [best position, best error] shared across processes, l is the
    lock guarding it, and return_list collects each worker's final best position.
    swarm_init is required; its length overrides the num_particles argument.
    """
    num_dimensions = len(swarm_init[0])
    err_best_g = -1  # best error for the group (-1 = not yet set)
    pos_best_g = []  # best position for the group
    num_particles = len(swarm_init)

    # establish the swarm
    swarm = create_n_particles(num_particles, num_dimensions, swarm_init)

    # begin optimization loop
    i = 0
    while i < maxiter:
        # cycle through particles in the swarm and evaluate fitness
        for j in range(num_particles):
            best_pos, swarm[j]['err_best_i'] = evaluate(costFunc, swarm[j])
            swarm[j]['pos_best_i'] = best_pos

            # determine if the current particle is the best (globally)
            if swarm[j]['err_i'] < err_best_g or err_best_g == -1:
                pos_best_g = list(swarm[j]['position_i'])
                err_best_g = float(swarm[j]['err_i'])

        # halfway through the run, synchronize with the other swarms: either
        # publish this swarm's best or adopt the shared ("galactic") best.
        # The lock serializes access to the shared manager list.
        if i == maxiter // 2:
            l.acquire()
            best_galactic_pos = shared_list[0]
            best_galactic_err = shared_list[1]
            if err_best_g < best_galactic_err and err_best_g != -1:
                shared_list[1] = err_best_g
                shared_list[0] = pos_best_g
            else:
                err_best_g = float(best_galactic_err)
                pos_best_g = [best_galactic_pos]
            l.release()

        # cycle through the swarm and update velocities and positions
        for j in range(num_particles):
            swarm[j]['velocity_i'] = update_velocity(pos_best_g, swarm[j])
            swarm[j]['position_i'] = update_position(bounds, swarm[j])

        if log and t_list is not None:
            t_list.append(err_best_g)  # per-iteration trace of the best error
        i += 1

    return_list.append(pos_best_g[0])
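
A sketch of how this parallel variant might be driven, assuming the helper sketch above; the sphere cost function, the swarm sizes, the bounds, and the float('inf') initial shared error are illustrative choices, not taken from the original code.

import multiprocessing as mp
import numpy as np


def sphere(x):
    # Illustrative cost function: sum of squares, minimized at the origin.
    return float(np.sum(np.asarray(x) ** 2))


if __name__ == '__main__':
    dims, particles_per_proc, n_procs, iters = 5, 10, 2, 100
    bounds = [(-10, 10)] * dims

    manager = mp.Manager()
    shared_list = manager.list([[0.0] * dims, float('inf')])  # [best position, best error]
    return_list = manager.list()   # each worker appends its final best position
    t_list = manager.list()        # per-iteration error trace (log=True)
    lock = mp.Lock()

    workers = []
    for _ in range(n_procs):
        swarm_init = [np.random.uniform(-10, 10, dims) for _ in range(particles_per_proc)]
        p = mp.Process(target=PSO,
                       args=(sphere, bounds, iters, shared_list, return_list, lock),
                       kwargs={'swarm_init': swarm_init, 'log': True, 't_list': t_list})
        workers.append(p)
        p.start()
    for p in workers:
        p.join()

    print('shared best error after all workers:', shared_list[1])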
Example #3
def PSO(classifier,
        bounds,
        maxiter,
        shared_list,
        return_list,
        l,
        num_particles=None,
        swarm_init=None,
        pso_train_data=None):
    """Parallel PSO worker for training a classifier: particles encode weight vectors.

    evaluate() scores each particle with the classifier on pso_train_data; shared_list
    holds [best position, best error] shared across processes and l is the lock
    guarding it. swarm_init is required; its length overrides num_particles.
    """
    # create minibatches inside PSO
    num_dimensions = len(swarm_init[0])
    err_best_g = -1  # best error for the group (-1 = not yet set)
    pos_best_g = []  # best position for the group
    num_particles = len(swarm_init)

    # establish the swarm (initialize the particle population from swarm_init)
    swarm = create_n_particles(num_particles, num_dimensions, swarm_init)

    # begin optimization loop
    i = 0
    while i < maxiter:
        # cycle through particles in the swarm and evaluate fitness
        for j in range(num_particles):
            best_pos, swarm[j]['err_best_i'] = evaluate(
                classifier, swarm[j], pso_train_data)
            swarm[j]['pos_best_i'] = best_pos

            # determine if the current particle is the best (globally)
            if swarm[j]['err_i'] < err_best_g or err_best_g == -1:
                pos_best_g = list(swarm[j]['position_i'])
                err_best_g = float(swarm[j]['err_i'])

        # halfway through the run, synchronize with the other swarms: either
        # publish this swarm's best or adopt the shared ("galactic") best.
        # The lock serializes access to the shared manager list.
        if i == maxiter // 2:
            l.acquire()
            best_galactic_pos = shared_list[0]
            best_galactic_err = shared_list[1]
            if err_best_g < best_galactic_err and err_best_g != -1:
                shared_list[1] = err_best_g
                shared_list[0] = pos_best_g
            else:
                err_best_g = float(best_galactic_err)
                pos_best_g = [best_galactic_pos]
            l.release()

        # cycle through the swarm and update velocities and positions
        for j in range(num_particles):
            swarm[j]['velocity_i'] = update_velocity(pos_best_g, swarm[j])
            swarm[j]['position_i'] = update_position(bounds, swarm[j])
        i += 1

    # report this worker's best position together with every particle's final position
    return_list.append((pos_best_g[0], [particle['position_i'] for particle in swarm]))
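
This variant passes a classifier and training data through to evaluate, so the two-argument evaluate sketched earlier does not apply. A hedged sketch of a three-argument version, assuming the particle position encodes the model's weights and the classifier exposes hypothetical set_weights and loss methods (both names are illustrative, not from the original code):

def evaluate(classifier, particle, pso_train_data):
    # Score the particle's weights on the supplied training data and return the
    # (possibly updated) personal best position and error.
    X, y = pso_train_data                              # assumed (features, labels) pair
    classifier.set_weights(particle['position_i'])     # hypothetical weight setter
    particle['err_i'] = float(classifier.loss(X, y))   # hypothetical loss method
    if particle['err_i'] < particle['err_best_i'] or particle['err_best_i'] == -1:
        return list(particle['position_i']), float(particle['err_i'])
    return particle['pos_best_i'], particle['err_best_i']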