Beispiel #1
0
def rebuild_cities(cities_nm, neuron_chains, num_depots):
    """Rebuild the normalized city table with group/position ids.

    :param cities_nm: DataFrame of normalized cities; the first
        ``num_depots`` rows are assumed to be the depots.
    :param neuron_chains: iterable of neuron chains, one per depot.
    :param num_depots: number of depot rows at the head of ``cities_nm``.
    :return: copy of ``cities_nm`` with ``gid`` (chain index) and ``pid``
        (closest-neuron index) columns, sorted by (gid, pid).
    """
    cities_od = cities_nm.copy()
    depots = cities_nm.head(num_depots)[['x', 'y']]

    # Column 0 = gid (which chain), column 1 = pid (position inside the
    # chain); -1 marks "not assigned yet".
    gpids = -np.ones([len(cities_nm), 2])

    # Non-depot cities: closest (chain, neuron) pair over all chains.
    gpids[num_depots:] = cities_od.iloc[num_depots:][['x', 'y']].apply(
        lambda c: select_closest_gpid(neuron_chains, c),
        axis=1, raw=True).to_numpy()

    # Depots: depot i is pinned to chain i, at its closest neuron.
    # (enumerate replaces the original hand-maintained idx counter, and
    # gpids[idx, k] replaces the equivalent gpids[:num_depots, k][idx].)
    for idx, (chain, depot) in enumerate(zip(neuron_chains,
                                             depots.to_numpy())):
        gpids[idx, 0] = idx
        gpids[idx, 1] = select_closest(chain, depot)

    cities_od['gid'] = gpids[:, 0]
    cities_od['pid'] = gpids[:, 1]

    cities_od = cities_od.sort_values(['gid', 'pid'], ascending=[True, True])

    return cities_od
Beispiel #2
0
def som(problem, iterations, learning_rate=0.8):
    """Solve the TSP using a Self-Organizing Map.

    :param problem: DataFrame with 'x'/'y' city coordinates.
    :param iterations: maximum number of training iterations.
    :param learning_rate: initial learning rate; decays every iteration.
    :return: index of the cities in visiting order.
    """
    # Obtain the normalized set of cities (w/ coord in [0,1]).
    cities = problem.copy()
    cities[['x', 'y']] = normalize(cities[['x', 'y']])

    # The neuron population is 3 times the number of cities.
    # (The original comment claimed 8x, which did not match the code.)
    n = cities.shape[0] * 3

    # Generate an adequate network of neurons:
    network = generate_network(cities, n, c=4)
    print('Network of {} neurons created. Starting the iterations:'.format(n))

    for i in range(iterations):
        # Every 100 iterations: snapshot the network and report progress.
        # (The original had two separate, identical i % 100 checks.)
        if i % 100 == 0:
            # Raw string keeps the Windows path byte-identical while
            # avoiding the invalid-escape-sequence SyntaxWarning.
            plot_network(cities,
                         network,
                         name=r'out_files\process\city_network%d.png' %
                         (i // 100))
            print('\t> Iteration {}/{}'.format(i, iterations), end="\r")

        # Choose a random city (1x2 array).
        city = cities.sample(1)[['x', 'y']].values
        winner_idx = select_closest(network, city)
        # Gaussian filter centred on the winner -- the core of the SOM.
        gaussian = get_neighborhood(winner_idx, n // 10,
                                    network.shape[0])
        # Update the network's weights (move neurons closer to the city).
        # [:, np.newaxis] broadcasts the 1-D filter over both coordinates.
        network += gaussian[:, np.newaxis] * learning_rate * (
            city - network)

        # Decay the learning rate and the neighborhood radius.
        learning_rate = learning_rate * 0.99997
        n = n * 0.9994

        # Check if any parameter has completely decayed.
        if n < 1:
            print('Radius has completely decayed, finishing execution',
                  'at {} iterations'.format(i))
            break
        if learning_rate < 0.001:
            print('Learning rate has completely decayed, finishing execution',
                  'at {} iterations'.format(i))
            break
    else:
        print('Completed {} iterations.'.format(iterations))

    # Plot the trained network and extract the route.
    plot_network(cities, network)
    route = get_route(cities, network)

    # Plot the route on the original (un-normalized) coordinates.
    cities = problem.copy()
    citiesReal = cities[['x', 'y']]
    plot_route(citiesReal, route)
    return route
Beispiel #3
0
def som(problem, iterations, learning_rate=0.8):
    '''
    Solve the TSP using a Self-Organizing Map.
    :params problem(dataframe): city coordinates
    :params iterations(int): maximum number of iterations
    :learning_rate(float): learning rate, decays over time
    :return route
    '''
    cities = problem.copy()

    # Work on coordinates normalized into [0, 1].
    cities[['x', 'y']] = normalize(cities[['x', 'y']])

    # <Hyperparameter:times> -- 8 neurons per city.
    n = cities.shape[0] * 8

    # Neuron network of shape (n, 2).
    network = generate_network(n)
    print('Network of {} neurons created. Starting the iterations:'.format(n))

    for i in range(iterations):
        if i % 100 == 0:
            print('\t> Iteration {}/{}'.format(i, iterations), end="\r")

        # Sample one city at random and locate its winner neuron.
        city = cities.sample(1)[['x', 'y']].values
        winner_idx = select_closest(network, city)

        # <Hyperparameter:radius> -- gaussian neighborhood of the winner.
        gaussian = get_neighborhood(winner_idx, n // 10, network.shape[0])

        # Pull the neighborhood towards the sampled city.
        network += gaussian[:, np.newaxis] * learning_rate * (city - network)

        # <Hyperparameter:decay rate>
        learning_rate *= 0.99997
        n *= 0.9997

        # Periodic snapshot of the network.
        if i % 1000 == 0:
            plot_network(cities, network, name='diagrams/{:05d}.png'.format(i))

        # Stop early once either parameter has fully decayed.
        if n < 1:
            print('Radius has completely decayed, finishing execution',
                  'at {} iterations'.format(i))
            break
        if learning_rate < 0.001:
            print('Learning rate has completely decayed, finishing execution',
                  'at {} iterations'.format(i))
            break
    else:
        print('Completed {} iterations.'.format(iterations))

    plot_network(cities, network, name='diagrams/final.png')

    route = get_route(cities, network)
    plot_route(cities, route, 'diagrams/route.png')
    return route
Beispiel #4
0
def get_route(cities, network):
    """Return the route (city index order) computed by a network.

    Side effect: adds a 'winner' column to *cities* holding the index of
    each city's closest neuron; cities are then ordered by that neuron.
    """
    closest = lambda point: select_closest(network, point)
    cities['winner'] = cities[['x', 'y']].apply(closest, axis=1, raw=True)
    return cities.sort_values('winner').index
Beispiel #5
0
def som(problem, iterations, learning_rate=0.8):
    """Solve the TSP using a Self-Organizing Map.

    Returns ``(route, dSave)`` where ``dSave`` holds the route distance
    sampled every ``dStep`` iterations.
    """

    # Obtain the normalized set of cities (w/ coord in [0,1])
    dStep = 100  # record the route distance every 100 iterations
    dSave = np.zeros(iterations // dStep)  # recorded distances
    cities = problem.copy()
    cities[['x', 'y']] = normalize(cities[['x', 'y']])  # normalize coords


    # The population size is 4 times the number of cities (neuron count)
    n = cities.shape[0] * 4

    # Generate an adequate network of neurons:
    network = generate_network(cities, n, c=2)
    print('Network of {} neurons created. Starting the iterations:'.format(n))

    for i in range(iterations):
        # Check for plotting interval
        # if i % 100 == 0:      # plot the neuron network every 100 iterations
        #     plot_network(cities, network, name='out_files\process\city_network%d.png'%(i//100))

        if not i % 100:   # if i%100==0
            print('\t> Iteration {}/{}'.format(i, iterations), end="\r")
        # Choose a random city
        city = cities.sample(1)[['x', 'y']].values   # random 1x2 sample from cities
        winner_idx = select_closest(network, city)
        # print(winner_idx)  # DEBUG
        # Tweak: if the winner neuron is within a small threshold of the
        # city, snap it directly onto the city instead of a gaussian update.
        if np.linalg.norm(city - network[winner_idx, :], axis=1) < 0.005:  # distance check
            network[winner_idx, :] = city
            # print(winner_idx)
        else:
            # Generate a filter that applies changes to the winner's gaussian
            gaussian = get_neighborhood(winner_idx, n // 10, network.shape[0])  # the gaussian kernel is the core of the algorithm
            # Update the network's weights (closer to the city)
            network += gaussian[:, np.newaxis] * learning_rate * (city - network)  # np.newaxis adds a dim -> (n_neurons, 1)
            # so each neuron's weight multiplies both of its coordinates

        # Decay the variables
        learning_rate = learning_rate * 0.99999
        n = n * 0.9995  # good values lie in 0.9991-0.9997

        if not i % 100:      # every 100 iterations, record the route distance
            route = get_route(cities, network)
            p = problem.reindex(route)
            dSave[i // dStep] = route_distance(p)

    else:
        # NOTE(review): the loop body contains no break, so this else
        # branch always runs after the final iteration.
        print('Completed {} iterations.'.format(iterations))
    # plotting section
    plot_network(cities, network)
    route = get_route(cities, network)

    cities = problem.copy()
    citiesReal = cities[['x', 'y']]  # original (un-normalized) coordinates
    plot_route(citiesReal, route)
    return route, dSave
Beispiel #6
0
def som(instancia,problem, iterations, learning_rate,delta_learning_rate, delta_n,sensi_radio,sensi_learning_rate,factor_neuronas,plotear):
    """Solve the TSP using a Self-Organizing Map.

    instancia: instance name, used to build the output image paths.
    problem: DataFrame of city coordinates ('x', 'y').
    iterations: maximum number of iterations.
    learning_rate / delta_learning_rate: initial learning rate and its decay.
    delta_n: decay factor of the neighborhood radius.
    sensi_radio / sensi_learning_rate: stop thresholds for radius / rate.
    factor_neuronas: neurons created per city.
    plotear: whether to print progress and save plots.
    """

    # First obtain the normalized cities (coordinates in [0,1])
    cities = problem.copy()
    cities[['x', 'y']] = normalize(cities[['x', 'y']])

    # The neuron population is factor_neuronas times the number of cities
    n = cities.shape[0] * factor_neuronas

    # Generate an adequate network of neurons
    network = generate_network(n)

    if plotear:
        print('Red de {} neuronas creadas. Comenzando las iteraciones:'.format(n))

    for i in range(iterations):
        if not i % 100:
            print('\t> Iteración {}/{}'.format(i, iterations), end="\r")
        # Pick a city at random
        city = cities.sample(1)[['x', 'y']].values
        # Find the neuron closest to the city: the winner neuron
        winner_idx = select_closest(network, city)
        # Generate a filter that applies the changes to the winner (BMU)
        gaussian = get_neighborhood(winner_idx, n//10, network.shape[0])
        # Update the network's weights following a gaussian distribution
        network += gaussian[:,np.newaxis] * learning_rate * (city - network)
        
        # Decay the parameters
        learning_rate = learning_rate * delta_learning_rate
        n = n * delta_n

        # Check whether to plot, every 1000 iterations
        if plotear:
            if not i % 1000:
                plot_network(cities, network, name='imagenes/'+instancia+'/{:05d}.png'.format(i))

        # Check if any parameter has fallen below its sensitivity threshold
        if n < sensi_radio:
            print('Radio por debajo de sensibilidad, Se ha terminado la ejecución',
            'a {} las iteraciones'.format(i))
            break
        if learning_rate < sensi_learning_rate:
            print('Learning rate por debajo de sensibilidad, Se ha terminado la ejecución',
            'a las {} iteraciones'.format(i))
            break
    else:
        print('Se han completado las {} iteraciones.'.format(iterations))

    if plotear:
        plot_network(cities, network, name='imagenes/'+instancia+'/final.png')


    route = get_route(cities, network)
    if plotear:
        plot_route(cities, route, 'imagenes/'+instancia+'/route.png')
    return route
Beispiel #7
0
def IDtspsom(problem,
             iterations,
             learning_rate=0.8,
             dsigma=0.9995,
             dalpha=0.99997):
    """Solve the TSP using a Self-Organizing Map.
        Variant: density + percolation, with n*3 neurons.
        Returns (route, dSave) with distances sampled every dStep iterations.
    """
    # Obtain the normalized set of cities (w/ coord in [0,1])
    cities = problem.copy()
    dStep = 100  # record the route distance every 100 iterations
    dSave = np.zeros(iterations // dStep)  # recorded distances
    cities[['x', 'y']] = normalize(cities[['x', 'y']])  # normalize coords
    # The neuron population is 3 times the number of cities
    n = cities.shape[0] * 3

    # Generate an adequate network of neurons:
    network = generate_network(cities, n, c=3)
    print('Network of {} neurons created. Starting the iterations:'.format(n))

    for i in range(iterations):
        city = cities.sample(1)[['x', 'y']].values  # random 1x2 sample from cities
        winner_idx = select_closest(network, city)
        # Tweak: if the winner neuron is within a small threshold of the
        # city, snap it directly onto the city instead of a gaussian update.
        if np.linalg.norm(city - network[winner_idx, :],
                          axis=1) < 0.005:  # distance check
            network[winner_idx, :] = city
            # print(winner_idx)
        else:
            gaussian = get_neighborhood(winner_idx, n // 10,
                                        network.shape[0])  # the gaussian kernel is the core of the algorithm
            network += gaussian[:, np.newaxis] * learning_rate * (
                city - network)  # np.newaxis adds a dim -> (n_neurons, 1)
            # so each neuron's weight multiplies both of its coordinates
        # Decay the variables
        learning_rate = learning_rate * dalpha
        n = n * dsigma  # good values lie in 0.9991-0.9997

        if not i % 100:  # every 100 iterations, record the route distance
            route = get_route(cities, network)
            p = problem.reindex(route)
            dSave[i // dStep] = route_distance(p)

        # # Check if any parameter has completely decayed.
        # if n < 1:
        #     print('Radius has completely decayed, finishing execution',
        #     'at {} iterations'.format(i))
        #     break
        # if learning_rate < 0.001:
        #     print('Learning rate has completely decayed, finishing execution',
        #     'at {} iterations'.format(i))
        #     break
    else:
        # NOTE(review): the decay breaks above are commented out, so this
        # else branch always runs after the final iteration.
        print('Completed {} iterations.'.format(iterations))
    route = get_route(cities, network)
    return route, dSave
Beispiel #8
0
def get_route(cities, network):
    """
    Compute the TSP route from a trained network.

    Each city gets the index of its nearest neuron (stored in a new
    'winner' column on *cities*), then cities are returned in neuron order.
    """
    # Nearest neuron for every city row.
    nearest_neuron = cities[['x', 'y']].apply(
        lambda coords: select_closest(network, coords), axis=1, raw=True)
    cities['winner'] = nearest_neuron

    # Ordering by winner neuron yields the tour; return the city index.
    return cities.sort_values('winner').index
Beispiel #9
0
def get_route(cities, network):
    """
    cities: [DataFrame] the normalized set of cities ['city', 'y', 'x']\n
    network: [numpy.ndarray] the trained network with 8*cities neuron\n
    return: [Index] the index of cities\n
    If several cities are assigned to the same neuron, the sort below
    keeps their previous relative order (so this is not optimal).\n
    Return the route computed by a network.\n
    """
    cities['winner'] = cities[['x', 'y']].apply(
        lambda c: select_closest(network, c),  # result stored in new col 'winner'
        axis=1,  # iterate row by row
        raw=True  # the passed function will receive ndarray objects.
    )
    # Sort by 'winner' (neuron order), then return the index: the city order.
    return cities.sort_values('winner').index
Beispiel #10
0
def som(problem, iterations, learning_rate=0.8):
    """Solve the TSP using a Self-Organizing Map.

    :param problem: DataFrame with 'x'/'y' city coordinates.
    :param iterations: maximum number of training iterations.
    :param learning_rate: initial learning rate; decays every iteration.
    :return: index of the cities in visiting order.
    """
    # Obtain the normalized set of cities (w/ coord in [0,1]).
    cities = problem.copy()
    cities[['x', 'y']] = normalize(cities[['x', 'y']])
    # The neuron population is 8 times the number of cities.
    n = cities.shape[0] * 8
    network = generate_network(n)  # Generate an adequate network of neurons
    print('Network of {} neurons created. Starting the iterations:'.format(n))

    for i in range(iterations):
        # Snapshot every 1000 iterations (i == 0 is already covered by
        # i % 1000 == 0, so the original extra `i == 0 or` was redundant).
        if i % 1000 == 0:
            # Raw string keeps the Windows path byte-identical while
            # avoiding the invalid-escape-sequence SyntaxWarning.
            plot_network(cities,
                         network,
                         name=r'out_files\process\city_network%d.png' %
                         (i // 1000 + 1))

        # Choose a random city (1x2 array).
        city = cities.sample(1)[['x', 'y']].values
        winner_idx = select_closest(network, city)
        # Generate a filter that applies changes to the winner's gaussian.
        gaussian = get_neighborhood(winner_idx, n // 10, network.shape[0])
        # Update the network's weights (move neurons closer to the city).
        # [:, np.newaxis] broadcasts the 1-D filter over both coordinates.
        network += gaussian[:, np.newaxis] * learning_rate * (
            city - network)
        # Decay the learning rate and the neighborhood radius.
        learning_rate = learning_rate * 0.99997
        n = n * 0.9997

        # Check if any parameter has completely decayed.
        if n < 1:
            print('Radius has completely decayed, finishing execution',
                  'at {} iterations'.format(i))
            break
        if learning_rate < 0.001:
            print('Learning rate has completely decayed, finishing execution',
                  'at {} iterations'.format(i))
            break
        # TODO: detect convergence when the neurons cover all cities.
    else:
        print('Completed {} iterations.'.format(iterations))
    plot_network(cities, network)
    route = get_route(cities, network)
    plot_route(problem[['x', 'y']], route)
    return route
Beispiel #11
0
def som(problem, iterations, learning_rate=0.8):
    """Solve the TSP using a Self-Organizing Map.

    :param problem: DataFrame with 'x'/'y' city coordinates.
    :param iterations: maximum number of training iterations.
    :param learning_rate: initial learning rate; decays every iteration.
    :return: index of the cities in visiting order.
    """
    cities = problem.copy()

    # Normalize the coordinates into [0, 1].
    cities[['x', 'y']] = normalize(cities[['x', 'y']])

    # 8 neurons per city.
    n = cities.shape[0] * 8

    network = generate_network(n)
    print('Network of {} neurons created. Starting the iterations:'.format(n))

    for i in range(iterations):
        if not i % 100:
            print('\t> Iteration {}/{}'.format(i, iterations), end="\r")

        # Pick one random city and find its closest (winner) neuron.
        city = cities.sample(1)[['x', 'y']].values
        winner_idx = select_closest(network, city)

        # Gaussian neighborhood centred on the winner.
        gaussian = get_neighborhood(winner_idx, n // 10, network.shape[0])

        # Pull the neighborhood towards the sampled city.
        network += gaussian[:, np.newaxis] * learning_rate * (city - network)

        # Decay the learning rate and the neighborhood radius.
        learning_rate = learning_rate * 0.99997
        n = n * 0.9997

        # Snapshot every 1000 iterations.
        if not i % 1000:
            plot_network(cities, network, name='diagrams/{:05d}.png'.format(i))

        # Stop early once either parameter has fully decayed.
        if n < 1:
            print('Radius has completely decayed, finishing execution',
                  'at {} iterations'.format(i))
            break
        if learning_rate < 0.001:
            print('Learning rate has completely decayed, finishing execution',
                  'at {} iterations'.format(i))
            break
    else:
        print('Completed {} iterations.'.format(iterations))

    plot_network(cities, network, name='diagrams/final.png')

    route = get_route(cities, network)
    plot_route(cities, route, 'diagrams/route.png')
    return route
Beispiel #12
0
def som(target,
        iterations,
        learning_rate=0.8,
        obstacle=None,
        fbzs=None,
        data_path="assets/"):
    """
    target: [DataFrame] ['city', 'y', 'x']
    iterations: [int] the max iteration times
    learning rate: [float] the original learning rate, will decay
    obstacle: [DataFrame] ['obs' 'y' 'x'], or None
    fbzs: irregular forbidden zones, or None
    data_path: [str] directory where intermediate plots are saved

    return: route distance mapped back to the original coordinate system

    Solve the TSP using a Self-Organizing Map.
    """

    # Obtain the normalized set of cities (w/ coord in [0,1]).
    # Copy first so later processing won't influence the original data.
    cities = target.copy()[['x', 'y']]
    obs = obstacle.copy()[['x', 'y']] if obstacle is not None else None

    norm_ans = normalization(fbzs, cities, obs)
    cities, obs, span, fbzs = norm_ans["result"][0], norm_ans["result"][
        1], norm_ans["dif"], norm_ans["fbzs"]
    # BUG FIX: guard the None case -- the original indexed obs
    # unconditionally and crashed when no obstacle was given.
    obs = obs[['x', 'y']].to_numpy() if obs is not None else None
    targets = cities[['x', 'y']].to_numpy()

    # The population size is 8 neurons per target city, plus extra
    # neurons for obstacles and forbidden zones.
    n = targets.shape[0] * 8
    if obstacle is not None:
        n += obs.shape[0] * 2
    # BUG FIX: this term depends on fbzs, not on obstacle (the original
    # tested `obstacle is not None` and could end up calling len(None)).
    if fbzs is not None:
        n += len(fbzs) * 2

    # Parameters used to observe and evaluate convergence.
    axes = update_figure()
    old_delta, old_network = [], 0  # movement history / previous network
    gate = 1 / span  # convergence threshold mapped into normalized coords
    obs_size = 4 * gate
    # Generate an adequate network of neurons (an (n, 2) matrix).
    network = generate_network(n)
    logging.info('Network of %s neurons created. Starting iterations:', n)

    # BUG FIX: pre-set so the `finish_info +=` after the loop cannot hit
    # an UnboundLocalError when all iterations run without a break.
    finish_info = "Max iterations reached,"

    for i in range(iterations):
        if not i % 100:
            # "\r" moves the cursor to the line start, overwriting it.
            print('\t> Iteration {}/{}'.format(i, iterations), end="\r")

        # Clockwise departure vectors of the current route.
        # NOTE(review): only consumed by the experimental code that was
        # commented out; kept because get_route_vector may matter -- confirm.
        route_dir_vec = get_route_vector(network, d=0, t=1)

        # Choose a random target (ndarray row) and do a standard SOM step.
        city = random.choice(targets)
        winner_idx = select_closest(network, city)
        gaussian = get_neighborhood(winner_idx, n // 10, network.shape[0])
        city_delta = gaussian[:, np.newaxis] * (city - network)
        network += learning_rate * city_delta

        # (Experimental obstacle / forbidden-zone repulsion forces were
        # removed here; see version control history if needed.)

        # Redistribute nodes so spacing is more even; winners stay put.
        winner_indices = np.apply_along_axis(
            func1d=lambda t: select_closest(network, t),
            axis=1,
            arr=targets,
        )
        network = sep_and_close_nodes(
            network,
            decay=learning_rate,
            targets=targets,
            obstacle=obs,  # circular obstacles
            obs_size=obs_size,  # obstacle radius
            fbzs=fbzs,  # irregular forbidden zones
            gate=gate,  # max update step
            winner_indices=winner_indices,
        )
        # Decay the variables.
        # Learning-rate decay corresponds to e^{-t/t0}, t0 = 33332.83.
        learning_rate = learning_rate * 0.99997
        # Gaussian radius decay: sigma = sigma0 * e^{-t/t0}, sigma0 = n//10.
        n = n * 0.9997

        # Check for plotting interval.
        if not i % 200:
            plot_network(
                targets,
                network,
                name=data_path + '{:05d}.png'.format(i),
                axes=axes,
                obstacle=obs,
                obs_size=obs_size,
                span=span,
                fbzs=fbzs,
            )
            update_figure(axes, clean=True)

        # Check if any parameter has completely decayed (convergence).
        if n < 1:
            finish_info = 'Radius has completely decayed.'
            break
        if learning_rate < 0.001:
            finish_info = 'Learning rate has completely decayed.'
            break

        delta = network - old_network if old_network is not None else network
        max_delta = np.linalg.norm(delta, axis=1).max()  # largest node movement
        old_delta.append(max_delta)
        old_network = network.copy()
        # Keep a window of node-count deltas so sampling noise cannot
        # fake convergence.
        if len(old_delta) > network.shape[0]:
            old_delta.pop(0)
        if max(old_delta) < gate:
            # Stop when the largest recent movement is below the gate.
            finish_info = "Average movement has reduced to {},".format(
                np.mean(old_delta) * span)
            finish_info += "max movement {},".format(np.max(old_delta) * span)
            break

    # Post-training work.
    finish_info += "finishing execution at {} iterations".format(i)
    logging.info(finish_info)

    # Save the final picture of the route.
    plot_network(
        targets,
        network,
        name=data_path + 'final.png',
        obstacle=obs,
        obs_size=obs_size,
        span=span,
        fbzs=fbzs,
    )

    # Compute the route distance back in the original coordinate system.
    distance = route_distance(network) * span
    logging.info('Route found of length %s', distance)

    return distance
Beispiel #13
0
def multi_som(target,
              iterations,
              learning_rate=0.8,
              obstacle=None,
              fbzs=None,
              data_path="assets/"):
    """Cluster the targets into rings and train one SOM per cluster.

    target: [DataFrame] ['city', 'y', 'x']
    iterations: [int] the max iteration times
    learning_rate: [float] initial learning rate, will decay
    obstacle: [DataFrame] circular obstacles, or None
    fbzs: irregular forbidden zones, or None
    data_path: [str] directory where intermediate plots are saved

    return: total route distance in the original coordinate system
    """
    # Load the data and normalize it.
    logging.info('multi_som loading data')
    targets = target.copy()[['x', 'y']]
    obs = obstacle.copy()[['x', 'y']] if obstacle is not None else None

    norm_ans = normalization(fbzs, targets, obs)
    targets, obs, span, fbzs = norm_ans["result"][0], norm_ans["result"][
        1], norm_ans["dif"], norm_ans["fbzs"]
    # Convert everything to ndarray.
    obs = obs[['x', 'y']].to_numpy()
    targets = targets[['x', 'y']].to_numpy()

    # Run-time bookkeeping.
    axes = update_figure()  # set up a figure
    old_delta = []
    gate = 1 / span  # convergence threshold mapped into normalized coords
    obs_size = 4 * gate
    net_size = 15
    # Cluster the targets into k rings; one network per cluster.
    k = 2
    labels = cluster(targets, n=k, fbzs=fbzs)
    Network_group = []  # networks created following the clustering
    for i in range(k):
        sub_targets = targets[labels == i]
        num = sub_targets.shape[0] * net_size
        radius = num
        sub_network = generate_network(num)
        Network_group.append(Network(sub_network, num, sub_targets, radius))

    logging.info('%s network created', len(Network_group))
    logging.info('Starting iterations:')

    # BUG FIX: pre-set so the `finish_info +=` after the loop cannot hit
    # an UnboundLocalError when all iterations run without a break.
    finish_info = "Max iterations reached,"

    for i in range(iterations):
        if not i % 100:
            print('\t> Iteration {}/{}'.format(i, iterations), end="\r")

        for net in Network_group:
            # Standard SOM step on this cluster's network.
            target = random.choice(net.targets)
            winner_idx = select_closest(net.network, target)
            gaussian = get_neighborhood(winner_idx, net.radius // 10, net.num)
            target_delta = gaussian[:, np.newaxis] * (target - net.network)
            net.network += learning_rate * target_delta

            # Pick the winner node per target; non-winners move towards
            # the nearest targets (winners stay put).
            winner_indices = np.apply_along_axis(
                func1d=lambda t: select_closest(net.network, t),
                axis=1,
                arr=net.targets,
            )
            net.network = sep_and_close_nodes(
                net.network,
                decay=learning_rate,
                targets=net.targets,
                obstacle=obs,  # circular obstacles
                obs_size=obs_size,  # obstacle radius
                fbzs=fbzs,  # irregular forbidden zones
                gate=gate,  # max update step
                winner_indices=winner_indices,
            )

        # Decay the variables.
        learning_rate = learning_rate * 0.99997
        for net in Network_group:
            net.radius *= 0.9997

        # Check for plotting interval.
        if not i % 200:
            plot_network(
                targets,
                neurons=None,
                name=data_path + '{:05d}.png'.format(i),
                axes=axes,
                obstacle=obs,
                obs_size=obs_size,
                span=span,
                fbzs=fbzs,
                Networks=Network_group,
            )
            update_figure(axes, clean=True)

        # Check if any parameter has completely decayed (convergence).
        if max([net.radius for net in Network_group]) < 1:
            finish_info = 'Radius has completely decayed.'
            break
        if learning_rate < 0.001:
            finish_info = 'Learning rate has completely decayed.'
            break
        for net in Network_group:
            old_delta.append(net.get_delta())
            # Bounded history so sampling noise cannot fake convergence.
            if len(old_delta) > net_size * targets.shape[0]:
                old_delta.pop(0)
        if max(old_delta) < gate:
            # Stop when the largest recent movement is below the gate.
            finish_info = "Max movement has reduced to {},".format(
                max(old_delta) * span)
            break

    # Post-training work.
    finish_info += "finishing execution at {} iterations".format(i)
    logging.info(finish_info)

    # Save the final picture of the route.
    plot_network(
        targets,
        neurons=None,
        name=data_path + 'final.png',
        obstacle=obs,
        obs_size=obs_size,
        span=span,
        fbzs=fbzs,
        Networks=Network_group,
    )

    # Sum the per-network route distances, mapped back to the original
    # coordinate system.
    distance = 0
    for net in Network_group:
        distance += route_distance(net.network) * span
    logging.info('Route found of length %s', distance)

    return distance
Beispiel #14
0
def som(problem, iterations, learning_rate=0.8):
    '''
    Solve the TSP with a SOM.

    :param problem: DataFrame of city coordinates ('x', 'y').
    :param iterations: maximum number of iterations.
    :param learning_rate: initial learning rate; decays each step.
    :return: route index over the cities.
    '''
    # Normalize the city coordinates.
    cities = problem.copy()
    cities[['x', 'y']] = normalize(cities[['x', 'y']])

    # 8 neurons per city.
    n = cities.shape[0] * 8

    # Build the neuron network.
    network = generate_network(n)

    print('创建{} 个神经元. 开始进行迭代:'.format(n))

    for i in range(iterations):
        if not i % 100:
            print('\t> 迭代过程 {}/{}'.format(i, iterations), end='\r')
        # Pick a random city.
        city = cities.sample(1)[['x', 'y']].values

        # Winner neuron (the neuron closest to the city).
        winner_idx = select_closest(network, city)

        # Gaussian neighborhood centred on the winner.
        gaussian = get_neighborhood(winner_idx, n // 10, network.shape[0])

        # Update the weights so neurons move towards the chosen city.
        network += gaussian[:, np.newaxis] * learning_rate * (city - network)

        # Decay the learning rate and the neighborhood radius.
        learning_rate = learning_rate * 0.99997
        n = n * 0.9997

        # Plot every 1000 iterations.
        if not i % 1000:
            plot_network(cities,
                         network,
                         name='som_diagrams/{:05d}.png'.format(i))
        # Stop once either parameter has reached its threshold.
        if n < 1:
            print('方差已经达到阈值,完成执行次数{}'.format(i))
            break
        if learning_rate < 0.001:
            print('学习率已经达到阈值,完成执行次数{}'.format(i))
            break
    else:
        print('完成迭代:{}次'.format(iterations))

    plot_network(cities, network, name='som_diagrams/final.png')
    route = get_route(cities, network)
    cities = cities.reindex(route)
    plot_route(cities, route, 'som_diagrams/route.png')

    # Combine the saved png frames into a gif.
    # Raw string keeps the Windows path free of invalid-escape warnings.
    # os.chdir returns None, so the original `path = os.chdir(...)`
    # assignment (and the dead `pass`) were dropped.
    os.chdir(r'.\som_diagrams')
    pic_list = os.listdir()
    create_gif(pic_list, 'result.gif', 0.3)

    return route
Beispiel #15
0
def SOM(args):
    """Solve the TSP using a Self-Organizing Map.

    Reads 'data1.csv' from args.data_dir, trains the neuron chain for
    args.iteration steps, records losses every args.evaluate_freq steps,
    and writes CSV / npy / json artifacts into args.out_dir.
    Returns a results dict (min_loss, best_id, best_route, losses).
    """

    # Obtain the normalized set of cities (w/ coord in [0,1])
    iteration = args.iteration
    learning_rate = args.learning_rate
    decay = args.decay

    out_dir = Path(args.out_dir)
    out_dir.mkdir_p()  # NOTE(review): mkdir_p suggests path.py's Path, not pathlib -- confirm
    cities = pd.read_csv(Path(args.data_dir)/'data1.csv')
    cities.to_csv(out_dir/'cities.csv')

    cities_nm = cities.copy()
    cities_nm[['x', 'y']] = normalize(cities_nm[['x', 'y']])
    cities_nm.to_csv(out_dir/'cities_nm.csv')


    # The population size is 8 times the number of cities
    n = cities_nm.shape[0] * 8

    # Generate an adequate network of neurons:
    neuron_chain = init_neurons(n)
    print('--> Network of {} neurons created. Starting the iterations:'.format(n))
    best_route=np.array([0])


    best_id=0
    min_loss=0  # 0 acts as the "not set yet" sentinel below
    losses={}
    losses_decay = {}

    for i in tqdm(range(iteration)):

        # Choose a random city
        city = cities_nm.sample(1)[['x', 'y']].values  # random sampling
        winner_idx = select_closest(neuron_chain, city)
        # Generate a filter that applies changes to the winner's gaussian
        gaussian = get_neighborhood(center=winner_idx, radix=n//10, domain=neuron_chain.shape[0])
        # Update the network's weights (closer to the city)
        neuron_chain += gaussian[:,np.newaxis] * learning_rate * (city - neuron_chain)
        # Decay the variables
        learning_rate = learning_rate * decay
        n = n * decay



        if i % args.evaluate_freq==0:
            route = get_route(cities_nm, neuron_chain)

            cities_od = cities.reindex(route)
            loss = route_distance(cities_od)
            losses[i] = loss

            # Keep the best route seen so far (min_loss == 0 means unset).
            if  min_loss==0 or min_loss > loss:
                min_loss=loss
                # float64 so json.dump can serialize the route values
                best_route = list(route.astype(np.float64))
                best_id = i
                losses_decay[i] = loss
                cities_od.to_csv(out_dir / 'route_{:04d}.csv'.format(i))
                save_neuron_chain(neuron_chain, out_dir / "neuron_chain_{:04d}.npy".format(i))
    # NOTE(review): despite the original "#end for" marker here, the
    # decay checks below are still INSIDE the loop body.

        # Check if any parameter has completely decayed.
        if n < 1:
            print('Radius has completely decayed, finishing execution',
            'at {} iterations'.format(i))
            break
        if learning_rate < 0.001:
            print('Learning rate has completely decayed, finishing execution',
            'at {} iterations'.format(i))
            break


    print('Completed {} iterations.'.format(iteration))

    results={}
    results['min_loss'] = min_loss
    results['best_id'] = best_id
    results['best_route'] = best_route
    results['losses_decay'] = losses_decay
    results['losses'] = losses

    p = Path(out_dir / 'results.json')
    with open(p, 'w') as fp:
        json.dump(results, fp)
        print('ok')

    return results