Example #1
def main():
    #num_max_iter = 100000  # maximum number of iterations
    #learning_rate = 0.8  # learning rate
    #delta_learning_rate = 0.99997  # decay rate of the learning rate
    #delta_n = 0.9997  # decay rate of n (to revisit later)
    sensi_radio = 1  # sensitivity threshold for the BMU radius
    sensi_learning_rate = 0.001  # sensitivity threshold for the learning rate
    #factor_neuronas = 8  # number of neurons per city in the network
    plotear = False  # if True, create a plot every 1000 iterations plus one of the final route
    runs = 10  # number of runs of the model per instance

    for instancia in ['qa194','uy734','ar9152','fi10639','it16862']:#'ch71009']:
        for num_max_iter in [5000,10000,20000,50000,100000]:
            for delta_learning_rate in [0.9997, 0.98, 0.95]:
                delta_n = delta_learning_rate
                for learning_rate in [0.9, 0.8, 0.7, 0.6]:
                    for factor_neuronas in [2,4,6,8]:
                        for corrida in range(runs):
                            print('Run', corrida + 1)
                            problem = read_tsp(instancia+'.tsp')
                            route = som(instancia,problem, num_max_iter,learning_rate, delta_learning_rate, delta_n,sensi_radio,sensi_learning_rate,factor_neuronas, plotear)
                            problem = problem.reindex(route)
                            distance = route_distance(problem)
                            write(instancia, num_max_iter, delta_learning_rate, delta_n, distance, learning_rate, factor_neuronas, corrida+1)
                            print('Route found of length {}'.format(distance))
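Each of these examples measures the tour with route_distance after reindexing the DataFrame by the returned route, but the helper itself is never shown. A minimal sketch, assuming a DataFrame with 'x' and 'y' columns and a tour that closes back on the first city:

import numpy as np

def route_distance(cities):
    """Length of the closed tour that visits the cities in row order (sketch)."""
    points = cities[['x', 'y']].to_numpy()
    # Distance from each city to the previous one; np.roll closes the loop.
    return np.linalg.norm(points - np.roll(points, 1, axis=0), axis=1).sum()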
Example #2
def main():
    #if len(argv) != 2:       # deleted by EE526
     #   print("inCorrect use: python src/main.py <filename>.tsp")
      #  return -1
    time1 = time.time()
    problem = read_tsp('assets/usa.tsp')  # open the file; problem is a DataFrame
    # problem = pd.read_csv('assets/a280.csv', encoding='gbk')  # alternative: read the table directly
    # print(problem)

    route, dSave = som(problem, 50000)   # second argument is the iteration count; route is a list
    np.savetxt('out_files\ route.txt', route, delimiter=',')
    # print("route:", route)
    time2 = time.time()
    print('Running time: %s Seconds' % (time2 - time1))
    problem = problem.reindex(route)
    distance = route_distance(problem)
    # plot the convergence curve (distance vs. iteration)
    fig = plt.figure()
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(dSave, color='red', linewidth=1)
    plt.savefig("./out_files/iter.png", dpi=600, bbox_inches='tight')
    plt.show()
    print('Route found of length {}'.format(distance))
Example #3
def main():
    file_argv = r'../assets/ch34.tsp'  # 34 cities in China
    problem = read_tsp(file_argv)
    route = som(problem, 100000)
    problem = problem.reindex(route)
    distance = route_distance(problem)

    print('Route found of length {}'.format(distance))
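The snippets load TSPLIB instances such as ch34.tsp or qa194.tsp through read_tsp, which is not shown. A minimal sketch of such a reader, assuming the standard NODE_COORD_SECTION layout and a city/x/y column order (the original helper may store y before x):

import pandas as pd

def read_tsp(filename):
    """Minimal TSPLIB reader (sketch): returns a DataFrame with 'city', 'x', 'y'."""
    with open(filename) as f:
        lines = f.read().splitlines()
    start = lines.index('NODE_COORD_SECTION') + 1
    rows = []
    for line in lines[start:]:
        line = line.strip()
        if not line or line == 'EOF':
            break
        city, x, y = line.split()[:3]
        rows.append((int(city), float(x), float(y)))
    return pd.DataFrame(rows, columns=['city', 'x', 'y'])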
Example #4
def som(problem, iterations, learning_rate=0.8):
    """Solve the TSP using a Self-Organizing Map."""

    # Obtain the normalized set of cities (w/ coord in [0,1])
    cities = problem.copy()
    dSave = np.zeros(iterations // 100)  # buffer for the route distance sampled every 100 iterations
    cities[['x', 'y']] = normalize(cities[['x', 'y']])  # normalize to [0, 1]

    # The number of neurons is 3 times the number of cities
    n = cities.shape[0] * 3
    # n = cities.shape[0] * 3   # for testing, by EE526

    # Generate an adequate network of neurons:
    network = generate_network(n)
    print('Network of {} neurons created. Starting the iterations:'.format(n))

    for i in range(iterations):
        if not i % 100:  # if i%100==0
            print('\t> Iteration {}/{}'.format(i, iterations), end="\r")
        # Choose a random city
        city = cities.sample(1)[['x', 'y']].values  # randomly pick one city from cities (a 1x2 array)
        winner_idx = select_closest(network, city)
        # Generate a filter that applies changes to the winner's gaussian
        gaussian = get_neighborhood(winner_idx, n // 10, network.shape[0])
        # Update the network's weights (closer to the city)
        network += gaussian[:, np.newaxis] * learning_rate * (
            city - network)  # np.newaxis adds a dimension so the Gaussian becomes (n_neurons, 1)
        # i.e. every neuron's move toward the city is scaled by its own neighborhood weight
        # Decay the variables
        learning_rate = learning_rate * 0.99997
        n = n * 0.9997

        # Check for plotting interval
        if not i % 100:  # every 100 iterations: plot the neurons and record the route distance
            plot_network(cities,
                         network,
                         name='out_files\process\city_network%d.png' % (i))
            route = get_route(cities, network)
            p = problem.reindex(route)
            dSave[i // 100] = route_distance(p)

        # Check if any parameter has completely decayed.
        if n < 1:
            print('Radius has completely decayed, finishing execution',
                  'at {} iterations'.format(i))
            break
        if learning_rate < 0.001:
            print('Learning rate has completely decayed, finishing execution',
                  'at {} iterations'.format(i))
            break
    else:
        print('Completed {} iterations.'.format(iterations))

    plot_network(cities, network)

    route = get_route(cities, network)
    plot_route(cities, route)
    return route, dSave
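Example #4 relies on three helpers that are not part of the snippet. The sketches below are consistent with how they are called here (generate_network(n) returning an (n, 2) array, a Gaussian neighborhood over a closed ring of neurons), but the original implementations may differ:

import numpy as np

def generate_network(size):
    """Random initial neuron positions in the unit square, shape (size, 2) (sketch)."""
    return np.random.rand(size, 2)

def select_closest(candidates, origin):
    """Index of the neuron closest to the sampled city, by Euclidean distance (sketch)."""
    return np.linalg.norm(candidates - origin, axis=1).argmin()

def get_neighborhood(center, radix, domain):
    """Gaussian bump of width radix centred on the winner, on a ring of domain neurons (sketch)."""
    if radix < 1:
        radix = 1  # avoid a zero-width neighborhood once n has decayed
    deltas = np.absolute(center - np.arange(domain))
    distances = np.minimum(deltas, domain - deltas)  # wrap-around distance on the ring
    return np.exp(-(distances ** 2) / (2 * (radix ** 2)))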
Example #5
def som(problem, iterations, learning_rate=0.8):
    """Solve the TSP using a Self-Organizing Map."""

    # Obtain the normalized set of cities (w/ coord in [0,1])
    dStep = 100  # save data every 100 iterations
    dSave = np.zeros(iterations // dStep)  # buffer for the sampled route distances
    cities = problem.copy()
    cities[['x', 'y']] = normalize(cities[['x', 'y']])  # normalize

    # The number of neurons is 4 times the number of cities
    n = cities.shape[0] * 4

    # Generate an adequate network of neurons:
    network = generate_network(cities, n, c=2)
    print('Network of {} neurons created. Starting the iterations:'.format(n))

    for i in range(iterations):
        # Check for plotting interval
        # if i % 100 == 0:      # plot the neurons every 100 iterations
        #     plot_network(cities, network, name='out_files\process\city_network%d.png'%(i//100))

        if not i % 100:   # if i%100==0
            print('\t> Iteration {}/{}'.format(i, iterations), end="\r")
        # Choose a random city
        city = cities.sample(1)[['x', 'y']].values   # randomly pick one city from cities (a 1x2 array)
        winner_idx = select_closest(network, city)
        # print(winner_idx)  # DEBUG
        # improvement: if the winner is within a threshold of the city, it snaps directly onto the city
        if np.linalg.norm(city - network[winner_idx, :], axis=1) < 0.005:  # distance check
            network[winner_idx, :] = city
            # print(winner_idx)
        else:
            # Generate a filter that applies changes to the winner's gaussian
            gaussian = get_neighborhood(winner_idx, n // 10, network.shape[0])  # the Gaussian kernel is the core of the algorithm
            # Update the network's weights (closer to the city)
            network += gaussian[:, np.newaxis] * learning_rate * (city - network)  # np.newaxis makes the Gaussian (n_neurons, 1)
            # i.e. every neuron's move toward the city is scaled by its own neighborhood weight

        # Decay the variables
        learning_rate = learning_rate * 0.99999
        n = n * 0.9995  # good values are 0.9991-0.9997

        if not i % 100:      # every 100 iterations, record the route distance
            route = get_route(cities, network)
            p = problem.reindex(route)
            dSave[i // dStep] = route_distance(p)

    else:
        print('Completed {} iterations.'.format(iterations))
    # plotting
    plot_network(cities, network)
    route = get_route(cities, network)

    cities = problem.copy()
    citiesReal = cities[['x', 'y']]  # use the original (un-normalized) coordinates
    plot_route(citiesReal, route)
    return route, dSave
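After training, both variants call get_route to turn the neuron ring back into a city ordering. A plausible sketch: assign every city to its closest neuron and sort the cities by that neuron's position on the ring (the original may differ in detail):

import numpy as np

def get_route(cities, network):
    """Order the cities by the index of their closest neuron along the ring (sketch)."""
    cities = cities.copy()
    cities['winner'] = cities[['x', 'y']].apply(
        lambda row: np.linalg.norm(network - row.to_numpy(), axis=1).argmin(),
        axis=1)
    return cities.sort_values('winner').index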
Example #6
def IDtspsom(problem,
             iterations,
             learning_rate=0.8,
             dsigma=0.9995,
             dalpha=0.99997):
    """Solve the TSP using a Self-Organizing Map.
        密度+渗透,n*3
    """
    # Obtain the normalized set of cities (w/ coord in [0,1])
    cities = problem.copy()
    dStep = 100  # 每隔100次保存一次数据
    dSave = np.zeros(iterations // dStep)  # 保存数据
    cities[['x', 'y']] = normalize(cities[['x', 'y']])  # 归一化
    # The population size is 8 times the number of cities   神经元个数
    n = cities.shape[0] * 3

    # Generate an adequate network of neurons:
    network = generate_network(cities, n, c=3)
    print('Network of {} neurons created. Starting the iterations:'.format(n))

    for i in range(iterations):
        city = cities.sample(1)[['x', 'y']].values  # randomly pick one city from cities (a 1x2 array)
        winner_idx = select_closest(network, city)
        # improvement: if the winner is within a threshold of the city, it snaps directly onto the city
        if np.linalg.norm(city - network[winner_idx, :],
                          axis=1) < 0.005:  # distance check
            network[winner_idx, :] = city
            # print(winner_idx)
        else:
            gaussian = get_neighborhood(winner_idx, n // 10,
                                        network.shape[0])  # the Gaussian kernel is the core of the algorithm
            network += gaussian[:, np.newaxis] * learning_rate * (
                city - network)  # np.newaxis makes the Gaussian (n_neurons, 1)
            # i.e. every neuron's move toward the city is scaled by its own neighborhood weight
        # Decay the variables
        learning_rate = learning_rate * dalpha
        n = n * dsigma  # good values are 0.9991-0.9997

        if not i % 100:  # every 100 iterations, record the route distance
            route = get_route(cities, network)
            p = problem.reindex(route)
            dSave[i // dStep] = route_distance(p)

        # # Check if any parameter has completely decayed.
        # if n < 1:
        #     print('Radius has completely decayed, finishing execution',
        #     'at {} iterations'.format(i))
        #     break
        # if learning_rate < 0.001:
        #     print('Learning rate has completely decayed, finishing execution',
        #     'at {} iterations'.format(i))
        #     break
    else:
        print('Completed {} iterations.'.format(iterations))
    route = get_route(cities, network)
    return route, dSave
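All the som variants above first pass the coordinates through normalize. A sketch that maps both columns into [0, 1] while keeping the x/y aspect ratio, so normalized distances stay proportional to the real ones (the original implementation may differ):

def normalize(points):
    """Scale a DataFrame with 'x' and 'y' columns into [0, 1], preserving proportions (sketch)."""
    span = points.max() - points.min()
    norm = (points - points.min()) / span  # each axis independently scaled to [0, 1]
    return norm * (span / span.max())      # shrink the shorter axis to keep the aspect ratio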
Example #7
def main():
    #num_max_iter = 100000  # maximum number of iterations
    #learning_rate = 0.8  # learning rate
    #delta_learning_rate = 0.99997  # decay rate of the learning rate
    #delta_n = 0.9997  # decay rate of n (to revisit later)
    sensi_radio = 1  # sensitivity threshold for the BMU radius
    sensi_learning_rate = 0.001  # sensitivity threshold for the learning rate
    #factor_neuronas = 8  # number of neurons per city in the network
    plotear = False  # if True, create a plot every 1000 iterations plus one of the final route
    runs = 10  # number of runs of the model per instance
    # generation = 1: normal, 2: circle

    for instancia in [
            'qa194', 'uy734', 'ar9152', 'fi10639', 'it16862', 'ei8246',
            'gr9882', 'ja9847', 'kz9976', 'mu1979', 'rw1621', 'vm22775',
            'ym7663'
    ]:
        for num_max_iter in [5000, 10000, 20000, 50000, 100000]:
            for delta_learning_rate in [0.9997, 0.98, 0.95]:
                delta_n = delta_learning_rate
                for learning_rate in [0.9, 0.8, 0.7, 0.6]:
                    for factor_neuronas in [2, 4, 6, 8]:
                        for generation in [1, 2]:
                            for corrida in range(runs):
                                print('Run number', corrida + 1)
                                inicio = time.time()
                                problem = read_tsp(instancia + '.tsp')
                                route = som(instancia, problem, num_max_iter,
                                            learning_rate, delta_learning_rate,
                                            delta_n, sensi_radio,
                                            sensi_learning_rate,
                                            factor_neuronas, generation,
                                            plotear)
                                tiempo_entrenamiento = time.time() - inicio
                                print('Training time',
                                      tiempo_entrenamiento)
                                inicio2 = time.time()
                                problem = problem.reindex(route)
                                distance = route_distance(problem)
                                tiempo_resolucion = time.time() - inicio2
                                print('Routing time',
                                      tiempo_resolucion)
                                tiempo_total = tiempo_entrenamiento + tiempo_resolucion
                                write(instancia, num_max_iter,
                                      delta_learning_rate, delta_n, distance,
                                      learning_rate, factor_neuronas,
                                      corrida + 1, tiempo_entrenamiento,
                                      tiempo_resolucion, tiempo_total,
                                      generation)
                                print('Route found of length {}'.format(
                                    distance))
                                print('Total solution time',
                                      tiempo_total)
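Examples #1 and #7 record every run through a write helper that is not included. A hypothetical sketch that appends one row per run to a CSV file; the file name resultados.csv and the column layout are assumptions, and *extra absorbs the additional timing/generation fields passed in Example #7:

import csv

def write(instancia, num_max_iter, delta_learning_rate, delta_n, distance,
          learning_rate, factor_neuronas, corrida, *extra):
    """Append one experiment row to a results CSV (hypothetical layout)."""
    with open('resultados.csv', 'a', newline='') as f:
        csv.writer(f).writerow([instancia, num_max_iter, delta_learning_rate,
                                delta_n, distance, learning_rate,
                                factor_neuronas, corrida, *extra])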
Example #8
def plt_mtsp(args):
    p = Path(args.data_dir) / 'data1.csv'
    df = pd.read_csv(p)

    # label the points
    for index, row in df.iterrows():
        plt.text(row['x'], row['y'], int(row['city']), fontsize=10)
    print(df['city'].to_numpy())

    x = df['x']
    y = df['y']
    # plot the points
    plt.scatter(x, y, c='blue')
    start = df.query('city==0')
    plt.title('Best Routes')
    plt.xlabel('x')
    plt.ylabel('y')

    plt.scatter(float(start['x']), float(start['y']), c='red')

    routes = args.routes

    routes_nps = []
    for route in routes:
        route = np.array(route)
        routes_nps.append(route)
    dfs=[]
    dises=[]
    xs=[]
    ys=[]
    colors=['r','g','b','k']
    for idx,item in enumerate(routes_nps):
        df_ = df.reindex(item)
        dfs.append(df_)
        dises.append(route_distance(df_))
        print("--> route distance:{}".format(dises[idx]))
        xs.append(np.array(dfs[-1]['x']))
        ys.append(np.array(dfs[-1]['y']))
        plt.plot(xs[-1],ys[-1],colors[idx])
        plt.plot([xs[-1][0],xs[-1][-1]],[ys[-1][0],ys[-1][-1]],colors[idx])

    print(np.array(dises).sum())

    # plt.xlim([169, 171])
    # plt.ylim([35, 37])

    plt.show()
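plt_mtsp expects an args object carrying data_dir and routes. A hypothetical invocation using argparse.Namespace; the folder name and route indices are invented for illustration, and with the colour list above at most four routes can be drawn:

from argparse import Namespace

# data_dir must contain data1.csv with 'city', 'x' and 'y' columns;
# routes is a list of index lists, one per vehicle.
args = Namespace(data_dir='data', routes=[[0, 2, 4], [1, 3, 5]])
plt_mtsp(args)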
Example #9
def main():
    problem = read_tsp('assets\mytest.tsp')  # open the file; problem is a DataFrame
    # problem = pd.read_csv('assets\city20.csv')  # alternative: read a CSV directly (the city column as str)
    print(problem)

    route = som(problem, 100000)  # second argument is the iteration count; route is a list
    np.savetxt('out_files\ route.txt', route, delimiter=',')
    print("route:", route)

    problem = problem.reindex(route)

    distance = route_distance(problem)

    print('Route found of length {}'.format(distance))
Example #10
def main(tsp_filepath, iterations):
    logger.info('Reading TSP file: {}'.format(tsp_filepath))
    problem = read_tsp(tsp_filepath)

    logger.info('Starting searching sub-optimal solution')
    route = som(problem, iterations)

    logger.info('Reindexing DataFrame')
    problem = problem.reindex(route)

    logger.info('Computing route distance')
    distance = route_distance(problem)

    logger.info('Route found of length {}'.format(distance))
Example #11
def main():
    #print(argv)
    # if len(argv) != 2:
    #     print("Correct use: python src/main.py <filename>.tsp")
    #     return -1
    # city_file = 'assets/uy734.tsp'
    problem = read_tsp('uy734.tsp')
    route = som(problem, 100000)

    problem = problem.reindex(route)

    distance = route_distance(problem)

    print('Route found of length {}'.format(distance))
Example #12
def main():
    if len(argv) != 2:
        print("Correct use: python src/main.py <filename>.tsp")
        return -1

    problem = read_tsp(argv[1])

    route = som(problem, 100000)

    problem = problem.reindex(route)

    distance = route_distance(problem)

    print('Route found of length {}'.format(distance))
Example #13
def main():

    # choose the input data file
    if len(argv) != 2:
        print("Usage: python SOM/main.py <filename>.tsp")
        return -1
    # read the tsp file

    problem = read_tsp(argv[1])

    # problem = read_tsp('assets/qa194.tsp')
    route = som(problem, 100000)
    problem = problem.reindex(route)
    distance = route_distance(problem)
    print("最短路线为:{}".format(distance))
Example #14
def main():
    # read parameters from the command line
    # check that the invocation is correct
    if len(argv) != 2:
        print("Correct use: python src/main.py <filename>.tsp")
        return -1
    # read the tsp file name (argv[0] is the script itself, not a parameter)
    problem = read_tsp(argv[1])
    # train the SOM network to find a route
    route = som(problem, 100000)

    problem = problem.reindex(route)

    distance = route_distance(problem)

    print('Route found of length {}'.format(distance))
Example #15
def som_from_outside(data, instancia="ar9152"):
    num_max_iter = 5000  # maximum number of iterations
    learning_rate = 0.8  # learning rate
    delta_learning_rate = 0.99997  # decay rate of the learning rate
    delta_n = 0.9997  # decay rate of n (to revisit later)
    sensi_radio = 1  # sensitivity threshold for the BMU radius
    sensi_learning_rate = 0.001  # sensitivity threshold for the learning rate
    factor_neuronas = 8  # number of neurons per city in the network
    plotear = False  # if True, create a plot every 1000 iterations plus one of the final route
    # generation = 1: normal, 2: circle
    generation = 2
    route = som_2(instancia, data, num_max_iter, learning_rate,
                  delta_learning_rate, delta_n, sensi_radio,
                  sensi_learning_rate, factor_neuronas, generation, plotear)
    problem = data.reindex(route)
    distance = route_distance(problem)
    print('Route found of length {}'.format(distance))
Example #16
def main():
    #if len(argv) != 2:       # deleted by EE526
    #   print("inCorrect use: python src/main.py <filename>.tsp")
    #  return -1

    problem = read_tsp('assets\qa194.tsp')  # open the file
    print(problem)

    route = som(problem, 10000)
    np.savetxt('out_files\ route.txt', route, delimiter=',')
    print("route:", route)

    problem = problem.reindex(route)

    distance = route_distance(problem)

    print('Route found of length {}'.format(distance))
Example #17
def main():
    if len(argv) != 2:
        print("Correct use: python src/main.py <filename>.tsp")
        return -1
    start = time.perf_counter()  # time.clock() was removed in Python 3.8

    problem = read_tsp(argv[1])

    route = som(problem, 20000)

    problem = problem.reindex(route)

    distance = route_distance(problem)

    elapsed = (time.perf_counter() - start)

    print('Route found of length {}'.format(distance))

    print("Time used:", elapsed)
Example #18
def main():
    #if len(argv) != 2:       # deleted by EE526
    #   print("inCorrect use: python src/main.py <filename>.tsp")
    #  return -1
    time1 = time.time()
    # problem = read_tsp('assets\qa194.tsp')  # open the file; problem is a DataFrame
    problem = pd.read_csv('assets\china.csv', encoding='gbk')  # alternative: read the table directly
    # print(problem)

    route = som(problem, 10000)  # second argument is the iteration count; route is a list
    np.savetxt('out_files\ route.txt', route, delimiter=',')
    # print("route:", route)
    time2 = time.time()
    print('Running time: %s Seconds' % (time2 - time1))
    problem = problem.reindex(route)

    distance = route_distance(problem)

    print('Route found of length {}'.format(distance))
Example #19
def plt_traj_np(args):
    p = Path(args.data_dir)/'data1.csv'
    df = pd.read_csv(p)
    route = np.array(args.route_plt)
    df = df.reindex(route)
    dis = route_distance(df)
    print("--> route distance:{}".format(dis))
    x = np.array(df['x'])
    y = np.array(df['y'])

    # label the points
    for index, row in df.iterrows():
        plt.text(row['x'], row['y'], int(row['city']), fontsize=10)

    print(df['city'].to_numpy())
    # draw the route
    plt.plot(x, y, 'r')
    plt.plot([x[0], x[-1]], [y[0], y[-1]], 'r')


    # plot the points
    plt.scatter(x, y, c='blue')
    start = df.query('city==0')
    plt.title('Best Route')
    plt.xlabel('x')
    plt.ylabel('y')

    plt.scatter(float(start['x']),float(start['y']),c='red')

    #plt.xlim([169, 171])
    #plt.ylim([35, 37])

    plt.show()

Example #20
def main():
    #if len(argv) != 2:       # deleted by EE526
    #   print("inCorrect use: python src/main.py <filename>.tsp")
    #  return -1

    problem = read_tsp('assets/att48.tsp')  # open the file; problem is a DataFrame
    # problem = pd.read_csv('assets/a280.csv', encoding='utf-8')  # alternative: read the table directly
    # print(problem)

    route, dSave = som(problem, 10000)  # second argument is the iteration count; route is a list
    np.savetxt('out_files\ route.txt', route, delimiter=',')
    print("route:", route)
    problem = problem.reindex(route)
    distance = route_distance(problem)
    # plot the convergence curve (distance vs. iteration)
    fig = plt.figure()
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(dSave, color='red')
    plt.savefig("./out_files/iter.png")
    plt.show()

    print('Route found of length {}'.format(distance))
Example #21
def SOM(args):
    """Solve the TSP using a Self-Organizing Map."""

    # Obtain the normalized set of cities (w/ coord in [0,1])
    iteration = args.iteration
    learning_rate = args.learning_rate
    decay = args.decay

    out_dir = Path(args.out_dir)
    out_dir.mkdir_p()
    cities = pd.read_csv(Path(args.data_dir)/'data1.csv')
    cities.to_csv(out_dir/'cities.csv')

    cities_nm = cities.copy()
    cities_nm[['x', 'y']] = normalize(cities_nm[['x', 'y']])
    cities_nm.to_csv(out_dir/'cities_nm.csv')


    # The population size is 8 times the number of cities
    n = cities_nm.shape[0] * 8

    # Generate an adequate network of neurons:
    neuron_chain = init_neurons(n)
    print('--> Network of {} neurons created. Starting the iterations:'.format(n))
    best_route=np.array([0])


    best_id=0
    min_loss=0
    losses={}
    losses_decay = {}

    for i in tqdm(range(iteration)):

        # Choose a random city
        city = cities_nm.sample(1)[['x', 'y']].values  # random sampling
        winner_idx = select_closest(neuron_chain, city)
        # Generate a filter that applies changes to the winner's gaussian
        gaussian = get_neighborhood(center=winner_idx, radix=n//10, domain=neuron_chain.shape[0])
        # Update the network's weights (closer to the city)
        neuron_chain += gaussian[:,np.newaxis] * learning_rate * (city - neuron_chain)
        # Decay the variables
        learning_rate = learning_rate * decay
        n = n * decay



        if i % args.evaluate_freq==0:
            route = get_route(cities_nm, neuron_chain)

            cities_od = cities.reindex(route)
            loss = route_distance(cities_od)
            losses[i] = loss

            if  min_loss==0 or min_loss > loss:
                min_loss=loss
                best_route = list(route.astype(np.float64))
                best_id = i
                losses_decay[i] = loss
                cities_od.to_csv(out_dir / 'route_{:04d}.csv'.format(i))
                save_neuron_chain(neuron_chain, out_dir / "neuron_chain_{:04d}.npy".format(i))
    #end for

        # Check if any parameter has completely decayed.
        if n < 1:
            print('Radius has completely decayed, finishing execution',
            'at {} iterations'.format(i))
            break
        if learning_rate < 0.001:
            print('Learning rate has completely decayed, finishing execution',
            'at {} iterations'.format(i))
            break


    print('Completed {} iterations.'.format(iteration))

    results={}
    results['min_loss'] = min_loss
    results['best_id'] = best_id
    results['best_route'] = best_route
    results['losses_decay'] = losses_decay
    results['losses'] = losses

    p = Path(out_dir / 'results.json')
    with open(p, 'w') as fp:
        json.dump(results, fp)
        print('ok')

    return results
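SOM() dumps its results dictionary to results.json in the output directory. A small usage sketch for loading it back afterwards; the directory name 'out' stands in for whatever was passed as args.out_dir:

import json
from pathlib import Path

out_dir = Path('out')  # assumption: the same value that was passed as args.out_dir
with open(out_dir / 'results.json') as fp:
    results = json.load(fp)
print('best iteration:', results['best_id'], 'min loss:', results['min_loss'])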
Example #22
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sys import argv

from distance import route_distance
from ga_solver import CGASolver
from io_helper import read_tsp

if __name__ == '__main__':
    assert len(argv) == 2, "Correct use: python3 main.py <filename>.tsp"

    tsp_map = read_tsp(argv[1])

    ga_solver = CGASolver(tsp_map)
    route = ga_solver.solve(100000)

    print('Route found of length {}'.format(
        route_distance(tsp_map.reindex(route))))
Example #23
                  'at {} iterations'.format(i))
            break
        if learning_rate < 0.001:
            print('Learning rate has completely decayed, finishing execution',
                  'at {} iterations'.format(i))
            break
    else:
        print('Completed {} iterations.'.format(iterations))

    plot_network(cities, network, name='diagrams/final.png')

    route = get_route(cities, network)
    plot_route(cities, route, 'diagrams/route.png')

    return route


if __name__ == '__main__':
    if (len(argv) != 2):
        print("Correct use: python main.py <filename>.tsp")
        exit()

    problem = read_tsp(argv[1])

    route = som(problem, 100000)

    problem = problem.reindex(route)

    distance = route_distance(problem)

    print('Route found of length {}'.format(distance))
Example #24
def som(target,
        iterations,
        learning_rate=0.8,
        obstacle=None,
        fbzs=None,
        data_path="assets/"):
    """
    target: [DataFrame] ['city', 'y', 'x']
    iterations: [int] the max iteration times
    learning rate: [float] the original learning rate, will decay
    obstacle: [DataFrame] ['obs' 'y' 'x']
    data_path: [str] path where data produced during the iterations is saved
    
    return: [index] route

    Solve the TSP using a Self-Organizing Map.
    """

    # Obtain the normalized set of cities (w/ coord in [0,1])
    # copy one so the later process won't influence the original data
    cities = target.copy()[['x', 'y']]
    obs = obstacle.copy()[['x', 'y']] if obstacle is not None else None

    norm_ans = normalization(fbzs, cities, obs)
    cities, obs, span, fbzs = norm_ans["result"][0], norm_ans["result"][
        1], norm_ans["dif"], norm_ans["fbzs"]
    obs = obs[['x', 'y']].to_numpy()
    targets = cities[['x', 'y']].to_numpy()
    # The population size is 8 times the number of cities
    n = targets.shape[0] * 8  # this is the number of neurons, not a population size
    n = n + obs.shape[0] * 2 if obstacle is not None else n
    n = n + len(fbzs) * 2 if fbzs is not None else n

    # parameters used to observe and evaluate convergence (custom additions)
    axes = update_figure()
    old_delta, old_network = [], 0  # variables used to measure how much the network changes
    gate = 1 / span  # convergence threshold: the target precision mapped into normalized coordinates
    obs_size = 4 * gate
    # Generate an adequate network of neurons:
    network = generate_network(n)  # (n, 2) array
    logging.info('Network of %s neurons created. Starting iterations:', n)
    finish_info = 'Completed all iterations,'  # default message if no early-stop condition triggers

    for i in range(iterations):
        if not i % 100:
            # "\r"回车,将光标移到本行开头,大概就是覆盖了吧
            print('\t> Iteration {}/{}'.format(i, iterations), end="\r")

        route_dir_vec = get_route_vector(network, d=0, t=1)  # clockwise departure direction vectors along the current route

        # Choose a random city
        # DataFrame.values --> numpy.ndarray
        # city = cities.sample(1)[['x', 'y']].values
        city = random.choice(targets)  # pick a random target
        winner_idx = select_closest(network, city)
        gaussian = get_neighborhood(winner_idx, n // 10, network.shape[0])
        city_delta = gaussian[:, np.newaxis] * (city - network)
        network += learning_rate * city_delta

        # choose a random obstacle
        # if obs is None:
        #     obs_delta = 0
        # else:
        #     # obs_influence = ver_vec(np.roll(route_dir_vec, 1, axis=0),
        #     #                         get_ob_influences(network, obs, obs_size))
        #     obs_delta = get_ob_influences(network, obs, obs_size)
        #     network += learning_rate * obs_delta

        # adjust the forbidden area
        # if fbzs is not None:
        #     fbzs_delta = np.zeros(network.shape)
        #     for fbz in fbzs:
        #         for index, node in enumerate(network):
        #             if is_point_in_polygon(node, fbz) == 1:
        #                 ver_dist_v = ver_vec(get_route_vector(fbz), fbz - node)
        #                 # compute the distance from the node to the boundary and find the closest segment
        #                 ver_dist = np.linalg.norm(ver_dist_v, axis=1)
        #                 closest = ver_dist.argmin()

        #                 # update delta
        #                 fbzs_delta[index] += ver_dist_v[closest]
        #             # a safety margin could be added here: / ver_dist[closest] * 1
        # a = np.linalg.norm(fbzs_delta, axis=1)
        # fbzs_delta = ver_vec(np.roll(route_dir_vec, 1, axis=0),
        #                      fbzs_delta)  # keep only the perpendicular component
        # b = np.linalg.norm(fbzs_delta, axis=1)
        # fbzs_delta[b != 0] *= (a[b != 0] / b[b != 0])[:, np.newaxis]
        # network += fbzs_delta

        # Update the network's weights (closer to the city)
        # delta = city_delta + obs_delta
        # network += learning_rate * delta

        # redistribute the nodes so they are spaced more evenly
        # network = sepaprate_node(network)
        winner_indices = np.apply_along_axis(
            func1d=lambda t: select_closest(network, t),
            axis=1,
            arr=targets,
        )  # winners are not moved
        network = sep_and_close_nodes(
            network,
            decay=learning_rate,
            targets=targets,
            obstacle=obs,  # circular obstacles
            obs_size=obs_size,  # obstacle radius
            fbzs=fbzs,  # irregular (polygonal) obstacles
            gate=gate,  # maximum update step
            winner_indices=winner_indices,
        )
        # Decay the variables
        # learning-rate decay, equivalent to e^(-t/t0) with t0 = 33332.83
        learning_rate = learning_rate * 0.99997
        # neighborhood decay: sigma = sigma0 * e^(-t/t0), sigma0 = n // 10, t0 = 3332.83
        n = n * 0.9997

        # Check for plotting interval
        if not i % 200:
            plot_network(
                targets,
                network,
                name=data_path + '{:05d}.png'.format(i),
                axes=axes,
                obstacle=obs,
                obs_size=obs_size,
                span=span,
                fbzs=fbzs,
            )
            update_figure(axes, clean=True)

        # Check if any parameter has completely decayed (convergence check).
        if n < 1:
            finish_info = 'Radius has completely decayed.'
            break
        if learning_rate < 0.001:
            finish_info = 'Learning rate has completely decayed.'
            break

        delta = network - old_network if old_network is not None else network
        max_delta = np.linalg.norm(delta, axis=1).max()  # largest neuron movement this iteration
        old_delta.append(max_delta)
        old_network = network.copy()
        if len(old_delta) > network.shape[0]:  # keep one delta per neuron so random sampling cannot stall convergence
            old_delta.pop(0)
        if max(old_delta) < gate:
            # stop once the largest recent movement is below the chosen precision
            finish_info = "Average movement has reduced to {}, ".format(
                np.mean(old_delta) * span)
            finish_info += "max movement {},".format(np.max(old_delta) * span)
            break

    # post-training work
    finish_info += " finishing execution at {} iterations".format(i)
    logging.info(finish_info)

    # save the route image
    plot_network(
        targets,
        network,
        name=data_path + 'final.png',
        obstacle=obs,
        obs_size=obs_size,
        span=span,
        fbzs=fbzs,
    )

    # compute the route distance
    distance = route_distance(network) * span  # rescale back to the original coordinate system
    logging.info('Route found of length %s', distance)

    return distance
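The commented-out forbidden-zone block above calls is_point_in_polygon. A standard ray-casting sketch of such a test; the project's own version may differ, for instance in how points on the boundary are handled:

def is_point_in_polygon(point, polygon):
    """Ray casting (sketch): return 1 if point lies inside the closed polygon, else 0."""
    x, y = point
    inside = False
    n = len(polygon)
    for i in range(n):
        x1, y1 = polygon[i]
        x2, y2 = polygon[(i + 1) % n]
        if (y1 > y) != (y2 > y):  # this edge straddles the horizontal ray from the point
            x_cross = x1 + (y - y1) * (x2 - x1) / (y2 - y1)
            if x < x_cross:
                inside = not inside
    return 1 if inside else 0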
Example #25
def multi_som(target,
              iterations,
              learning_rate=0.8,
              obstacle=None,
              fbzs=None,
              data_path="assets/"):
    # load the data and normalize it
    logging.info('multi_som loading data')
    targets = target.copy()[['x', 'y']]
    obs = obstacle.copy()[['x', 'y']] if obstacle is not None else None

    norm_ans = normalization(fbzs, targets, obs)
    targets, obs, span, fbzs = norm_ans["result"][0], norm_ans["result"][
        1], norm_ans["dif"], norm_ans["fbzs"]
    # convert everything to ndarray
    obs = obs[['x', 'y']].to_numpy()
    targets = targets[['x', 'y']].to_numpy()

    # some settings that make the run easier to manage
    axes = update_figure()  # set up a figure
    old_delta = []
    gate = 1 / span  # convergence threshold: the target precision mapped into normalized coordinates
    obs_size = 4 * gate
    net_size = 15
    # cluster the targets and build one ring per cluster
    k = 2
    labels = cluster(targets, n=k, fbzs=fbzs)
    Network_group = []  # one Network per cluster, built from the clustering result
    for i in range(k):
        sub_targets = targets[labels == i]
        num = sub_targets.shape[0] * net_size
        radius = num
        sub_network = generate_network(num)
        Network_group.append(Network(sub_network, num, sub_targets, radius))

    logging.info('%s network created', len(Network_group))
    logging.info('Starting iterations:')
    finish_info = 'Completed all iterations,'  # default message if no early-stop condition triggers

    for i in range(iterations):
        if not i % 100:
            print('\t> Iteration {}/{}'.format(i, iterations), end="\r")

        for net in Network_group:
            # standard SOM update
            target = random.choice(net.targets)
            winner_idx = select_closest(net.network, target)
            gaussian = get_neighborhood(winner_idx, net.radius // 10, net.num)
            target_delta = gaussian[:, np.newaxis] * (target - net.network)
            net.network += learning_rate * target_delta

            # the node closest to each target wins; the remaining nodes move toward where the distance is smallest
            winner_indices = np.apply_along_axis(
                func1d=lambda t: select_closest(net.network, t),
                axis=1,
                arr=net.targets,
            )  # winners are not moved
            net.network = sep_and_close_nodes(
                net.network,
                decay=learning_rate,
                targets=net.targets,
                obstacle=obs,  # circular obstacles
                obs_size=obs_size,  # obstacle radius
                fbzs=fbzs,  # irregular (polygonal) obstacles
                gate=gate,  # maximum update step
                winner_indices=winner_indices,
            )

        # Decay the variables
        learning_rate = learning_rate * 0.99997
        for net in Network_group:
            net.radius *= 0.9997

        # Check for plotting interval
        if not i % 200:
            plot_network(
                targets,
                neurons=None,
                name=data_path + '{:05d}.png'.format(i),
                axes=axes,
                obstacle=obs,
                obs_size=obs_size,
                span=span,
                fbzs=fbzs,
                Networks=Network_group,
            )
            update_figure(axes, clean=True)

        # Check if any parameter has completely decayed (convergence check).
        if max([net.radius for net in Network_group]) < 1:
            finish_info = 'Radius has completely decayed.'
            break
        if learning_rate < 0.001:
            finish_info = 'Learning rate has completely decayed.'
            break
        for net in Network_group:
            old_delta.append(net.get_delta())
            if len(old_delta) > net_size * targets.shape[0]:  # avoid random sampling stalling convergence
                old_delta.pop(0)
        if max(old_delta) < gate:
            # stop once the largest recent movement is below the chosen precision
            finish_info = "Max movement has reduced to {},".format(
                max(old_delta) * span)
            break

    # post-training work
    finish_info += " finishing execution at {} iterations".format(i)
    logging.info(finish_info)

    # save the route image
    plot_network(
        targets,
        neurons=None,
        name=data_path + 'final.png',
        obstacle=obs,
        obs_size=obs_size,
        span=span,
        fbzs=fbzs,
        Networks=Network_group,
    )

    # compute the total route distance
    distance = 0
    for net in Network_group:
        distance += route_distance(net.network) * span  # rescale back to the original coordinate system
    logging.info('Route found of length %s', distance)

    return distance
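multi_som splits the targets into k groups with a cluster helper before building one neuron ring per group. A hypothetical stand-in based on k-means; the original may additionally take the forbidden zones fbzs into account:

from sklearn.cluster import KMeans

def cluster(targets, n=2, fbzs=None):
    """Split (m, 2) target coordinates into n groups; returns m labels (hypothetical stand-in)."""
    # fbzs is accepted for interface compatibility but ignored in this sketch.
    return KMeans(n_clusters=n, n_init=10, random_state=0).fit_predict(targets)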