Exemplo n.º 1
0
def som(problem, iterations, learning_rate=0.8):
    """Solve the TSP using a Self-Organizing Map.

    Parameters
    ----------
    problem : DataFrame with 'x'/'y' city coordinates.
    iterations : maximum number of training iterations.
    learning_rate : initial learning rate, decayed each iteration.

    Returns
    -------
    (route, dSave) : the visiting order of the cities and the route
    distances sampled every 100 iterations.
    """
    # Obtain the normalized set of cities (w/ coord in [0,1])
    cities = problem.copy()
    # Ceil division: iterations // 100 under-allocates when iterations is
    # not a multiple of 100, and dSave[i // 100] would then raise
    # IndexError on the last partial window.
    dSave = np.zeros((iterations + 99) // 100)
    cities[['x', 'y']] = normalize(cities[['x', 'y']])

    # Number of neurons: 3x the number of cities.
    n = cities.shape[0] * 3

    # Generate an adequate network of neurons:
    network = generate_network(n)
    print('Network of {} neurons created. Starting the iterations:'.format(n))

    for i in range(iterations):
        if not i % 100:  # i % 100 == 0
            print('\t> Iteration {}/{}'.format(i, iterations), end="\r")
        # Choose a random city (1x2 array of normalized coordinates).
        city = cities.sample(1)[['x', 'y']].values
        winner_idx = select_closest(network, city)
        # Generate a filter that applies changes around the winner.
        gaussian = get_neighborhood(winner_idx, n // 10, network.shape[0])
        # Update the network's weights (closer to the city);
        # np.newaxis broadcasts the 1-D filter over both coordinates.
        network += gaussian[:, np.newaxis] * learning_rate * (city - network)
        # Decay the variables
        learning_rate = learning_rate * 0.99997
        n = n * 0.9997

        # Plot the network and record the route distance every 100 steps.
        if not i % 100:
            # Forward slashes are portable; the original backslash path
            # contained the invalid escape sequence '\p'.
            plot_network(cities,
                         network,
                         name='out_files/process/city_network%d.png' % (i))
            route = get_route(cities, network)
            p = problem.reindex(route)
            dSave[i // 100] = route_distance(p)

        # Check if any parameter has completely decayed.
        if n < 1:
            print('Radius has completely decayed, finishing execution',
                  'at {} iterations'.format(i))
            break
        if learning_rate < 0.001:
            print('Learning rate has completely decayed, finishing execution',
                  'at {} iterations'.format(i))
            break
    else:
        print('Completed {} iterations.'.format(iterations))

    plot_network(cities, network)

    route = get_route(cities, network)
    plot_route(cities, route)
    return route, dSave
Exemplo n.º 2
0
def som(problem, iterations, learning_rate=0.8):
    """Solve the TSP using a Self-Organizing Map.

    Returns (route, dSave) where dSave holds the route distance sampled
    every dStep iterations.
    """
    # Obtain the normalized set of cities (w/ coord in [0,1])
    dStep = 100  # sample the route distance every dStep iterations
    # Ceil division: iterations // dStep under-allocates when iterations
    # is not a multiple of dStep, making dSave[i // dStep] go out of bounds.
    dSave = np.zeros((iterations + dStep - 1) // dStep)
    cities = problem.copy()
    cities[['x', 'y']] = normalize(cities[['x', 'y']])

    # Number of neurons: 4x the number of cities.
    n = cities.shape[0] * 4

    # Generate an adequate network of neurons:
    network = generate_network(cities, n, c=2)
    print('Network of {} neurons created. Starting the iterations:'.format(n))

    for i in range(iterations):
        if not i % dStep:  # use dStep instead of a hard-coded 100
            print('\t> Iteration {}/{}'.format(i, iterations), end="\r")
        # Choose a random city (1x2 array).
        city = cities.sample(1)[['x', 'y']].values
        winner_idx = select_closest(network, city)
        # Shortcut: if the winner is already within the distance
        # threshold, snap it directly onto the city.
        if np.linalg.norm(city - network[winner_idx, :], axis=1) < 0.005:
            network[winner_idx, :] = city
        else:
            # Gaussian filter that applies changes around the winner —
            # the core of the algorithm.
            gaussian = get_neighborhood(winner_idx, n // 10, network.shape[0])
            # Update the network's weights (closer to the city);
            # np.newaxis broadcasts the filter over both coordinates.
            network += gaussian[:, np.newaxis] * learning_rate * (city - network)

        # Decay the variables
        learning_rate = learning_rate * 0.99999
        n = n * 0.9995  # good values lie in 0.9991-0.9997

        if not i % dStep:  # record the current route distance
            route = get_route(cities, network)
            p = problem.reindex(route)
            dSave[i // dStep] = route_distance(p)

    else:
        print('Completed {} iterations.'.format(iterations))
    # Plotting
    plot_network(cities, network)
    route = get_route(cities, network)

    cities = problem.copy()
    citiesReal = cities[['x', 'y']]  # un-normalized (real) coordinates
    plot_route(citiesReal, route)
    return route, dSave
Exemplo n.º 3
0
def IDtspsom(problem,
             iterations,
             learning_rate=0.8,
             dsigma=0.9995,
             dalpha=0.99997):
    """Solve the TSP using a Self-Organizing Map.

    Density-based initialization plus a snap-to-city shortcut; the
    network has 3x as many neurons as cities.

    Returns (route, dSave): the route and the distances sampled every
    dStep iterations.
    """
    # Obtain the normalized set of cities (w/ coord in [0,1])
    cities = problem.copy()
    dStep = 100  # sample the route distance every dStep iterations
    # Ceil division so dSave[i // dStep] never indexes out of bounds
    # when iterations is not a multiple of dStep.
    dSave = np.zeros((iterations + dStep - 1) // dStep)
    cities[['x', 'y']] = normalize(cities[['x', 'y']])
    # Number of neurons: 3x the number of cities.
    n = cities.shape[0] * 3

    # Generate an adequate network of neurons:
    network = generate_network(cities, n, c=3)
    print('Network of {} neurons created. Starting the iterations:'.format(n))

    for i in range(iterations):
        city = cities.sample(1)[['x', 'y']].values  # random city, 1x2 array
        winner_idx = select_closest(network, city)
        # Shortcut: snap the winner directly onto the city when it is
        # already within the distance threshold.
        if np.linalg.norm(city - network[winner_idx, :],
                          axis=1) < 0.005:
            network[winner_idx, :] = city
        else:
            # Gaussian neighborhood — the core of the algorithm.
            gaussian = get_neighborhood(winner_idx, n // 10,
                                        network.shape[0])
            # np.newaxis broadcasts the filter over both coordinates.
            network += gaussian[:, np.newaxis] * learning_rate * (
                city - network)
        # Decay the variables
        learning_rate = learning_rate * dalpha
        n = n * dsigma  # good values lie in 0.9991-0.9997

        if not i % dStep:  # record the current route distance
            route = get_route(cities, network)
            p = problem.reindex(route)
            dSave[i // dStep] = route_distance(p)

    else:
        print('Completed {} iterations.'.format(iterations))
    route = get_route(cities, network)
    return route, dSave
Exemplo n.º 4
0
def som(problem, iterations, learning_rate=0.8):
    '''
    Solve the TSP using a Self-Organizing Map.
    :params problem(dataframe): city coordinates
    :params iterations(int): maximum number of iterations
    :learning_rate(float): learning rate
    :return route
    '''
    cities = problem.copy()

    # Work on coordinates scaled into the unit square.
    cities[['x', 'y']] = normalize(cities[['x', 'y']])

    # Neuron population: 8 neurons per city.
    # <Hyperparameter:times>
    n = cities.shape[0] * 8

    # Create the ring of neurons, shape (n, 2).
    network = generate_network(n)
    print('Network of {} neurons created. Starting the iterations:'.format(n))

    for step in range(iterations):
        if step % 100 == 0:
            print('\t> Iteration {}/{}'.format(step, iterations), end="\r")

        # Pick one city at random and find its best-matching neuron.
        sample = cities.sample(1)[['x', 'y']].values
        winner = select_closest(network, sample)

        # Gaussian neighbourhood centred on the winner.
        # <Hyperparameter:radius>
        influence = get_neighborhood(winner, n // 10, network.shape[0])

        # Pull neurons toward the sampled city, scaled by the filter.
        network += influence[:, np.newaxis] * learning_rate * (sample - network)

        # Exponential decay of the learning rate and the radius.
        # <Hyperparameter:decay rate>
        learning_rate *= 0.99997
        n *= 0.9997

        # Periodic snapshot of the network state.
        if step % 1000 == 0:
            plot_network(cities, network, name='diagrams/{:05d}.png'.format(step))

        # Stop early once either parameter has fully decayed.
        if n < 1:
            print('Radius has completely decayed, finishing execution',
                  'at {} iterations'.format(step))
            break
        if learning_rate < 0.001:
            print('Learning rate has completely decayed, finishing execution',
                  'at {} iterations'.format(step))
            break
    else:
        print('Completed {} iterations.'.format(iterations))

    plot_network(cities, network, name='diagrams/final.png')

    route = get_route(cities, network)
    plot_route(cities, route, 'diagrams/route.png')
    return route
Exemplo n.º 5
0
def som(instancia, problem, iterations, learning_rate, delta_learning_rate,
        delta_n, sensi_radio, sensi_learning_rate, factor_neuronas, plotear):
    """Solve the TSP using a Self-Organizing Map."""
    # Normalize the city coordinates into [0, 1].
    cities = problem.copy()
    cities[['x', 'y']] = normalize(cities[['x', 'y']])

    # The neuron population is factor_neuronas times the number of cities.
    n = cities.shape[0] * factor_neuronas

    # Build the initial neuron network.
    network = generate_network(n)

    if plotear:
        print('Red de {} neuronas creadas. Comenzando las iteraciones:'.format(n))

    for step in range(iterations):
        if step % 100 == 0:
            print('\t> Iteración {}/{}'.format(step, iterations), end="\r")
        # Sample one city at random.
        sample = cities.sample(1)[['x', 'y']].values
        # Find the neuron closest to it (the winner / BMU).
        bmu = select_closest(network, sample)
        # Gaussian filter that spreads the update around the BMU.
        kernel = get_neighborhood(bmu, n // 10, network.shape[0])
        # Pull the network toward the sampled city.
        network += kernel[:, np.newaxis] * learning_rate * (sample - network)

        # Decay both parameters.
        learning_rate *= delta_learning_rate
        n *= delta_n

        # Optional snapshot every 1000 iterations.
        if plotear and step % 1000 == 0:
            plot_network(cities, network,
                         name='imagenes/' + instancia + '/{:05d}.png'.format(step))

        # Stop when either parameter falls below its sensitivity threshold.
        if n < sensi_radio:
            print('Radio por debajo de sensibilidad, Se ha terminado la ejecución',
                  'a {} las iteraciones'.format(step))
            break
        if learning_rate < sensi_learning_rate:
            print('Learning rate por debajo de sensibilidad, Se ha terminado la ejecución',
                  'a las {} iteraciones'.format(step))
            break
    else:
        print('Se han completado las {} iteraciones.'.format(iterations))

    if plotear:
        plot_network(cities, network, name='imagenes/' + instancia + '/final.png')

    route = get_route(cities, network)
    if plotear:
        plot_route(cities, route, 'imagenes/' + instancia + '/route.png')
    return route
Exemplo n.º 6
0
def som(problem, iterations, learning_rate=0.8):
    """Solve the TSP using a Self-Organizing Map.

    problem: DataFrame with 'x'/'y' city coordinates.
    iterations: maximum number of iterations.
    learning_rate: initial learning rate, decayed each iteration.
    Returns the route (visiting order of the cities).
    """
    # Obtain the normalized set of cities (w/ coord in [0,1])
    cities = problem.copy()
    cities[['x', 'y']] = normalize(cities[['x', 'y']])
    # Number of neurons: 8x the number of cities.
    n = cities.shape[0] * 8
    network = generate_network(n)  # Generate an adequate network of neurons
    print('Network of {} neurons created. Starting the iterations:'.format(n))

    for i in range(iterations):
        # Snapshot every 1000 iterations; the original `i == 0 or ...`
        # was redundant since i % 1000 == 0 already holds at i == 0.
        if not i % 1000:
            # Forward slashes are portable; the original backslash path
            # contained the invalid escape sequence '\p'.
            plot_network(cities,
                         network,
                         name='out_files/process/city_network%d.png' %
                         (i // 1000 + 1))

        city = cities.sample(1)[['x', 'y']].values  # random city, 1x2 array
        winner_idx = select_closest(network, city)
        # Generate a filter that applies changes around the winner.
        gaussian = get_neighborhood(winner_idx, n // 10, network.shape[0])
        # Update the network's weights (closer to the city);
        # np.newaxis broadcasts the 1-D filter over both coordinates.
        network += gaussian[:, np.newaxis] * learning_rate * (city - network)
        # Decay the variables
        learning_rate = learning_rate * 0.99997
        n = n * 0.9997

        # Check if any parameter has completely decayed.
        if n < 1:
            print('Radius has completely decayed, finishing execution',
                  'at {} iterations'.format(i))
            break
        if learning_rate < 0.001:
            print('Learning rate has completely decayed, finishing execution',
                  'at {} iterations'.format(i))
            break
    else:
        print('Completed {} iterations.'.format(iterations))
    plot_network(cities, network)
    route = get_route(cities, network)
    plot_route(problem[['x', 'y']], route)
    return route
Exemplo n.º 7
0
def som(problem, iterations, learning_rate=0.8):
    """Run a Self-Organizing Map over the city set and return a TSP route."""
    cities = problem.copy()

    # Normalize the coordinates into [0, 1].
    cities[['x', 'y']] = normalize(cities[['x', 'y']])

    # Eight neurons per city.
    n = cities.shape[0] * 8

    network = generate_network(n)
    print('Network of {} neurons created. Starting the iterations:'.format(n))

    for it in range(iterations):
        if it % 100 == 0:
            print('\t> Iteration {}/{}'.format(it, iterations), end="\r")

        # One random city per iteration; locate its closest neuron.
        chosen = cities.sample(1)[['x', 'y']].values
        bmu = select_closest(network, chosen)

        # Neighbourhood filter centred on the best-matching unit.
        kernel = get_neighborhood(bmu, n // 10, network.shape[0])

        # Move the neurons toward the chosen city.
        network += kernel[:, np.newaxis] * learning_rate * (chosen - network)

        # Decay both parameters.
        learning_rate *= 0.99997
        n *= 0.9997

        # Snapshot every 1000 iterations.
        if it % 1000 == 0:
            plot_network(cities, network, name='diagrams/{:05d}.png'.format(it))

        # Stop early once either parameter has fully decayed.
        if n < 1:
            print('Radius has completely decayed, finishing execution',
                  'at {} iterations'.format(it))
            break
        if learning_rate < 0.001:
            print('Learning rate has completely decayed, finishing execution',
                  'at {} iterations'.format(it))
            break
    else:
        print('Completed {} iterations.'.format(iterations))

    plot_network(cities, network, name='diagrams/final.png')

    route = get_route(cities, network)
    plot_route(cities, route, 'diagrams/route.png')
    return route
Exemplo n.º 8
0
def som(problem, iterations, learning_rate=0.8):
    '''
    Solve the TSP with a Self-Organizing Map.

    problem: DataFrame with 'x'/'y' city coordinates.
    iterations: maximum number of iterations.
    learning_rate: initial learning rate, decayed each iteration.
    Returns the route (visiting order of the cities).
    '''
    # Normalize the city coordinates.
    cities = problem.copy()
    cities[['x', 'y']] = normalize(cities[['x', 'y']])

    # Number of neurons: 8x the number of cities.
    n = cities.shape[0] * 8

    # Build the neuron network.
    network = generate_network(n)

    print('创建{} 个神经元. 开始进行迭代:'.format(n))

    for i in range(iterations):
        if not i % 100:
            print('\t> 迭代过程 {}/{}'.format(i, iterations), end='\r')
        # Pick a random city.
        city = cities.sample(1)[['x', 'y']].values

        # Winner neuron: the one closest to the sampled city.
        winner_idx = select_closest(network, city)

        # Gaussian neighborhood centred on the winner.
        gaussian = get_neighborhood(winner_idx, n // 10, network.shape[0])

        # Move the neurons toward the chosen city.
        network += gaussian[:, np.newaxis] * learning_rate * (city - network)

        # Decay the learning rate and the neighborhood radius.
        learning_rate = learning_rate * 0.99997
        n = n * 0.9997

        # Plot every 1000 iterations.
        if not i % 1000:
            plot_network(cities,
                         network,
                         name='som_diagrams/{:05d}.png'.format(i))
        # Stop once either parameter reaches its threshold.
        if n < 1:
            print('方差已经达到阈值,完成执行次数{}'.format(i))
            break
        if learning_rate < 0.001:
            print('学习率已经达到阈值,完成执行次数{}'.format(i))
            break
    else:
        print('完成迭代:{}次'.format(iterations))

    plot_network(cities, network, name='som_diagrams/final.png')
    route = get_route(cities, network)
    cities = cities.reindex(route)
    plot_route(cities, route, 'som_diagrams/route.png')

    # Combine the saved PNG frames into a GIF.
    # Fixes: os.chdir returns None, so the original `path = os.chdir(...)`
    # kept a useless None; '.\som_diagrams' also contained the invalid
    # escape sequence '\s' — forward slashes are portable.
    os.chdir('./som_diagrams')
    pic_list = os.listdir()
    create_gif(pic_list, 'result.gif', 0.3)

    return route
Exemplo n.º 9
0
def SOM(args):
    """Solve the TSP using a Self-Organizing Map.

    Reads city data from args.data_dir, trains a chain of neurons, and
    writes the cities, intermediate best routes, neuron snapshots, and a
    results JSON into args.out_dir.  Returns the results dict.
    """
    # Hyperparameters taken from the argument namespace.
    iteration = args.iteration
    learning_rate = args.learning_rate
    decay = args.decay

    # NOTE(review): mkdir_p is not a pathlib.Path method — this Path
    # presumably comes from the `path.py` package; confirm the import.
    out_dir = Path(args.out_dir)
    out_dir.mkdir_p()
    cities = pd.read_csv(Path(args.data_dir)/'data1.csv')
    cities.to_csv(out_dir/'cities.csv')

    # Normalized copy (coordinates scaled into [0,1]) used for training;
    # the original `cities` keeps the real coordinates for loss reporting.
    cities_nm = cities.copy()
    cities_nm[['x', 'y']] = normalize(cities_nm[['x', 'y']])
    cities_nm.to_csv(out_dir/'cities_nm.csv')


    # The population size is 8 times the number of cities
    n = cities_nm.shape[0] * 8

    # Generate an adequate network of neurons:
    neuron_chain = init_neurons(n)
    print('--> Network of {} neurons created. Starting the iterations:'.format(n))
    best_route=np.array([0])


    # Best-so-far tracking; min_loss == 0 acts as a "not set yet" sentinel.
    best_id=0
    min_loss=0
    losses={}          # loss at every evaluation step
    losses_decay = {}  # only the steps where the loss improved

    for i in tqdm(range(iteration)):

        # Choose a random city (random sampling)
        city = cities_nm.sample(1)[['x', 'y']].values
        winner_idx = select_closest(neuron_chain, city)
        # Generate a filter that applies changes to the winner's gaussian
        gaussian = get_neighborhood(center=winner_idx, radix=n//10, domain=neuron_chain.shape[0])
        # Update the network's weights (closer to the city)
        neuron_chain += gaussian[:,np.newaxis] * learning_rate * (city - neuron_chain)
        # Decay the variables
        learning_rate = learning_rate * decay
        n = n * decay



        # Periodic evaluation: derive a route from the neuron chain and
        # measure its length on the real (un-normalized) coordinates.
        if i % args.evaluate_freq==0:
            route = get_route(cities_nm, neuron_chain)

            cities_od = cities.reindex(route)
            loss = route_distance(cities_od)
            losses[i] = loss

            # Keep the route and neuron snapshot whenever the loss improves.
            if  min_loss==0 or min_loss > loss:
                min_loss=loss
                best_route = list(route.astype(np.float64))
                best_id = i
                losses_decay[i] = loss
                cities_od.to_csv(out_dir / 'route_{:04d}.csv'.format(i))
                save_neuron_chain(neuron_chain, out_dir / "neuron_chain_{:04d}.npy".format(i))
    # end of evaluation block

        # Check if any parameter has completely decayed.
        if n < 1:
            print('Radius has completely decayed, finishing execution',
            'at {} iterations'.format(i))
            break
        if learning_rate < 0.001:
            print('Learning rate has completely decayed, finishing execution',
            'at {} iterations'.format(i))
            break


    print('Completed {} iterations.'.format(iteration))

    # Bundle the run summary; np.float64 subclasses float, so the route
    # entries serialize with json.
    results={}
    results['min_loss'] = min_loss
    results['best_id'] = best_id
    results['best_route'] = best_route
    results['losses_decay'] = losses_decay
    results['losses'] = losses

    p = Path(out_dir / 'results.json')
    with open(p, 'w') as fp:
        json.dump(results, fp)
        print('ok')

    return results