Beispiel #1
0
def som(problem, iterations, learning_rate=0.8):
    """Solve the TSP using a Self-Organizing Map.

    :param problem: DataFrame with 'x' and 'y' city coordinates.
    :param iterations: maximum number of training iterations.
    :param learning_rate: initial learning rate; decays every iteration.
    :return: list of city indices giving the visiting order.
    """
    # Obtain the normalized set of cities (coordinates scaled into [0, 1]).
    cities = problem.copy()
    cities[['x', 'y']] = normalize(cities[['x', 'y']])

    # Number of neurons in the ring: 3x the number of cities.
    # (The original comment claimed 8x, contradicting the code.)
    n = cities.shape[0] * 3

    # Generate an adequate network of neurons.
    network = generate_network(cities, n, c=4)
    print('Network of {} neurons created. Starting the iterations:'.format(n))

    for i in range(iterations):
        if i % 100 == 0:
            # Snapshot the neuron layout every 100 iterations.
            # Forward slashes fix the invalid '\p' escape sequence in the
            # original Windows-style path and are portable across OSes.
            plot_network(cities,
                         network,
                         name='out_files/process/city_network%d.png' %
                         (i // 100))
            # Progress report (same 100-step cadence; the original tested
            # the identical condition twice).
            print('\t> Iteration {}/{}'.format(i, iterations), end="\r")

        # Choose a random city (1x2 array of normalized coordinates).
        city = cities.sample(1)[['x', 'y']].values
        winner_idx = select_closest(network, city)
        # Gaussian filter centred on the winner: the core of the algorithm.
        gaussian = get_neighborhood(winner_idx, n // 10, network.shape[0])
        # Move every neuron towards the city, weighted by the gaussian;
        # np.newaxis turns the (k,) filter into (k, 1) so it broadcasts
        # over both coordinates.
        network += gaussian[:, np.newaxis] * learning_rate * (city - network)

        # Decay the learning rate and the neighbourhood radius.
        learning_rate = learning_rate * 0.99997
        n = n * 0.9994

        # Check if any parameter has completely decayed.
        if n < 1:
            print('Radius has completely decayed, finishing execution',
                  'at {} iterations'.format(i))
            break
        if learning_rate < 0.001:
            print('Learning rate has completely decayed, finishing execution',
                  'at {} iterations'.format(i))
            break
    else:
        print('Completed {} iterations.'.format(iterations))

    # Plot the final network and derive the route on the real coordinates.
    plot_network(cities, network)
    route = get_route(cities, network)

    cities = problem.copy()
    citiesReal = cities[['x', 'y']]  # un-normalized coordinates
    plot_route(citiesReal, route)
    return route
Beispiel #2
0
def som(problem, iterations, learning_rate=0.8):
    """Solve the TSP with a Self-Organizing Map.

    :param problem: DataFrame holding the 'x' and 'y' city coordinates.
    :param iterations: upper bound on the number of training iterations.
    :param learning_rate: initial learning rate; shrinks every iteration.
    :return: route (visiting order of the city indices).
    """
    cities = problem.copy()

    # Work on normalized coordinates so every city lies in [0, 1].
    cities[['x', 'y']] = normalize(cities[['x', 'y']])

    # Population size: 8 neurons per city.  <Hyperparameter:times>
    n = cities.shape[0] * 8

    # Build the initial (n, 2) network of neurons.
    network = generate_network(n)
    print('Network of {} neurons created. Starting the iterations:'.format(n))

    for iteration in range(iterations):
        if iteration % 100 == 0:
            print('\t> Iteration {}/{}'.format(iteration, iterations),
                  end="\r")

        # Pick one city at random and find its best-matching neuron.
        chosen = cities.sample(1)[['x', 'y']].values
        winner = select_closest(network, chosen)

        # Gaussian neighbourhood around the winner.  <Hyperparameter:radius>
        influence = get_neighborhood(winner, n // 10, network.shape[0])

        # Pull the neighbourhood towards the chosen city.
        network += influence[:, np.newaxis] * learning_rate * (chosen - network)

        # Decay both parameters.  <Hyperparameter:decay rate>
        learning_rate *= 0.99997
        n *= 0.9997

        # Periodic snapshot of the network.
        if iteration % 1000 == 0:
            plot_network(cities, network,
                         name='diagrams/{:05d}.png'.format(iteration))

        # Stop early once either parameter has fully decayed.
        if n < 1:
            print('Radius has completely decayed, finishing execution',
                  'at {} iterations'.format(iteration))
            break
        if learning_rate < 0.001:
            print('Learning rate has completely decayed, finishing execution',
                  'at {} iterations'.format(iteration))
            break
    else:
        print('Completed {} iterations.'.format(iterations))

    plot_network(cities, network, name='diagrams/final.png')

    route = get_route(cities, network)
    plot_route(cities, route, 'diagrams/route.png')
    return route
Beispiel #3
0
def som(problem, iterations, learning_rate=0.8):
    """Solve the TSP using a Self-Organizing Map.

    :param problem: DataFrame with 'x' and 'y' city coordinates.
    :param iterations: maximum number of training iterations.
    :param learning_rate: initial learning rate; decays every iteration.
    :return: (route, dSave) where route is the visiting order and dSave
        holds the route distance sampled every ``dStep`` iterations.
    """
    dStep = 100  # sample the route distance every dStep iterations
    # Ceil division so the last partial window still has a slot.  The
    # original ``iterations // dStep`` raised IndexError whenever
    # iterations was not a multiple of dStep (e.g. 150 -> size 1,
    # index 1 at i=100).
    dSave = np.zeros(-(-iterations // dStep))
    cities = problem.copy()
    cities[['x', 'y']] = normalize(cities[['x', 'y']])  # coords into [0, 1]

    # Number of neurons: 4x the number of cities.
    n = cities.shape[0] * 4

    # Generate an adequate network of neurons.
    network = generate_network(cities, n, c=2)
    print('Network of {} neurons created. Starting the iterations:'.format(n))

    for i in range(iterations):
        if not i % dStep:  # use dStep consistently instead of a bare 100
            print('\t> Iteration {}/{}'.format(i, iterations), end="\r")
        # Choose a random city (1x2 array of normalized coordinates).
        city = cities.sample(1)[['x', 'y']].values
        winner_idx = select_closest(network, city)
        # Refinement: if the winner is already closer than the threshold,
        # snap it straight onto the city instead of a gaussian update.
        if np.linalg.norm(city - network[winner_idx, :], axis=1) < 0.005:
            network[winner_idx, :] = city
        else:
            # Gaussian filter centred on the winner: core of the algorithm.
            gaussian = get_neighborhood(winner_idx, n // 10, network.shape[0])
            # np.newaxis -> (k, 1) so the filter broadcasts over x and y.
            network += gaussian[:, np.newaxis] * learning_rate * (city - network)

        # Decay the variables (0.9991-0.9997 works well for the radius).
        learning_rate = learning_rate * 0.99999
        n = n * 0.9995

        if not i % dStep:  # record the current route distance
            route = get_route(cities, network)
            p = problem.reindex(route)
            dSave[i // dStep] = route_distance(p)
    else:
        print('Completed {} iterations.'.format(iterations))

    # Plot the final network and derive the route on the real coordinates.
    plot_network(cities, network)
    route = get_route(cities, network)

    cities = problem.copy()
    citiesReal = cities[['x', 'y']]  # un-normalized coordinates
    plot_route(citiesReal, route)
    return route, dSave
Beispiel #4
0
def som(instancia, problem, iterations, learning_rate, delta_learning_rate,
        delta_n, sensi_radio, sensi_learning_rate, factor_neuronas, plotear):
    """Solve the TSP using a Self-Organizing Map.

    :param instancia: instance name, used to build the image output paths.
    :param problem: DataFrame with 'x' and 'y' city coordinates.
    :param iterations: maximum number of training iterations.
    :param learning_rate: initial learning rate.
    :param delta_learning_rate: multiplicative decay of the learning rate.
    :param delta_n: multiplicative decay of the neighbourhood radius.
    :param sensi_radio: stop once the radius falls below this threshold.
    :param sensi_learning_rate: stop once the rate falls below this threshold.
    :param factor_neuronas: number of neurons created per city.
    :param plotear: when truthy, print the banner and write progress images.
    :return: route (visiting order of the city indices).
    """
    # Normalize the city coordinates into [0, 1].
    cities = problem.copy()
    cities[['x', 'y']] = normalize(cities[['x', 'y']])

    # Neuron population: factor_neuronas neurons per city.
    n = cities.shape[0] * factor_neuronas

    # Build the initial network of neurons.
    network = generate_network(n)

    if plotear:
        print('Red de {} neuronas creadas. Comenzando las iteraciones:'.format(n))

    for step in range(iterations):
        if step % 100 == 0:
            print('\t> Iteración {}/{}'.format(step, iterations), end="\r")

        # Pick a random city and locate the winner (BMU) neuron.
        sample = cities.sample(1)[['x', 'y']].values
        winner = select_closest(network, sample)

        # Gaussian filter applying the update around the winner.
        influence = get_neighborhood(winner, n // 10, network.shape[0])

        # Move the neighbourhood towards the sampled city.
        network += influence[:, np.newaxis] * learning_rate * (sample - network)

        # Decay both parameters.
        learning_rate = learning_rate * delta_learning_rate
        n = n * delta_n

        # Optional snapshot every 1000 iterations.
        if plotear and step % 1000 == 0:
            plot_network(cities, network,
                         name='imagenes/' + instancia + '/{:05d}.png'.format(step))

        # Stop once either parameter drops below its sensitivity threshold.
        if n < sensi_radio:
            print('Radio por debajo de sensibilidad, Se ha terminado la ejecución',
            'a {} las iteraciones'.format(step))
            break
        if learning_rate < sensi_learning_rate:
            print('Learning rate por debajo de sensibilidad, Se ha terminado la ejecución',
            'a las {} iteraciones'.format(step))
            break
    else:
        print('Se han completado las {} iteraciones.'.format(iterations))

    if plotear:
        plot_network(cities, network, name='imagenes/' + instancia + '/final.png')

    route = get_route(cities, network)
    if plotear:
        plot_route(cities, route, 'imagenes/' + instancia + '/route.png')
    return route
Beispiel #5
0
def som(problem, iterations, learning_rate=0.8):
    """Solve the TSP using a Self-Organizing Map.

    :param problem: DataFrame with 'x' and 'y' city coordinates.
    :param iterations: maximum number of training iterations.
    :param learning_rate: initial learning rate; decays every iteration.
    :return: list of city indices giving the visiting order.
    """
    # Obtain the normalized set of cities (coords in [0, 1]).
    cities = problem.copy()
    cities[['x', 'y']] = normalize(cities[['x', 'y']])
    # The population size is 8 times the number of cities.
    n = cities.shape[0] * 8
    network = generate_network(n)  # generate an adequate network of neurons
    print('Network of {} neurons created. Starting the iterations:'.format(n))

    for i in range(iterations):
        # `not i % 1000` is already True at i == 0, so the original's
        # extra `i == 0 or ...` test was redundant.
        if not i % 1000:
            # Forward slashes fix the invalid '\p' escape sequence in the
            # original Windows-style path and are portable across OSes.
            plot_network(cities,
                         network,
                         name='out_files/process/city_network%d.png' %
                         (i // 1000 + 1))

        # Pick a random city (1x2 array of normalized coordinates).
        city = cities.sample(1)[['x', 'y']].values
        winner_idx = select_closest(network, city)
        # Generate a filter that applies changes around the winner.
        gaussian = get_neighborhood(winner_idx, n // 10, network.shape[0])
        # Update the network's weights (move neurons closer to the city);
        # np.newaxis turns the (k,) filter into (k, 1) for broadcasting.
        network += gaussian[:, np.newaxis] * learning_rate * (city - network)
        # Decay the variables.
        learning_rate = learning_rate * 0.99997
        n = n * 0.9997

        # Check if any parameter has completely decayed.
        if n < 1:
            print('Radius has completely decayed, finishing execution',
                  'at {} iterations'.format(i))
            break
        if learning_rate < 0.001:
            print('Learning rate has completely decayed, finishing execution',
                  'at {} iterations'.format(i))
            break
    else:
        print('Completed {} iterations.'.format(iterations))
    plot_network(cities, network)
    route = get_route(cities, network)
    plot_route(problem[['x', 'y']], route)
    return route
def algorithm(df, params):
    """
    wrapper function to put each individual algorithm inside
    :param df: dataframe that contains all the input dataset
    :param params: algorithm specific parameters
    :return: a dictionary of { outputname: output content in memory }
    """
    # Construct the network analysis object once and collect every
    # export/metric into the output dictionary.
    NW = Network(df, params['relations'])
    output = {
        'd3js': NW.export_json(),
        'gephi': NW.export_gephi(),
        'pajek': NW.export_pajek(),
        'assortativity': NW.assortativity(),
        'node_attributes': NW.node_attributes(),
        'edge_attributes': NW.edge_attributes(),
        'strong_components': NW.strong_components(),
        'weak_components': NW.weak_components(),
        'triads': NW.triads(),
    }

    # Render the pruned network graph.
    pruned = NW.prune_network()
    title = (params['relations'] +
             ' Network graph of 500 nodes with highest degree centrality')
    output['div'] = plot.plot_network(pruned,
                                      params['layout'],
                                      params['relations'],
                                      title=title)

    return output
Beispiel #7
0
def som(problem, iterations, learning_rate=0.8):
    """Solve the TSP with a Self-Organizing Map, plotting progress images.

    :param problem: DataFrame with 'x' and 'y' city coordinates.
    :param iterations: maximum number of training iterations.
    :param learning_rate: initial learning rate; decays each step.
    :return: visiting order of the city indices.
    """
    # Normalized copy of the cities: coordinates land in [0, 1].
    cities = problem.copy()
    cities[['x', 'y']] = normalize(cities[['x', 'y']])

    # Eight neurons per city.
    n = cities.shape[0] * 8
    network = generate_network(n)
    print('Network of {} neurons created. Starting the iterations:'.format(n))

    for step in range(iterations):
        if step % 100 == 0:
            print('\t> Iteration {}/{}'.format(step, iterations), end="\r")

        # Sample one city and locate the best-matching neuron.
        sample = cities.sample(1)[['x', 'y']].values
        best = select_closest(network, sample)

        # Neighbourhood weights centred on the winner.
        weights = get_neighborhood(best, n // 10, network.shape[0])

        # Nudge the neighbourhood towards the sampled city.
        network += weights[:, np.newaxis] * learning_rate * (sample - network)

        # Exponential decay of both parameters.
        learning_rate *= 0.99997
        n *= 0.9997

        # Snapshot every 1000 iterations.
        if step % 1000 == 0:
            plot_network(cities, network,
                         name='diagrams/{:05d}.png'.format(step))

        # Early exit when either parameter is fully decayed.
        if n < 1:
            print('Radius has completely decayed, finishing execution',
                  'at {} iterations'.format(step))
            break
        if learning_rate < 0.001:
            print('Learning rate has completely decayed, finishing execution',
                  'at {} iterations'.format(step))
            break
    else:
        print('Completed {} iterations.'.format(iterations))

    plot_network(cities, network, name='diagrams/final.png')

    route = get_route(cities, network)
    plot_route(cities, route, 'diagrams/route.png')
    return route
Beispiel #8
0
def molecular_dynamics(vertices, edges, polys, parameters, T, folder):
    """Integrate the vertex-model network forward in time until T.

    :param vertices: vertex positions of the network.
    :param edges: edge list of the network.
    :param polys: polygons (cells) of the network.
    :param parameters: dict providing at least 'delta_t', 'lx' and 'ly'.
    :param T: total simulation time.
    :param folder: directory where the per-step plot images are written.
    """
    delta_t = parameters['delta_t']
    lx = parameters['lx']
    ly = parameters['ly']
    L = np.array([lx, ly])

    # simulation clock and step counter
    t = 0
    count = 0

    while t < T:
        # Energy is computed for inspection only; it is not used below.
        energy = get_energy(vertices, polys, edges, parameters)

        # Net force on every vertex.
        forces = get_forces(vertices, polys, edges, parameters)
        # Progress trace: time and force magnitude.  The original used a
        # Python 2 print statement, a SyntaxError under Python 3.
        print(t, np.sum(forces**2)**(0.5))

        # Move the vertices under the computed forces.
        vertices = move_vertices(vertices, forces, parameters)

        # Check for T1 transitions (neighbour exchanges).
        polys, edges = T1_transition(vertices, polys, edges, parameters)

        # TODO: optionally persist vertices/edges/polygons per step for
        # later plotting, e.g.
        # write_vertices(vertices, "%s/%.2f.txt" % (folder, t))
        plot_network(vertices, polys, L, "%s/%.2f.jpg" % (folder, t))

        count += 1
        t += delta_t

    return
            correct_block = validators.check_block_time(ramificat)
            validators.approve_block(correct_block, validators.all_ids)
            validators.walidate_blockchain(correct_block)
            validators.hide_transactions()
            message = notarries.check_data_availability(
                validators.shard_blockchain[-1])
            verdict = notarries.walidate_challenge(
                message, validators.shard_blockchain[-1])
            if verdict:
                validators.recognized_hider(correct_block)
        communicator.comm.barrier()
        if communicator.rank == 0:
            beacon.burn_stake_bad_commit_availability(8)
            beacon.burn_stake_notarry()
            beacon.burn_stake_bad_commit_availability(10)
            beacon.remove_indebted_nodes(beacon.val_acc_info,
                                         beacon.lower_limit_vali_id,
                                         beacon.pool_vali, 11)
            beacon.remove_indebted_nodes(beacon.notary_acc_info,
                                         beacon.lower_limit_notaries_id,
                                         beacon.pool_notaries, 12)
        communicator.comm.barrier()
        if communicator.rank != 0:
            validators.change_ids(communicator.comm.recv(source=0, tag=11))
            notarries.change_ids(communicator.comm.recv(source=0, tag=12))
    if communicator.rank == 0:
        pass
        plot_network(beacon.peers_in_beacon, communicator.rank)
    else:
        plot_network(validators.peers_in_shard, communicator.rank)
Beispiel #10
0
def som(problem, iterations, learning_rate=0.8):
    """Solve the TSP using a Self-Organizing Map.

    :param problem: DataFrame with 'x' and 'y' city coordinates.
    :param iterations: maximum number of training iterations.
    :param learning_rate: initial learning rate; decays every iteration.
    :return: visiting order of the city indices.

    Side effects: writes progress PNGs and a result GIF under
    ``som_diagrams`` and changes the process working directory.
    """
    # Normalize the city coordinates into [0, 1].
    cities = problem.copy()
    cities[['x', 'y']] = normalize(cities[['x', 'y']])

    # Eight neurons per city.
    n = cities.shape[0] * 8

    # Build the neuron network.
    network = generate_network(n)

    print('创建{} 个神经元. 开始进行迭代:'.format(n))

    for i in range(iterations):
        if not i % 100:
            print('\t> 迭代过程 {}/{}'.format(i, iterations), end='\r')
        # Pick one city at random.
        city = cities.sample(1)[['x', 'y']].values

        # Winner neuron: the one closest to the chosen city.
        winner_idx = select_closest(network, city)

        # Gaussian neighbourhood centred on the winner.
        gaussian = get_neighborhood(winner_idx, n // 10, network.shape[0])

        # Pull the neighbourhood towards the chosen city.
        network += gaussian[:, np.newaxis] * learning_rate * (city - network)

        # Decay the learning rate and the neighbourhood radius.
        learning_rate = learning_rate * 0.99997
        n = n * 0.9997

        # Snapshot the network every 1000 iterations.
        if not i % 1000:
            plot_network(cities,
                         network,
                         name='som_diagrams/{:05d}.png'.format(i))
        # Stop once either parameter has fully decayed.
        if n < 1:
            print('方差已经达到阈值,完成执行次数{}'.format(i))
            break
        if learning_rate < 0.001:
            print('学习率已经达到阈值,完成执行次数{}'.format(i))
            break
    else:
        print('完成迭代:{}次'.format(iterations))

    plot_network(cities, network, name='som_diagrams/final.png')
    route = get_route(cities, network)
    cities = cities.reindex(route)
    plot_route(cities, route, 'som_diagrams/route.png')

    # Combine the PNG snapshots into an animated GIF.
    # './som_diagrams' fixes the invalid '\s' escape sequence of the
    # original Windows-style '.\som_diagrams' path and is portable.
    # os.chdir returns None; the original pointlessly bound that to a
    # variable.  The directory change itself is kept for compatibility
    # with the relative paths passed to create_gif.
    os.chdir('./som_diagrams')
    pic_list = os.listdir()
    create_gif(pic_list, 'result.gif', 0.3)

    return route