def blendThreeParabola():
    baseSurfaces = BaseSurface()
    blend0 = baseSurfaces.blendCP("parabola")
    blend0 = transform.move(blend0, 'y', 20)
    raw = RawSurface(blend0, 'parabola')
    raw.setSharpEdges([])
    top1 = Topology.create(raw)

    blend2 = transform.rotate(blend0, 'z', -2*pi/3)
    raw = RawSurface(blend2, 'parabola')
    raw.setSharpEdges([])
    top2 = Topology.create(raw)

    blend3 = transform.rotate(blend0, 'z', -4*pi/3)
    raw = RawSurface(blend3, 'parabola')
    raw.setSharpEdges([])
    top3 = Topology.create(raw)

    surfaces = [top1, top2, top3]
    tops = [2, 2, 2]
    bottoms = [0, 0, 0]
    aref = [2.5, 2.5]
    arefs = [0.7, 0.7]
    taref = [0.2, 0.2]
    name = 'threeParabola' + mode
    #mode = 'CC'
    topology = Topology.blendSurfaces(surfaces, tops, bottoms, aref, arefs, taref, name, mode)

    topology.writeToFile()
Example #2
 def assertComponents(self, image, n):
     topology = Topology()
     topology.calculate(image)
     ncomponents = 0
     for component in topology.get_components():
         ncomponents += 1
     self.assertEqual(ncomponents, n)
Example #3
class IspNetwork:
    def __init__(self, topo_name, topo_file):
        self.topo = Topology(topo_name, topo_file)

    def egress_all(self, fake_node, dst_topo):
        result = {}
        for node in dst_topo.nodes():
            nodes_num = networkx.number_of_nodes(self.topo.getGraph())
            result[(fake_node, node)] = nodes_num * CITY_TRAFFIC_VOLUME
        print 'total:{}'.format(nodes_num * CITY_TRAFFIC_VOLUME)
        return result

    def egress_volume(self, egress_nodes, dst_topo):
        values = [0] * len(egress_nodes)
        node_num = dict(zip(egress_nodes, values))
        g = self.topo.getGraph()
        for node in g.nodes():
            egress_distance_dict = {}
            for egress in egress_nodes:
                # hop-count distance, so min() below selects the nearest egress
                egress_distance_dict[egress] = networkx.shortest_path_length(
                    g, node, egress)
            min_val = min(egress_distance_dict.itervalues())
            closest_egress = [
                k for k, v in egress_distance_dict.iteritems() if v == min_val
            ]
            node_num[closest_egress[0]] += 1

        result = {}
        for egress in node_num.keys():
            for node in dst_topo.nodes():
                result[(egress, node)] = node_num[egress] * CITY_TRAFFIC_VOLUME
        return result
Example #4
    def __init__(self, numnodes, linkprob = 0.35, seed = None,
                 ipv4prefix = '10.0.0.0/8', ipv6prefix = 'a::/64'):
        assert numnodes > 1
        Topology.__init__(self, numnodes)

        if seed is not None:
            random.seed(seed)

        self.net = self.session.addobj(cls = pycore.nodes.WlanNode)

        p4 = ipaddr.IPv4Prefix(ipv4prefix)
        p6 = ipaddr.IPv6Prefix(ipv6prefix)

        for i in xrange(numnodes):
            addrlist = ['%s/%s' % (p4.addr(i + 1), 32),
                        '%s/%s' % (p6.addr(i + 1), 128)]
            self.n[i].newnetif(self.net, addrlist = addrlist, ifname = 'eth0')

        # connect nodes with probability linkprob
        for i in xrange(numnodes):
            netif = self.n[i].netif(0)
            for j in xrange(i + 1, numnodes):
                r = random.random()
                if r < linkprob:
                    self.net.link(netif, self.n[j].netif(0))
            if not self.net._linked[netif]:
                # force one link to avoid partitions
                j = i
                while j == i:
                    j = random.randint(0, numnodes - 1)
                self.net.link(netif, self.n[j].netif(0))
def blendThreeHyperbola():
    baseSurfaces = BaseSurface()
    blend0 = baseSurfaces.blendCP('hyperbola')
    blend1 = transform.rotate(blend0, 'z', -pi/4)
    blend1 = transform.move(blend1, 'x', 70)
    blend1 = transform.move(blend1, 'y', 80)
    raw = RawSurface(blend1, 'hyperbola')
    raw.setSharpEdges([-1])
    top1 = Topology.create(raw)

    blend2 = transform.rotate(blend0, 'z', -3*pi/4)
    blend2 = transform.move(blend2, 'x', 70)
    blend2 = transform.move(blend2, 'y', -80)
    raw = RawSurface(blend2, 'hyperbola')
    raw.setSharpEdges([-1])
    top2 = Topology.create(raw)

    blend3 = transform.rotate(blend0, 'z', pi/2)
    blend3 = transform.move(blend3, 'x', -70)
    raw = RawSurface(blend3, 'hyperbola')
    raw.setSharpEdges([-1])
    top3 = Topology.create(raw)

    surfaces = [top1, top2, top3]

    tops = [0, 0, 0]
    bottoms = [2, 2, 2]
    aref = [3, 3] #centerPoints
    arefs = [1, 1] #grooves
    taref = [0.2, 0.2] #offsetCenterPoints
    name = 'threeHyperbola'+mode
    #mode = 'CC' #'DS'
    topology = Topology.blendSurfaces(surfaces,tops, bottoms, aref, arefs, taref, name, mode)

    topology.writeToFile()
Example #6
    def __init__(self):

        cnf = config.CONFIG()
        #        with open(cnf.topologyJson,'r') as json_data:
        #            myjson = json.load(json_data)
        #            json_data.close()
        #
        f = open(cnf.topologyJson, 'r')
        txtjson = f.read()
        f.close()
        txtjson = txtjson.replace("Entity.ENTITY_FOG", "\"Entity.ENTITY_FOG\"")
        txtjson = txtjson.replace("Entity.ENTITY_CLUSTER",
                                  "\"Entity.ENTITY_CLUSTER\"")
        myjson = json.loads(txtjson)

        print myjson

        t = Topology()
        t.load(myjson)

        devDistanceMatrix = [[0 for j in xrange(len(t.G.nodes))]
                             for i in xrange(len(t.G.nodes))]

        for i in range(0, len(t.G.nodes)):
            for j in range(i, len(t.G.nodes)):

                mylength = nx.shortest_path_length(t.G,
                                                   source=i,
                                                   target=j,
                                                   weight="weight")
                devDistanceMatrix[i][j] = mylength
                devDistanceMatrix[j][i] = mylength
Example #7
    def __init__(self, cost, parameters, update_equation, extra_layers=None):

        if not isinstance(parameters, v2_parameters.Parameters):
            raise TypeError('parameters should be parameters')

        if not isinstance(update_equation, v2_optimizer.Optimizer):
            raise TypeError("update equation parameter must be "
                            "paddle.v2.optimizer.Optimizer")
        topology = Topology(cost, extra_layers=extra_layers)
        self.__optimizer__ = update_equation
        self.__topology__ = topology
        self.__parameters__ = parameters
        self.__topology_in_proto__ = topology.proto()

        # In local mode, disable sparse_remote_update.
        for param in self.__topology_in_proto__.parameters:
            if param.sparse_remote_update:
                param.sparse_remote_update = False

        self.__data_types__ = topology.data_type()
        gm = api.GradientMachine.createFromConfigProto(
            self.__topology_in_proto__, api.CREATE_MODE_NORMAL,
            self.__optimizer__.enable_types())
        assert isinstance(gm, api.GradientMachine)
        self.__gradient_machine__ = gm
        self.__gradient_machine__.randParameters()
        parameters.append_gradient_machine(gm)
def empty_snapshot_from_openmm_topology(topology, simple_topology=False):
    """
    Return an empty snapshot from an openmm.Topology object

    Velocities will be set to zero.

    Parameters
    ----------
    topology : openmm.Topology
        the topology representing the structure and number of atoms
    simple_topology : bool
        if `True` only a simple topology with n_atoms will be created.
        This cannot be used with complex CVs but loads and stores very fast

    Returns
    -------
    openpathsampling.engines.Snapshot
        the complete snapshot with zero coordinates and velocities

    """
    n_atoms = topology.n_atoms

    if simple_topology:
        topology = Topology(n_atoms, 3)
    else:
        topology = MDTrajTopology(md.Topology.from_openmm(topology))

    snapshot = Snapshot.construct(
        coordinates=u.Quantity(np.zeros((n_atoms, 3)), u.nanometers),
        box_vectors=u.Quantity(topology.setUnitCellDimensions(), u.nanometers),
        velocities=u.Quantity(np.zeros((n_atoms, 3)),
                              u.nanometers / u.picoseconds),
        engine=TopologyEngine(topology))

    return snapshot
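
A minimal usage sketch for the helper above, assuming OpenMM's application layer is installed; 'input.pdb' stands in for any PDB file on disk:

from simtk.openmm import app

pdb = app.PDBFile('input.pdb')  # placeholder input file
snapshot = empty_snapshot_from_openmm_topology(pdb.topology)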
Example #9
def start_client(client_name, port):
    topo = Topology()
    server_ip = topo.get_host_neighbor(client_name)['ip']
    client_ip = topo.get_node_info(client_name)['ip']
    client = SocketClient(client_ip=client_ip,
                          server_ip=server_ip,
                          server_port=port)
    client.start_client()
Example #10
    def put_in_range(self, other, s):
        self.w.add_visible_mac(other.mac, s)
        other.w.add_visible_mac(self.mac, s)

        Topology.simulate()
        WifiSim.simulate()
        Topology.simulate()
        WifiSim.simulate()
Example #11
 def test_get_components_height(self):
     image = self.chess_board()
     image[self.SIZE/2, :] = 255
     topology = Topology()
     topology.calculate(image)
     ncomponents = 0
     for component in topology.get_components():
         ncomponents += 1
     self.assertEqual(ncomponents, 2)
Example #12
    def take_out_of_range(self, other):
        self.w.remove_visible_mac(other.mac)
        other.w.remove_visible_mac(self.mac)
        self.w.mac_disconnected(other.mac)
        other.w.mac_disconnected(self.mac)

        Topology.simulate()
        WifiSim.simulate()
        Topology.simulate()
        WifiSim.simulate()
Example #13
    def __init__(self, selection_method):
        self.goals_position = []
        self.goals_value = []
        self.omega = 0.0
        self.radius = 0
        self.method = selection_method

        self.brush = Brushfires()
        self.topo = Topology()
        self.path_planning = PathPlanning()
    def __init__(self):
        self.xLimitUp = 0
        self.xLimitDown = 0
        self.yLimitUp = 0
        self.yLimitDown = 0

        self.brush = Brushfires()
        self.topo = Topology()
        self.target = [-1, -1]
        self.previousTarget = [-1, -1]
        self.costs = []
Example #15
def create(layers):
    """
    Create parameter pool by topology.

    :param layers:
    :return:
    """
    topology = Topology(layers)
    pool = Parameters()
    for param in topology.proto().parameters:
        pool.__append_config__(param)
    return pool
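
A hedged usage sketch for create() with the legacy paddle.v2 layer API; the layer names and sizes here are illustrative, not taken from the source:

import paddle.v2 as paddle

paddle.init(use_gpu=False, trainer_count=1)
x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(13))
y = paddle.layer.fc(input=x, size=1, act=paddle.activation.Linear())
pool = create(y)  # parameter pool backing the topology that ends at `y`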
def blendThreeCylinderNonFlat():
    baseSurfaces = BaseSurface()
    blend0 = baseSurfaces.blendCP('cylinder')
    blend1 = transform.rotate(blend0, 'x', pi/8.0)
    blend1 = transform.rotate(blend1, 'z', -pi/4.0)
    blend1 = transform.move(blend1, 'x', 1.5)
    blend1 = transform.move(blend1, 'y', 2)


    raw = RawSurface(blend1, 'cylinder')
    raw.setSharpEdges([-1])


    top1 = Topology.create(raw)

    blend2 = transform.rotate(blend0, 'x', pi/8.0)
    blend2 = transform.rotate(blend2, 'z', -3.0*pi/4.0)
    blend2 = transform.move(blend2, 'x', 1.5)
    blend2 = transform.move(blend2, 'y', -2)


    raw = RawSurface(blend2, 'cylinder')
    raw.setSharpEdges([-1])

    top2 = Topology.create(raw)

    '''
    blend3 = transform.rotate(blend0, 'x', pi/8.0)
    blend3 = transform.rotate(blend3, 'z', pi/2.0)
    blend3 = transform.move(blend3, 'x', -3)
    '''
    blend3 = baseSurfaces.blendCP('circular')
    blend3 = transform.rotate(blend3, 'x', pi/8)
    blend3 = transform.rotate(blend3, 'z', pi/2)
    blend3 = transform.move(blend3, 'x', -3)


    raw = RawSurface(blend3, 'circular')
    raw.setSharpEdges([])

    top3 = Topology.create(raw)

    surfaces = [top1, top2, top3]
    tops = [2, 2, 2]
    bottoms = [0, 0, 0]
    aref = [2.1, 1.3]
    arefs = [0.8, 0.5]
    taref = [0.1, 0.2]
    name = 'threeCylinderNonFlat' + mode
    #mode = 'CC'
    topology = Topology.blendSurfaces(surfaces,tops, bottoms, aref, arefs, taref, name, mode)

    topology.writeToFile()
Example #17
def create(layers):
    """
    Create parameter pool by topology.

    :param layers:
    :return:
    """
    topology = Topology(layers)
    pool = Parameters()
    for param in topology.proto().parameters:
        pool.__append_config__(param)
    return pool
Example #18
    def __init__(self, selection_method):
        self.goals_position = []
        self.goals_value = []
        self.omega = 0.0
        self.radius = 0
        self.method = selection_method

        self.brush = Brushfires()
        self.topo = Topology()
        self.path_planning = PathPlanning()

        # Initialize previous target
        self.previous_target = [-1, -1]
    def __init__(self, selection_method):
        self.goals_position = []
        self.goals_value = []
        self.path = []
        self.prev_target = [0, 0]
        self.omega = 0.0
        self.radius = 0
        self.method = selection_method

        self.brush = Brushfires()
        self.topo = Topology()
        self.path_planning = PathPlanning()
        self.robot_perception = RobotPerception()  # can i use that?
Example #20
def create(layers):
    """
    Create parameter pool by topology.

    :param layers:
    :return:
    """
    topology = Topology(layers)
    pool = Parameters()
    initializers = cp.g_parameter_initializer_map
    for param in topology.proto().parameters:
        pool.__append_config__(param)
        if param.name in initializers:
            pool[param.name] = initializers[param.name](param.name)
    return pool
Example #21
def create(layers):
    """
    Create parameter pool by topology.

    :param layers:
    :return:
    """
    topology = Topology(layers)
    pool = Parameters()
    initializers = cp.g_parameter_initializer_map
    for param in topology.proto().parameters:
        pool.__append_config__(param)
        if param.name in initializers:
            pool[param.name] = initializers[param.name](param.name)
    return pool
Example #22
def independent_routing(cp_num):
    cpNets = []
    ispTopo = Topology('isp_network', ISP_TOPO_DIR)
    for i in range(cp_num):
        cpNets.append(CpNetwork('Abilene', CP_TOPO_DIR))

    trafficMatrix = {}
    for i in range(cp_num):
        trafficMatrix[i] = cpNets[i].egress_max_throughput(10000, ispTopo)

    ispNet = IspNetwork('isp_network', ISP_TOPO_DIR)
    ispNet.linkcaps = set_link_caps(ispNet.topo)
    pptc, throughput = ispNet.calc_path_maxminfair(trafficMatrix)

    ingress_bw_dict = {}
    for i in range(cp_num):
        ingress_bw_dict[i] = {}
    for tc, paths in pptc.iteritems():
        for path in paths:
            nodes = path.getNodes()
            ingress = nodes[0]
            if ingress in ingress_bw_dict[tc.network_id]:
                ingress_bw_dict[tc.network_id][ingress] += path.bw
            else:
                ingress_bw_dict[tc.network_id][ingress] = path.bw

    for id, bw_dict in ingress_bw_dict.iteritems():
        print 'network id:{}'.format(id)
        for ingress, bw in bw_dict.iteritems():
            print '{}:{}'.format(ingress, bw)

    with open(INDEPENDENT_LOG_DIR, 'a') as f:
        f.write(str(throughput))
        f.write('\n')
        '''f.write('independent routing\n')
Example #23
 def test_large_node_topology(self):
     env = simpy.Environment()
     result = Topology(env=env, num_nodes=1000, num_neighbours=8)
     self.assertEqual(result.num_nodes, 1000)
     self.assertEqual(result.num_neighbours, 8)
     for node in result.nodes.values():
         self.assertEqual(len(node.neighbours), 8)
Example #24
def snapshot_from_pdb(pdb_file, simple_topology=False):
    """
    Construct a Snapshot from the first frame in a pdb file without velocities

    Parameters
    ----------
    pdb_file : str
        The filename of the .pdb file to be used
    simple_topology : bool
        if `True` only a simple topology with n_atoms will be created.
        This cannot be used with complex CVs but loads and stores very fast

    Returns
    -------
    :class:`openpathsampling.engines.Snapshot`
        the constructed Snapshot

    """
    pdb = md.load(pdb_file)
    velocities = np.zeros(pdb.xyz[0].shape)

    if simple_topology:
        topology = Topology(*pdb.xyz[0].shape)
    else:
        topology = MDTrajTopology(pdb.topology)

    snapshot = Snapshot.construct(
        coordinates=u.Quantity(pdb.xyz[0], u.nanometers),
        box_vectors=u.Quantity(pdb.unitcell_vectors[0], u.nanometers),
        velocities=u.Quantity(velocities, u.nanometers / u.picoseconds),
        engine=FileEngine(topology, pdb_file))

    return snapshot
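
For example (with 'alanine.pdb' as a placeholder path), the two topology modes can be compared directly; the simple topology trades complex-CV support for faster loading and storage, as noted in the docstring:

snap_full = snapshot_from_pdb('alanine.pdb')
snap_fast = snapshot_from_pdb('alanine.pdb', simple_topology=True)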
Example #25
def topology_from_pdb(pdb_file, simple_topology=False):
    """
    Construct a Topology from the first frame of a pdb file

    Parameters
    ----------
    pdb_file : str
        The filename of the .pdb file to be used
    simple_topology : bool
        if `True` only a simple topology with n_atoms will be created.
        This cannot be used with complex CVs but loads and stores very fast

    Returns
    -------
    Topology or :class:`MDTrajTopology`
        the constructed topology object

    """
    pdb = md.load(pdb_file)

    if simple_topology:
        topology = Topology(*pdb.xyz[0].shape)
    else:
        topology = MDTrajTopology(pdb.topology)

    return topology
Example #26
def shortest_path():
    ispNets = []
    cpTopo = Topology('CP_network', './data/topologies/simple.graphml')
    isp_num = 2
    for i in range(isp_num):
        ispNets.append(
            IspNetwork('Abilene', './data/topologies/Abilene.graphml'))

    trafficMatrix = {}
    for i in range(isp_num):
        trafficMatrix[i] = ispNets[i].egress_volume([0, 1], cpTopo)

    cpNet = CpNetwork('CP_network', './data/topologies/simple.graphml')
    pptc = cpNet.calc_path_sp(trafficMatrix)
    ingress_bw_dict = {}
    for i in range(isp_num):
        ingress_bw_dict[i] = {}
    for tc, paths in pptc.iteritems():
        ingress = tc.src
        if ingress in ingress_bw_dict[tc.network_id]:
            ingress_bw_dict[tc.network_id][ingress] += tc.allocate_bw
        else:
            ingress_bw_dict[tc.network_id][ingress] = tc.allocate_bw
    for id, bw_dict in ingress_bw_dict.iteritems():
        print "isp network id:{}".format(id)
        for egress, bw in bw_dict.iteritems():
            print "egress:{} bw:{}".format(egress, bw)
 def __init__(self, selection_method):
     self.goals_position = []
     self.goals_value = []
     self.omega = 0.0
     self.radius = 0
     self.method = selection_method
     self.previous_target = []
     self.brush = Brushfires()
     self.topo = Topology()
     self.path_planning = PathPlanning()
     self.previous_target.append(50)
     self.previous_target.append(50)
     self.node2_index_x = 0
     self.node2_index_y = 0
     self.sonar = SonarDataAggregator()
     self.timeout_happened = 0
Example #28
async def main_loop(topology: Topology, tick: int):
    print("Main Loop Started")
    # TODO: listen for graceful exit
    while True:
        await asyncio.sleep(tick)

        node_id = randint(0, topology.size - 1)
        payload = randint(100, 999)
        topology.send_message(node_id, payload)

        for n in topology.pool:
            try:
                msg = n.output_queue.get_nowait()
                topology.broadcast_message(msg)
            except asyncio.QueueEmpty:
                pass
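
A sketch of how this coroutine might be driven; build_topology() is a hypothetical factory, and any Topology exposing size, pool, send_message and broadcast_message as used above would do:

import asyncio

def run(tick: int = 1) -> None:
    topology = build_topology()  # hypothetical, project-specific construction
    asyncio.run(main_loop(topology, tick))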
Example #29
    def create_topology(cls, debug=False):

        cls.topology = Topology()
        cls.topology.create_topology(debug)

        cls.vpp = cls.topology.get_vpp()
        cls.netopeer_cli = cls.topology.get_netopeer_cli()
        cls.vppctl = vppctl.Vppctl()
Example #30
def shortest_gfi(cp_num):
    cpNetworks = []
    ispTopo = Topology('isp_network', './data/topologies/simple.graphml')
    for i in range(cp_num):
        cpNetworks.append(
            CpNetwork('Abilene', './data/topologies/Abilene.graphml'))

    trafficMatrix = {}
    for i in range(cp_num):
        trafficMatrix[i] = cpNetworks[i].egress_volume_shortest([0, 1],
                                                                ispTopo)

    ispNet = IspNetwork('isp_network', './data/topologies/simple.graphml')
    pptc, throughput = ispNet.calc_path_maxminfair(trafficMatrix)

    pptc_dict = {}
    for i in range(cp_num):
        pptc_dict[i] = {}
    for tc, paths in pptc.iteritems():
        pptc_dict[tc.network_id][copy.deepcopy(tc)] = copy.deepcopy(paths)

    pptc_iso_dict = {}
    for i in range(cp_num):
        ispNet_local = IspNetwork('isp_network',
                                  './data/topologies/simple.graphml')
        tm = {}
        pptc_iso_dict[i] = {}
        tm.update({i: trafficMatrix[i]})
        pptc, throughput = ispNet_local.calc_path_shortest(tm)
        for tc, paths in pptc.iteritems():
            print tc
            print paths
            pptc_iso_dict[i][copy.deepcopy(tc)] = copy.deepcopy(paths)

    gfi = calc_gfi(pptc_dict, pptc_iso_dict, cp_num, ispNet)

    ingress_bw_dict = {}
    for i in range(cp_num):
        ingress_bw_dict[i] = {}
    for tc, paths in pptc.iteritems():
        for path in paths:
            nodes = path.getNodes()
            ingress = nodes[0]
            if ingress in ingress_bw_dict[tc.network_id]:
                ingress_bw_dict[tc.network_id][ingress] += path.bw
            else:
                ingress_bw_dict[tc.network_id][ingress] = path.bw

    for id, bw_dict in ingress_bw_dict.iteritems():
        print "network id:{}".format(id)
        for ingress, bw in bw_dict.iteritems():
            print '{}:{}'.format(ingress, bw)

    #log to file
    with open(SHORTEST_LOG_DIR, 'a') as f:
        f.write(str(gfi))
        f.write('\n')
        '''f.write('default routing \n')
Example #31
def plot_sf(number_of_nodes_list, averaging, topology_radius, number_of_gws,
            packet_rate, packet_size, simulation_duration, traffic_type):
    sf_list = [
        PacketSf.SF_7, PacketSf.SF_8, PacketSf.SF_9, PacketSf.SF_10,
        PacketSf.SF_11, PacketSf.SF_12, PacketSf.SF_Lowest, PacketSf.SF_Random
    ]
    sf_pdr_figure = SimulationFigure(number_of_nodes_list,
                                     [sf.name for sf in sf_list])
    sf_energy_figure = SimulationFigure(number_of_nodes_list,
                                        [sf.name for sf in sf_list])

    for sf in sf_list:
        sys.stdout.write('\n{} '.format(sf))
        sys.stdout.flush()
        for number_of_nodes in number_of_nodes_list:
            sys.stdout.write('.')
            sys.stdout.flush()
            simulation_result_sum = SimulationResult()
            for repeat in range(averaging):
                topology = Topology.create_random_topology(
                    number_of_nodes=number_of_nodes,
                    radius=topology_radius,
                    number_of_gws=number_of_gws,
                    node_traffic_proportions=traffic_type)
                simulation = Simulation(
                    topology=topology,
                    packet_rate=packet_rate,
                    packet_size=packet_size,
                    simulation_duration=simulation_duration,
                    sf=sf)
                simulation_result_sum += simulation.run()
            sf_pdr_figure.plot_data[sf.name].append(
                float(simulation_result_sum.pdr) / averaging)
            sf_energy_figure.plot_data[sf.name].append(
                float(simulation_result_sum.txEnergyConsumption) / averaging)

    sf_pdr_figure.get_plot(xlabel='Number of nodes',
                           ylabel='PDR (%)',
                           ylim_bottom=0,
                           xlim_left=0,
                           xlim_right=1000)
    plt.legend(loc='upper right', fontsize='small', title='SF', ncol=2)
    plt.savefig('output/sf_pdr_r{}_g{}_p{}_s{}.png'.format(
        topology_radius, number_of_gws, packet_rate, simulation_duration),
                dpi=200,
                transparent=True)

    sf_energy_figure.get_plot(xlabel='Number of nodes',
                              ylabel='Transmit energy consumption (J)',
                              ylim_bottom=0,
                              xlim_left=0,
                              xlim_right=1000)
    plt.legend(loc='upper left', fontsize='small', title='SF', ncol=2)
    plt.savefig('output/sf_energy_r{}_g{}_p{}_s{}.png'.format(
        topology_radius, number_of_gws, packet_rate, simulation_duration),
                dpi=200,
                transparent=True)
Example #32
    def __init__(self,
                 cost,
                 parameters,
                 update_equation,
                 extra_layers=None,
                 is_local=True,
                 pserver_spec=None,
                 use_etcd=True):

        if not isinstance(parameters, v2_parameters.Parameters):
            raise TypeError('parameters should be parameters')

        if not isinstance(update_equation, v2_optimizer.Optimizer):
            raise TypeError("update equation parameter must be "
                            "paddle.v2.optimizer.Optimizer")
        import py_paddle.swig_paddle as api
        topology = Topology(cost, extra_layers=extra_layers)
        self.__optimizer__ = update_equation
        self.__topology__ = topology
        self.__parameters__ = parameters
        self.__topology_in_proto__ = topology.proto()
        self.__is_local__ = is_local
        self.__pserver_spec__ = pserver_spec
        self.__use_etcd__ = use_etcd

        self.__use_sparse_updater__ = self.__topology__.use_sparse_updater()
        # # In local mode, disable sparse_remote_update.
        if is_local:
            for param in self.__topology_in_proto__.parameters:
                if param.sparse_remote_update:
                    param.sparse_remote_update = False

        self.__gm_create_mode__ = api.CREATE_MODE_NORMAL if not \
            self.__use_sparse_updater__ else api.CREATE_MODE_SGD_SPARSE_CPU_TRAINING
        self.__data_types__ = topology.data_type()
        gm = api.GradientMachine.createFromConfigProto(
            self.__topology_in_proto__, self.__gm_create_mode__,
            self.__optimizer__.enable_types())
        assert isinstance(gm, api.GradientMachine)
        self.__gradient_machine__ = gm
        self.__gradient_machine__.randParameters()
        self.__parameters__.append_gradient_machine(gm)
        self.__parameter_updater__ = None
Example #33
 def test_three_node_topology(self):
     env = simpy.Environment()
     result = Topology(env=env)
     self.assertEqual(result.num_nodes, 3)
     self.assertEqual(result.num_neighbours, 2)
     self.assertCountEqual([n.name for n in result.nodes[0].neighbours],
                           [1, 2])
     self.assertCountEqual([n.name for n in result.nodes[1].neighbours],
                           [0, 2])
     self.assertCountEqual([n.name for n in result.nodes[2].neighbours],
                           [0, 1])
Example #34
	def __init__(self, rdf_file_name, host_name, port_number='8888'):
		self.g=rdflib.Graph()
		self.g.load(rdf_file_name)
		self.prefix = """PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> 
			PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> 
			PREFIX ndl: <http://www.science.uva.nl/research/sne/ndl#> """;
		self.uri="http://"+host_name+":"+port_number+"/"
		print("URI:"+self.uri)
		self.inode_object = IntermediateNode()
		self.topology_object = Topology()
		self.clean_all()
Example #35
    def __init__(self,
                 numnodes,
                 ipv4prefix='10.0.0.0/30',
                 ipv6prefix='a::/126'):
        assert numnodes > 1
        Topology.__init__(self, numnodes)

        self.net = []
        p4 = ipaddr.IPv4Prefix(ipv4prefix)
        p6 = ipaddr.IPv6Prefix(ipv6prefix)

        self.net.append(self.session.addobj(cls=pycore.nodes.SwitchNode))
        addrlist = [
            '%s/%s' % (p4.addr(1), p4.prefixlen),
            '%s/%s' % (p6.addr(1), p6.prefixlen)
        ]
        self.n[0].newnetif(self.net[0], addrlist=addrlist, ifname='eth0')

        for i in xrange(1, numnodes - 1):
            addrlist = [
                '%s/%s' % (p4.addr(2), p4.prefixlen),
                '%s/%s' % (p6.addr(2), p6.prefixlen)
            ]
            self.n[i].newnetif(self.net[i - 1],
                               addrlist=addrlist,
                               ifname='eth0')
            p4 += 1
            p6 += 1
            self.net.append(self.session.addobj(cls=pycore.nodes.SwitchNode))
            addrlist = [
                '%s/%s' % (p4.addr(1), p4.prefixlen),
                '%s/%s' % (p6.addr(1), p6.prefixlen)
            ]
            self.n[i].newnetif(self.net[i], addrlist=addrlist, ifname='eth1')

        i += 1
        addrlist = [
            '%s/%s' % (p4.addr(2), p4.prefixlen),
            '%s/%s' % (p6.addr(2), p6.prefixlen)
        ]
        self.n[i].newnetif(self.net[i - 1], addrlist=addrlist, ifname='eth0')
    def __init__(self, selection_method):

        self.initial_time = time()
        self.method = selection_method
        self.initialize_gains = False

        self.brush = Brushfires()
        self.topology = Topology()
        self.path_planning = PathPlanning()
        self.droneConnector = DroneCommunication()

        # Parameters from YAML File
        self.debug = True  #rospy.get_param('debug')
        self.map_discovery_purpose = rospy.get_param('map_discovery_purpose')
        self.color_evaluation_flag = rospy.get_param('color_rating')
        self.drone_color_evaluation_topic = rospy.get_param(
            'drone_pub_color_rating')
        self.evaluate_potential_targets_srv_name = rospy.get_param(
            'rate_potential_targets_srv')

        # Explore Gains
        self.g_color = 0.0
        self.g_brush = 0.0
        self.g_corner = 0.0
        self.g_distance = 0.0
        self.set_gain()

        if self.color_evaluation_flag:

            # Color Evaluation Service
            self.color_evaluation_service = rospy.ServiceProxy(
                self.evaluate_potential_targets_srv_name, EvaluateTargets)
            # Subscribe to Color Evaluation Topic to Get Results from Color Evaluation
            self.drone_color_evaluation_sub = rospy.Subscriber(
                self.drone_color_evaluation_topic, ColorEvaluationArray,
                self.color_evaluation_cb)
            # Parameters
            self.targets_color_evaluated = False  # Set True Once Color Evaluation of Targets Completed
            self.color_evaluation = []  # Holds the Color Evaluation of Targets
            self.corner_evaluation = []  # Holds the Number of Corners Near Each Target
Example #37
def plot_pr(number_of_nodes_list, averaging, topology_radius, number_of_gws,
            packet_size, simulation_duration, traffic_type):
    packet_rate_list = [0.005, 0.01, 0.02, 0.04, 0.08]
    pr_pdr_figure = SimulationFigure(number_of_nodes_list, packet_rate_list)
    pr_energy_figure = SimulationFigure(number_of_nodes_list, packet_rate_list)

    for packet_rate in packet_rate_list:
        sys.stdout.write('\n{} '.format(packet_rate))
        sys.stdout.flush()
        for number_of_nodes in number_of_nodes_list:
            sys.stdout.write('.')
            sys.stdout.flush()
            simulation_result_sum = SimulationResult()
            for repeat in range(averaging):
                topology = Topology.create_random_topology(
                    number_of_nodes=number_of_nodes,
                    radius=topology_radius,
                    number_of_gws=number_of_gws,
                    node_traffic_proportions=traffic_type)
                simulation = Simulation(
                    topology=topology,
                    packet_rate=packet_rate,
                    packet_size=packet_size,
                    simulation_duration=simulation_duration,
                    sf=PacketSf.SF_Lowest)
                simulation_result_sum += simulation.run()
            pr_pdr_figure.plot_data[packet_rate].append(
                float(simulation_result_sum.pdr) / averaging)
            pr_energy_figure.plot_data[packet_rate].append(
                float(simulation_result_sum.txEnergyConsumption) / averaging)

    pr_pdr_figure.get_plot(xlabel='Number of nodes',
                           ylabel='PDR (%)',
                           ylim_bottom=0,
                           xlim_left=0,
                           xlim_right=1000)
    plt.legend(fontsize='small', title='Packet Rate (pps)')
    plt.savefig('output/pr_pdr_r{}_g{}_s{}.png'.format(topology_radius,
                                                       number_of_gws,
                                                       simulation_duration),
                dpi=200,
                transparent=True)

    pr_energy_figure.get_plot(xlabel='Number of nodes',
                              ylabel='Transmit energy consumption (J)',
                              ylim_bottom=0,
                              xlim_left=0,
                              xlim_right=1000)
    plt.legend(fontsize='small', title='Packet Rate (pps)')
    plt.savefig('output/pr_energy_r{}_g{}_s{}.png'.format(
        topology_radius, number_of_gws, simulation_duration),
                dpi=200,
                transparent=True)
Example #38
 def __init__(self, num_slices=3):
     self.num_slices = num_slices
     self.topology = Topology(1, 2, 3, 1000)
     self.slices = []
     self.flows = []
     self.num_flow_to_route = 0
     self.edge_set = None
     self.action_space = spaces.Discrete(num_slices)
     edge_feature_low = np.zeros((len(self.topology.graph.edges), ),
                                 dtype=np.float)
     edge_feature_high = np.full((len(self.topology.graph.edges), ), np.inf)
     self.observation_space = spaces.Dict(
         dict(
             {
                 'flow':
                 spaces.Box(low=edge_feature_low, high=edge_feature_high)
             }, **{
                 'slice' + str(i): spaces.Box(low=edge_feature_low,
                                              high=edge_feature_high)
                 for i in range(self.num_slices)
             }))
Example #39
def main():
    '''Main entry point for the yans CLI.'''
    args = docopt(__doc__, version=__version__)

    ensure_docker_machine()

    if args['--verbose']:
        logging.getLogger().setLevel(logging.DEBUG)

    topo_file = args['--topo']
    try:
        topo = Topology(topo_file)
    except TopologySpecError as err:
        sys.exit(err)

    if args['up']:
        create_links(topo.links)
        create_nodes(topo.nodes)
        for link in topo.links:
            for interface in link.interfaces:
                bind_interface(interface)
        topo.draw()
        print('To run commands in each node:')
        for node in topo.nodes:
            print('`$ yans -t ' + topo_file + ' console ' + node.name +
                  ' -c <"Commands">`')

    if args['destroy']:
        destroy_nodes(topo.nodes)
        destroy_links(topo.links)

    if args['console']:
        node_name = args['<node_name>']
        commands = args['<commands>']
        node = topo.node_by_name(node_name)
        if node:
            exec_in_node(node, commands)
        else:
            sys.exit('Node named "' + node_name + '" is not found in ' +
                     topo_file)
Example #40
def create_api_service(opts={}):
    # loads extensions
    ext_dict = load_exts()

    # creates topology
    topology = Topology()

    # creates api service
    servant = Servant(topology)
    scheduler = Scheduler()
    proxy = Proxy(scheduler, servant)

    # configures api service
    scheduler.daemon = True
    servant.set_operation_dict(get_operation_dict())

    # configures topology
    set_ext_dict_to_topology(ext_dict, topology)
    if 'is_exec_nothing' not in opts or not opts['is_exec_nothing']:
        topology.set_cmdexecutor(exec_linux_cmd)

    return proxy
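
Usage is then a single call; passing the 'is_exec_nothing' flag (read by the code above) keeps the topology from binding the real Linux command executor:

proxy = create_api_service(opts={'is_exec_nothing': True})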
Example #41
    def __init__(self, config):
        '''
        Constructor
        '''
        #=======================================================================
        # By this time, topology has configured itself based on the config file 
        # or the defaults
        #=======================================================================
        self.config = config
        self.topology = Topology(config)

        self.cache = Cache(reconstruct=self.config.local_reconstruct)

        self.logger.info('setup up communicator for node: ' + str(self.config.node_id) + '\n')
Example #42
def empty_snapshot_from_openmm_topology(topology, simple_topology=False):
    """
    Return an empty snapshot from an openmm.Topology object

    Velocities will be set to zero.

    Parameters
    ----------
    topology : openmm.Topology
        the topology representing the structure and number of atoms
    simple_topology : bool
        if `True` only a simple topology with n_atoms will be created.
        This cannot be used with complex CVs but loads and stores very fast

    Returns
    -------
    openpathsampling.engines.Snapshot
        the complete snapshot with zero coordinates and velocities

    """
    n_atoms = topology.n_atoms

    if simple_topology:
        topology = Topology(n_atoms, 3)
    else:
        topology = MDTrajTopology(md.Topology.from_openmm(topology))

    snapshot = Snapshot.construct(
        coordinates=u.Quantity(np.zeros((n_atoms, 3)), u.nanometers),
        box_vectors=u.Quantity(topology.setUnitCellDimensions(), u.nanometers),
        velocities=u.Quantity(
            np.zeros((n_atoms, 3)), u.nanometers / u.picoseconds),
        engine=TopologyEngine(topology)
    )

    return snapshot
def blendSixCylinder():
    baseSurfaces = BaseSurface()
    blend0 = baseSurfaces.blendCP('cylinder')
    blend0 = transform.move(blend0, 'y', 3.5)



    raw = RawSurface(blend0, 'cylinder')
    raw.setSharpEdges([-1])
    top1 = Topology.create(raw)


    blend2 = transform.rotate(blend0, 'z', -pi/3)
    raw = RawSurface(blend2, 'cylinder')
    raw.setSharpEdges([-1])

    top2 = Topology.create(raw)

    blend3 = transform.rotate(blend0, 'z', -2*pi/3.0)
    raw = RawSurface(blend3, 'cylinder')
    raw.setSharpEdges([-1])
    top3 = Topology.create(raw)

    blend4 = transform.rotate(blend0, 'z', -pi)
    raw = RawSurface(blend4, 'cylinder')
    raw.setSharpEdges([-1])
    top4 = Topology.create(raw)

    blend5 = transform.rotate(blend0, 'z', -4*pi/3)
    raw = RawSurface(blend5, 'cylinder')
    raw.setSharpEdges([-1])
    top5 = Topology.create(raw)

    blend6 = transform.rotate(blend0, 'z', -5*pi/3)
    raw = RawSurface(blend6, 'cylinder')
    raw.setSharpEdges([-1])
    top6 = Topology.create(raw)

    surfaces = [top1, top2, top3, top4, top5, top6]
    tops = [2, 2, 2, 2, 2, 2]
    bottoms = [0, 0, 0, 0, 0, 0]
    aref = [1.5, 1.5]
    arefs = [0.5, 0.5]
    taref = [0.2, 0.2]
    name = 'sixCylinder' + mode
    #mode = 'DS'
    topology = Topology.blendSurfaces(surfaces,tops, bottoms, aref, arefs, taref, name, mode)

    topology.writeToFile()
Example #44
def main(argvs):
    if len(argvs) != 3:
        print ('Usage: # python %s <topology json filepath> <COMMAND:create|delete>'
            % (argvs[0], ))
        quit()
    else:
        topo_json_filepath = argvs[1]
        command = argvs[2]

    topology = Topology()
    topology.load(topo_json_filepath)
    if command == 'create':
        print 'Creating topology ... '
        topology.create()
    elif command == 'delete':
        print 'Deleting topology ... '
        topology.delete()
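
A minimal entry point for the script above; the argument list mirrors the usage string printed by main():

import sys

if __name__ == '__main__':
    main(sys.argv)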
Example #45
    def __init__(self,
                 cost,
                 parameters,
                 update_equation,
                 extra_layers=None,
                 is_local=True,
                 pserver_spec=None,
                 use_etcd=True):

        if not isinstance(parameters, v2_parameters.Parameters):
            raise TypeError('parameters should be parameters')

        if not isinstance(update_equation, v2_optimizer.Optimizer):
            raise TypeError("update equation parameter must be "
                            "paddle.v2.optimizer.Optimizer")
        import py_paddle.swig_paddle as api
        topology = Topology(cost, extra_layers=extra_layers)
        # HACK(typhoonzero): update ParameterConfig(proto) in case of optimizers
        # are defined after layers, or between layers.
        topology.update_from_default()
        parameters.update_param_conf(topology.proto())

        self.__optimizer__ = update_equation
        self.__topology__ = topology
        self.__parameters__ = parameters
        self.__topology_in_proto__ = topology.proto()
        self.__is_local__ = is_local
        self.__pserver_spec__ = pserver_spec
        self.__use_etcd__ = use_etcd

        self.__use_sparse_updater__ = self.__topology__.use_sparse_updater()
        # # In local mode, disable sparse_remote_update.
        if is_local:
            for param in self.__topology_in_proto__.parameters:
                if param.sparse_remote_update:
                    param.sparse_remote_update = False

        self.__gm_create_mode__ = api.CREATE_MODE_NORMAL if not \
            self.__use_sparse_updater__ else api.CREATE_MODE_SGD_SPARSE_CPU_TRAINING
        self.__data_types__ = topology.data_type()
        gm = api.GradientMachine.createFromConfigProto(
            self.__topology_in_proto__, self.__gm_create_mode__,
            self.__optimizer__.enable_types())
        assert isinstance(gm, api.GradientMachine)
        self.__gradient_machine__ = gm
        self.__gradient_machine__.randParameters()
        self.__parameters__.append_gradient_machine(gm)
        self.__parameter_updater__ = None
def blendFiveCone():
    baseSurfaces = BaseSurface()
    blend0 = baseSurfaces.blendCP("cone")
    blend0 = transform.move(blend0, 'y', 12)
    raw = RawSurface(blend0, 'cone')
    raw.setSharpEdges([])
    top1 = Topology.create(raw)

    blend2 = transform.rotate(blend0, 'z', -2*pi/5)
    raw = RawSurface(blend2, 'cone')
    raw.setSharpEdges([])
    top2 = Topology.create(raw)

    blend3 = transform.rotate(blend0, 'z', -4*pi/5)
    raw = RawSurface(blend3, 'cone')
    raw.setSharpEdges([])
    top3 = Topology.create(raw)

    blend4 = transform.rotate(blend0, 'z', -6*pi/5)
    raw = RawSurface(blend4, 'cone')
    raw.setSharpEdges([])
    top4 = Topology.create(raw)

    blend5 = transform.rotate(blend0, 'z', -8*pi/5)
    raw = RawSurface(blend5, 'cone')
    raw.setSharpEdges([])
    top5 = Topology.create(raw)

    # blend6 = transform.rotate(blend0, 'z', -5*pi/3)
    # raw = RawSurface(blend6, 'cone')
    # raw.setSharpEdges([])
    # top6 = Topology.create(raw)

    surfaces = [top1, top2, top3, top4, top5]
    tops = [2, 2, 2, 2, 2]
    bottoms = [0, 0, 0, 0, 0]
    aref = [13, 13]
    arefs = [1.5, 1.5]
    taref = [0.3, 0.3]
    name = 'fiveCone' + mode
    #mode = 'DS'
    topology = Topology.blendSurfaces(surfaces, tops, bottoms, aref, arefs, taref, name, mode)

    topology.writeToFile()
Example #47
File: main.py  Project: cabeggar/simlive
                topology.topo.node[server]['server'] = 0
            else:
                topology.topo.node[server]['qoe'][pos] += viewer_number
                topology.topo.node[server]['server'] -= viewer_number

    # for u, v in topology.topo.edges_iter():
    #     print topology.topo.edge[u][v]['capacity']

    return failed_access, len(failed_channels)


if __name__ == "__main__":
    # Initialize network
    with open('topo/nsfnet.json') as sample_topo:
        data = json.load(sample_topo)
        topology = Topology(data)
        for node in topology.topo.nodes():
            print topology.topo.node[node]
        for u, v in topology.topo.edges_iter():
            print topology.topo.edge[u][v]
        print topology.routing

    # Initialize trace
    # with open('new_trace_pickle') as new_trace:
    #     trace = pickle.load(new_trace)
    #     rounds = len(trace.events)
    #     print "Pickle object loaded"
    trace = Trace('trace/')
    rounds = len(trace.events)

    # Initialize system
Example #48
 def test_single_component(self):
     image = np.zeros((self.SIZE, self.SIZE))
     topology = Topology()
     topology.calculate(image)
     self.assertEqual(len(topology), 1)
Example #49
 def assertTopologyLen(self, image, size):
     topology = Topology()
     topology.calculate(image)
     self.assertEqual(len(topology), size)
Example #50
class RdfDecoder():

	SCHEMAS = {
		'networkresources': 'http://unis.crest.iu.edu/schema/20151104/networkresource#',
		'nodes': 'http://unis.crest.iu.edu/schema/20151104/node#',
		'domains': 'http://unis.crest.iu.edu/schema/20151104/domain#',
		'ports': 'http://unis.crest.iu.edu/schema/20151104/port#',
		'links': 'http://unis.crest.iu.edu/schema/20151104/link#',
		'paths': 'http://unis.crest.iu.edu/schema/20151104/path#',
		'networks': 'http://unis.crest.iu.edu/schema/20151104/network#',
		'topologies': 'http://unis.crest.iu.edu/schema/20151104/topology#',
		'services': 'http://unis.crest.iu.edu/schema/20151104/service#',
		'blipp': 'http://unis.crest.iu.edu/schema/20151104/blipp#',
		'metadata': 'http://unis.crest.iu.edu/schema/20151104/metadata#',
		'datum': 'http://unis.crest.iu.edu/schema/20151104/datum#',
		'data': 'http://unis.crest.iu.edu/schema/20151104/data#',
		'ipports': 'http://unis.crest.iu.edu/schema/ext/ipport/1/ipport#'
	}

	def __init__(self, rdf_file_name, host_name, port_number='8888'):
		self.g=rdflib.Graph()
		self.g.load(rdf_file_name)
		self.prefix = """PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> 
			PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> 
			PREFIX ndl: <http://www.science.uva.nl/research/sne/ndl#> """;
		self.uri="http://"+host_name+":"+port_number+"/"
		print("URI:"+self.uri)
		self.inode_object = IntermediateNode()
		self.topology_object = Topology()
		self.clean_all()
		# self.decode()


	def clean_all(self):
		print("Starting the initial CLEANUP PROCESS")
		self.clean_nodes()
		self.delete_ports()

	def clean_nodes(self):
		print("Deleting the nodes")
		nodes_uri=self.uri+"nodes"
		nodes_list = coreapi.get(nodes_uri)
		for node_dict in nodes_list:
			print node_dict['selfRef']
			# requests.delete(node_dict['selfRef'])
			requests.delete(nodes_uri+"/"+node_dict['selfRef'].split("/")[3])

	def delete_ports(self):
		print("Deleting the ports")
		ports_uri=self.uri+"ports"
		ports_list = coreapi.get(ports_uri)
		for port_dict in ports_list:
			print port_dict['selfRef']
			# requests.delete(port_dict['selfRef'])
			requests.delete(ports_uri+"/"+port_dict['selfRef'].split("/")[3])

	def decode(self):
		print("Starting the DECODE PROCESS")
		self.create_nodes_from_rdf()
		self.update_node_refs()
		self.create_ports_from_rdf()
		self.update_port_refs()
		self.build_port_ref_in_nodes()
		print("Starting Links creation")
		self.create_links_from_rdf()
		print("Printing Inodes")
		self.inode_object.print_inodes()

	def create_nodes_from_rdf(self):
		node_only_query=self.prefix+"""
		SELECT ?name 
		WHERE {
		        ?x rdf:type ndl:Device . ?x ndl:name ?name.
			OPTIONAL {
				?y ndl:connectedTo ?z .
				?z rdf:type ndl:Interface .
				?z ndl:name ?neighbour
			} . OPTIONAL {
				?y ndl:capacity ?capacity .
				?y ndl:encapsulation ?type
			} .
			
		}"""

		nodes_uri=self.uri+"nodes"
		nodes = []
		for row in self.g.query(node_only_query):
			node = dict()
			node["$schema"]=self.SCHEMAS['nodes']
			node["name"]=row.name
			# ports = []
			# port={'href':'instageni.illinois.edu_authority_cm_slice_idms','rel': 'full'}
			# ports.append(port)
			# node["ports"]=ports
			print "Node::"
			print row.name
			nodes.append(node)

		print "::FINAL JSON::"
		json_data = json.dumps(nodes)


		print("NODE URI::"+nodes_uri)
		print("JSON DATA:"+json_data)
		requests.post(nodes_uri, data = json_data)


	def update_node_refs(self):
		nodes_uri=self.uri+"nodes"
		nodes_list = coreapi.get(nodes_uri)

		for check_node in nodes_list:
			print check_node['name']
			print check_node['selfRef']
			node_object = Node(check_node['name'])
			self.topology_object.add_node(self.getId(check_node['selfRef']), node_object)
			print "\n"

		print("PRINIIIIIII")
		self.topology_object.display_topology()

	############# PORTS

	def getId(self, ref_url):
		"""
			http://10.10.0.135:8888/nodes/56f88569e1382308b0b6a2ea will return 56f88569e1382308b0b6a2ea
		:param ref_url:
		:return:
		"""
		return ref_url.split("/")[3]

	def create_ref_url(self, type, id):
		"""
			It will build the url from type and id
		:param type:
		:param id:
		:return:
		"""
		return self.uri+type+"/"+id

	def create_ports_from_rdf(self):
		interface_query=self.prefix+"""
		SELECT ?name ?interface
			WHERE {
			        ?x rdf:type ndl:Device . ?x ndl:name ?name .
				?x ndl:hasInterface ?interface 
				OPTIONAL {
					?y ndl:connectedTo ?z .
					?z rdf:type ndl:Interface .
					?z ndl:name ?neighbour
				} . OPTIONAL {
					?y ndl:capacity ?capacity .
					?y ndl:encapsulation ?type
				} .
				
			}"""

		ports_uri=self.uri+"ports"
		ports = []
		print("PORTSSSSS!!!!")
		for row in self.g.query(interface_query):
			print(row.name+" :::"+row.interface)
			temp_intf_name=row.interface
			intf_name=temp_intf_name.split("#")
			port_name_split=intf_name[1].split(":")
			port_node_name=port_name_split[0]
			port_name=port_name_split[1]
			port = dict()
			port["$schema"]=self.SCHEMAS['ports']
			port["name"]=port_name
			port["nodeRef"]=self.topology_object.get_node_id_by_name(port_node_name)
			ipv4_addr = dict()
			ipv4_addr["type"]="ipv4"
			## TODO: Fill the actual IPv4 address probably from the interface table using Router Proxy
			ipv4_addr["address"]="1.1.1.1"
			port["properties"]={"ipv4":ipv4_addr}
			ports.append(port)

		print "::FINAL PORTS JSON::"
		ports_json_data = json.dumps(ports)
		print(ports_json_data)
		print("PORTS URI::"+ports_uri)
		requests.post(ports_uri, data = ports_json_data)

	###### GET PORT REFS

	def update_port_refs(self):
		ports_uri=self.uri+"ports"
		ports_list = coreapi.get(ports_uri)

		for check_port in ports_list:
			port_name = check_port['name']
			port_ref = self.getId(check_port['selfRef'])
			node_ref = self.getId(check_port['nodeRef'])
			port_object = Port(node_ref, "2.2.2.2")
			self.topology_object.nodes[node_ref].add_port(port_ref, port_object)
			print "\n"

		print("PRINIIIIIII TOPO FINAL")
		self.topology_object.display_topology()


	######### BUILD PORT REF in NODES


	def build_port_ref_in_nodes(self):
		for node_id in self.topology_object.nodes.keys():
			self.topology_object.nodes[node_id].ports
			node = dict()
			ports = []
			for port_id in self.topology_object.nodes[node_id].ports.keys():
				port={'href': self.create_ref_url("ports", port_id),'rel': 'full'}
				ports.append(port)
			node["ports"]=ports
			print "::FINAL JSON::"
			json_data = json.dumps(node)
			print(json_data)

			r=requests.put(node_id, data=json_data)

	def create_links_from_rdf(self):
		links_query = self.prefix+"""
		SELECT ?name ?interface ?connectedTo ?type ?neighbour
		WHERE {
				?x rdf:type ndl:Device . ?x ndl:name ?name .
			?x ndl:hasInterface ?y . ?y rdf:type ndl:Interface .
			?y ndl:name ?interface . ?y ndl:connectedTo ?connectedTo .
			OPTIONAL {
				?y ndl:connectedTo ?z .
				?z rdf:type ndl:Interface .
				?z ndl:name ?neighbour
			} . OPTIONAL {
				?y ndl:capacity ?capacity .
				?y ndl:encapsulation ?type
			} .

		}"""
		for row in self.g.query(links_query):
			print(row['name'], row['interface'], row['connectedTo'])
			dest_node_name  = row['connectedTo'].split("#")[1]
			print("ref::"+row['connectedTo'].split("#")[1])
			print("router...name::"+row['interface'].split(":")[0])
			node_name = row['interface'].split(":")[0]
			print("interface...name::"+row['interface'].split(":")[1])
			intf_name = row['interface'].split(":")[1]
			self.inode_object.add_link_to_inode(node_name, intf_name, dest_node_name)

	def build_links(self):
		link = dict()
		link["directed"] = "false"
		link["$schema"] = self.SCHEMAS['links']
		link["name"] = "linkk"
def main():

    #Build the Argument parser
    parser = argparse.ArgumentParser(description='Generates attack graph input files from topological files')
    parser.add_argument('--hosts-interfaces-file', dest='hosts_interfaces_file', required=True,
                        help='The CSV file containing the hosts and the interfaces.')
    parser.add_argument('--vlans-file', dest='vlans_file', required=True,
                        help='The CSV file containing the VLANS.')
    parser.add_argument('--vulnerability-scan', dest='vulnerability_scan', required=False, nargs='+',
                        help='The Nessus scanner report file(s).')
    parser.add_argument('--openvas-scan', dest='openvas_vulnerability_scan', required=False, nargs='+',
                        help='The OpenVAS scanner report file(s).')
    parser.add_argument('--flow-matrix-file', dest='flow_matrix_file', required=False,
                        help='The CSV file containing the flow matrix')
    parser.add_argument('--routing-file', dest='routing_file', required=False,
                        help='The CSV file containing the routing information')

    parser.add_argument('--mulval-output-file', dest='mulval_output_file', required=False,
                        help='The output path where the mulval input file will be stored.')

    parser.add_argument('--to-fiware-xml-topology', dest='to_fiware_xml_topology', required=False,
                        help='The path where the XML topology file should be stored.')

    parser.add_argument('--display-infos', action='store_true', dest='display_infos', required=False,
                        help='Display information and statistics about the topology.')

    parser.add_argument('-v', dest='verbose', action='store_true', default=False,
                        help='Set log printing level to INFO')

    parser.add_argument('-vv', dest='very_verbose', action='store_true', default=False,
                        help='Set log printing level to DEBUG')

    args = parser.parse_args()


    if args.verbose:
        logging.basicConfig(level=logging.INFO)
    if args.very_verbose:
        logging.basicConfig(level=logging.DEBUG)
    if not args.verbose and not args.very_verbose:
        logging.basicConfig(level=logging.WARNING)

    logging.info("Loading the vulnerability database connector.")
    init_db()

    topology = Topology()
    topology.load_from_topological_input_files(args.hosts_interfaces_file, args.vlans_file)

    if args.vulnerability_scan:
        for vulnerabity_scan_file in args.vulnerability_scan:
            topology.add_nessus_report_information(vulnerabity_scan_file)

    if args.openvas_vulnerability_scan:
        for openvas_scan_file in args.openvas_vulnerability_scan:
            topology.add_openvas_report_information(openvas_scan_file)

    if args.flow_matrix_file:
        topology.flow_matrix = FlowMatrix(topology, args.flow_matrix_file)

    if args.routing_file:
        topology.load_routing_file(args.routing_file)
    else:
        logging.info("No flow matrix file has been provided.")
        topology.flow_matrix = FlowMatrix(topology)

    if args.display_infos:
        topology.print_details()

    if args.mulval_output_file:
        topology.to_mulval_input_file(args.mulval_output_file)

    if args.to_fiware_xml_topology:
        topology.to_fiware_topology_file(args.to_fiware_xml_topology)
Example #52
    def __init__(self, file):
        """Load a PDB file.
        
        The atom positions and Topology can be retrieved by calling getPositions() and getTopology().
        
        Parameters:
         - file (string) the name of the file to load
        """
        top = Topology()
        coords = [];
        ## The Topology read from the PDB file
        self.topology = top
        
        # Load the PDB file
        
        pdb = PdbStructure(open(file))
        PDBFile._loadNameReplacementTables()

        # Build the topology

        atomByNumber = {}
        for chain in pdb.iter_chains():
            c = top.addChain()
            for residue in chain.iter_residues():
                resName = residue.get_name()
                if resName in PDBFile._residueNameReplacements:
                    resName = PDBFile._residueNameReplacements[resName]
                r = top.addResidue(resName, c)
                if resName in PDBFile._atomNameReplacements:
                    atomReplacements = PDBFile._atomNameReplacements[resName]
                else:
                    atomReplacements = {}
                for atom in residue.atoms:
                    atomName = atom.get_name()
                    if atomName in atomReplacements:
                        atomName = atomReplacements[atomName]
                    atomName = atomName.strip()
                    element = None

                    # Try to guess the element.
                    
                    upper = atomName.upper()
                    if upper.startswith('CL'):
                        element = elem.chlorine
                    elif upper.startswith('NA'):
                        element = elem.sodium
                    elif upper.startswith('MG'):
                        element = elem.magnesium
                    elif upper.startswith('BE'):
                        element = elem.beryllium
                    elif upper.startswith('LI'):
                        element = elem.lithium
                    elif upper.startswith('K'):
                        element = elem.potassium
                    elif len(residue) == 1 and upper.startswith('CA'):
                        element = elem.calcium
                    else:
                        try:
                            element = elem.get_by_symbol(atomName[0])
                        except KeyError:
                            pass
                    newAtom = top.addAtom(atomName, element, r)
                    atomByNumber[atom.serial_number] = newAtom
                    pos = atom.get_position()
                    coords.append(pos)
        ## The atom positions read from the PDB file
        self.positions = np.array(coords)
        self.topology.setUnitCellDimensions(pdb.get_unit_cell_dimensions())
        self.topology.createStandardBonds()
        self.topology.createDisulfideBonds(self.positions)
        
        # Add bonds based on CONECT records.
        
        connectBonds = []
        for connect in pdb.models[0].connects:
            i = connect[0]
            for j in connect[1:]:
                connectBonds.append((atomByNumber[i], atomByNumber[j]))
        if len(connectBonds) > 0:
            # Only add bonds that don't already exist.
            existingBonds = set(top.bonds())
            for bond in connectBonds:
                if bond not in existingBonds and (bond[1], bond[0]) not in existingBonds:
                    top.addBond(bond[0], bond[1])
                    existingBonds.add(bond)
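
A minimal usage sketch for the loader above, assuming it is the __init__ of the PDBFile class referenced in the code and that getTopology()/getPositions() return the attributes set here; the path is a placeholder:

# Hedged usage sketch: 'input.pdb' is a placeholder; only the accessors named in the docstring are assumed.
pdb = PDBFile('input.pdb')
topology = pdb.getTopology()      # chains -> residues -> atoms, plus standard and CONECT bonds
positions = pdb.getPositions()    # numpy array with one coordinate per atom
print('loaded %d atom positions' % len(positions))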
Example #53
class Communicator(object):
    '''
    Communicator talks to the client and receives commands from it
    in the form of HTTP GET, PUT and DELETE verbs.
    The HEAD verb is used as a control channel that signals the nodes to perform
    clean-ups, remove dead peers from the key ring, etc.

    See the utils package for sample standalone commands.
    '''

    base_file_path = os.path.abspath('.')

    #------------------------------------------------------------------------------ 
    # Logging setup
    #------------------------------------------------------------------------------ 
    logger = LogHelper.getLogger()

    def __init__(self, config):
        '''
        Constructor
        '''
        #=======================================================================
        # By this time, topology has configured itself based on the config file 
        # or the defaults
        #=======================================================================
        self.config = config
        self.topology = Topology(config)

        self.cache = Cache(reconstruct=self.config.local_reconstruct)

        self.logger.info('setup up communicator for node: ' + str(self.config.node_id) + '\n')


    def GET(self, key, origin='client'):

        if origin == 'client':
            self.logger.info("Request from client")
        else:
            if str(origin) == self.config.node_address:
                self.logger.error("Houston, we have a problem. Cycle detected. Have to fail...")
                return None

        node = self.topology.key_manager.get_node(key)

#        for node in node_gen:

        self.logger.info("Key requested: " + key)
        self.logger.info("Node responsible: " + node)

        if node == self.config.node_address:  #we are responsible for fetching this key from the cache
            return self.cache.fetch(key)
        else:                                   #tell our peer to handle this key
            return self.topology.instructPeer(node, 'GET', cherrypy.serving.request.path_info) #TODO change faux get/post to auto route
            #else:
            #===============================================================================
            # Ideally there would be a loop over the commented-out "for node in node_gen",
            # where node_gen is a generator that iterates over the set of possible peers.
            # We still need a way to update the underlying data structure the generator
            # walks over; that goes against how generators work, so some other trick is needed.
            # Reaching this point would mean the node we contacted was down and was removed
            # from the key ring; since node_gen is a generator, we would spin until we hit
            # the next node, which would assume responsibility for this key.
            #===============================================================================


    def PUT(self, key, value, origin='client'):

        if origin == 'client':
            self.logger.info("Request from client")
        else:
            if str(origin) == self.config.node_address:
                self.logger.error("Houston, we have a problem. Cycle detected. Have to fail...")
                return None

        node = self.topology.key_manager.get_node(key)

#        for node in node_gen:

        if node == self.config.node_address:  #we are responsible for storing this key in the cache
            #also forward the request to mirrors
            print "mirroring"
            self.topology.mirror('PUT', cherrypy.serving.request.path_info)
            return self.cache.store(key, value)
        else:                                   #tell our peer to handle this key
            return self.topology.instructPeer(node, 'PUT', cherrypy.serving.request.path_info) #TODO change faux get/post to auto route
            #===============================================================================
            # This means the node we contacted was down and was removed from the key ring;
            # since node_gen is a generator, we would spin until we hit the next node,
            # which would assume responsibility for this key.
            #===============================================================================



    def DELETE(self, key, origin='client'):

        if origin == 'client':
            self.logger.info("Request from Client")
        else:
            if str(origin) == self.config.node_address:
                self.logger.error("Houston, we have a problem. Cycle detected. Have to fail...")
                return None

        node = self.topology.key_manager.get_node(key)

        if node == self.config.node_address:  #we are responsible for deleting this key from the cache
            #also forward the request to mirrors
            self.topology.mirror('DELETE', cherrypy.serving.request.path_info)
            return self.cache.erase(key)
        else:                                   #tell our peer to handle this key
            return self.topology.instructPeer(node, 'DELETE', cherrypy.serving.request.path_info) #TODO change faux get/post to auto route


    def HEAD(self, key=None, value=None):
        print "head called"
        print key
        print value
        if key is None:
            #===================================================================
            # This is a simple heartbeat check, do nothing
            #===================================================================
            return "alive"
        if key is not None and value is not None:
            if key == 'connect':
                self.topology.connect(value)
            elif key == 'dead':
                print '%s is dead' % value
                self.topology.key_manager.remove_node(value)
        elif key is not None and key == 'reconstruct':
            #===================================================================
            # Need to work on this functionality
            #===================================================================
            self.cache.data_map = self.cache.diskCache.reconstruct() or LRUDict()


    #===============================================================================
    # Need to tell the CherryPy engine to expose this class as a web-servlet definition
    #===============================================================================
    exposed = True
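
A hypothetical client sketch for the handlers above; the node address and the '/<key>' URL layout are assumptions, only the GET/PUT/DELETE/HEAD verbs mirror the class:

# Hedged client sketch: BASE and the '/<key>' path layout are assumptions, not taken from the snippet.
import requests

BASE = 'http://localhost:8080'                  # placeholder node address

requests.put(BASE + '/mykey', data='hello')     # store a value; the responsible node mirrors or forwards it
print(requests.get(BASE + '/mykey').text)       # fetch it back (may be proxied to the responsible peer)
requests.head(BASE)                             # plain heartbeat, returns "alive"
requests.delete(BASE + '/mykey')                # erase the key from the cache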
Example #54
    exit(1)

start = datetime.datetime.now()

try:
    vmfilter = args.vmfilter if args.vmfilter and 'all' not in args.vmfilter \
        else None
    iso = args.iso if args.iso else None
    no_rp = bool(args.no_rp)
    ifaces_naming = args.ifaces_naming if args.ifaces_naming else None
    packages = args.packages if args.packages else None
    single = args.single

    try:
        tp = Topology(cfg_path=args.config,
                      vmfilter=vmfilter, no_rp=no_rp,
                      ifaces_naming=ifaces_naming, single=single)
    except Exception as e:
        if 'check' in args.action:
            logger.error("Configuration {} is not valid!".format(args.config))
            logger.error("Error message:\n{}".format(str(e)))
            exit(1)
        else:
            raise

    for action in args.action.replace("+", ",").split(","):
        if action == 'stop' or action == 'poweroff':
            tp.power_off()
        elif action == 'destroy':
            if args.vmfilter:
                tp.destroy_vms(tp.vms)
            else:
                tp.destroy()
        elif 'start' == action or 'poweron' == action:
Example #55
    def __init__(self, m, n,
                 ipv4prefix = '10.0.0.0/30', ipv6prefix = 'a::/126'):
        assert m > 1
        assert n > 0
        Topology.__init__(self, m * n)

        p4 = ipaddr.IPv4Prefix(ipv4prefix)
        p6 = ipaddr.IPv6Prefix(ipv6prefix)

        class NetConfig(object):
            def __init__(self, net, ipv4prefix, ipv6prefix):
                self.net = net
                self.ipv4prefix = ipv4prefix
                self.ipv6prefix = ipv6prefix

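        # Layout: i indexes the n columns and j the m nodes within each column.
        # Each neighbouring pair of nodes gets its own switch segment carved from
        # the /30 IPv4 and /126 IPv6 prefixes: the node that creates the segment
        # takes addr(1), and the neighbour below or to the right joins it with addr(2).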
        for i in xrange(n):
            for j in xrange(m):
                node = self.n[i * m + j]  # column-major index: column i, position j within it
                ifindex = 0

                # network above
                if j > 0:
                    above = self.n[i * m + j - 1]
                    netcfg = above.netcfg_below
                    addrlist = ['%s/%s' % (netcfg.ipv4prefix.addr(2),
                                           netcfg.ipv4prefix.prefixlen),
                                '%s/%s' % (netcfg.ipv6prefix.addr(2),
                                           netcfg.ipv6prefix.prefixlen)]
                    node.newnetif(netcfg.net, addrlist = addrlist,
                                  ifname = 'eth%s' % ifindex)
                    ifindex += 1
                    node.netcfg_above = above.netcfg_below

                # network to the left
                if i > 0:
                    left = self.n[(i - 1) * m + j]
                    netcfg = left.netcfg_right
                    addrlist = ['%s/%s' % (netcfg.ipv4prefix.addr(2),
                                           netcfg.ipv4prefix.prefixlen),
                                '%s/%s' % (netcfg.ipv6prefix.addr(2),
                                           netcfg.ipv6prefix.prefixlen)]
                    node.newnetif(netcfg.net, addrlist = addrlist,
                                  ifname = 'eth%s' % ifindex)
                    ifindex += 1
                    node.netcfg_left = left.netcfg_right

                # network to the right
                if i < n - 1:
                    net = self.session.addobj(cls = pycore.nodes.SwitchNode)
                    netcfg = NetConfig(net, p4, p6)
                    p4 += 1
                    p6 += 1
                    addrlist = ['%s/%s' % (netcfg.ipv4prefix.addr(1),
                                           netcfg.ipv4prefix.prefixlen),
                                '%s/%s' % (netcfg.ipv6prefix.addr(1),
                                           netcfg.ipv6prefix.prefixlen)]
                    node.newnetif(netcfg.net, addrlist = addrlist,
                                  ifname = 'eth%s' % ifindex)
                    ifindex += 1
                    node.netcfg_right = netcfg

                # network below
                if j < m - 1:
                    net = self.session.addobj(cls = pycore.nodes.SwitchNode)
                    netcfg = NetConfig(net, p4, p6)
                    p4 += 1
                    p6 += 1
                    addrlist = ['%s/%s' % (netcfg.ipv4prefix.addr(1),
                                           netcfg.ipv4prefix.prefixlen),
                                '%s/%s' % (netcfg.ipv6prefix.addr(1),
                                           netcfg.ipv6prefix.prefixlen)]
                    node.newnetif(netcfg.net, addrlist = addrlist,
                                  ifname = 'eth%s' % ifindex)
                    ifindex += 1
                    node.netcfg_below = netcfg