Example 1
def init_graph(n: int, d: int, seed=None) -> nx.Graph:
    """
    Initialize graph with n nodes, and d degree for Albert-Barabasi.
    :param n:
    :param d:
    :return:
    """
    import types

    # init barabasi-albert graph
    g: nx.Graph = nx.generators.random_graphs.barabasi_albert_graph(n, d, seed)

    # add node data to each node
    for _, data in g.nodes(data=True):
        data['data'] = NodeInfo.init_random()

    # add edge data to each edge
    for a, b in g.edges():
        _init_edge(g, a, b)

    # attach custom helper methods; the commented-out variant would bind them
    # to this instance only, while the assignments below patch the nx.Graph class
    # g.add_edge_with_init = types.MethodType(_add_edge, g)
    # g.update_fee = types.MethodType(_update_fee, g)
    # g.get_policy = types.MethodType(_get_policy, g)

    nx.Graph.add_edge_with_init = _add_edge
    nx.Graph.update_fee = _update_fee
    nx.Graph.get_policy = _get_policy

    return g
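A minimal usage sketch, assuming NodeInfo, _init_edge, _add_edge, _update_fee and _get_policy are defined in the same module as init_graph (the parameter values below are arbitrary):

# build a 100-node Barabási-Albert graph, attaching each new node to 2 others
g = init_graph(n=100, d=2, seed=42)
print(g.number_of_nodes(), g.number_of_edges())
print(g.nodes[0]['data'])            # the NodeInfo attached to node 0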
Example 2
def setup_graph(m=0, n=0):
    """ Sets up graph with last node as adversary with two existing channels.
    Caches graph for repeated use.
    """
    if not setup_graph.first_call:
        return nx.read_gpickle('graphstore.temp')
    else:
        setup_graph.first_call = False

        graph = init_graph(m, n)
        new_node = len(graph)
        graph.add_node(new_node, data=NodeInfo.init_random())

        # make initial channels for new node
        highest_degrees = sorted(
            graph.nodes, key=lambda node: len(graph[node]), reverse=True)
        e1, e2 = highest_degrees[0:2]
        graph.add_edge_with_init(new_node, e1, default=True)
        graph.add_edge_with_init(new_node, e2, default=True)

        # save graph for different runs
        nx.write_gpickle(graph, 'graphstore.temp')

        # debugging
        log.write('''## Graph used
|node |neighbours|
| --- | --- |
''')
        log.write('\n'.join(f'|{node:>4d} |{list(neighbours.keys())} |'
                            for node, neighbours in graph.adjacency()) + '\n --- \n')

        return graph
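The first_call function attribute must exist before the first invocation; a hedged driver sketch (the initial attribute assignment and the argument values are assumptions, not shown in the excerpt):

setup_graph.first_call = True        # initialize the cache flag once

graph = setup_graph(m=50, n=2)       # first call: builds the graph and pickles it
graph_cached = setup_graph()         # later calls: reloaded from 'graphstore.temp'
adversary = len(graph) - 1           # the adversary is the last node added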
Example 3
    def default_lschema(self):
        return {AttrDict.KeyAny: NodeInfo()}
Example 4
    def __call__(self, inpt, dumper=NodeInfo(), loader=NodeInfo()):
        self._depth_counter += 1

        # First, looking for a proper dumper for `inpt`.
        dschema = None
        if hasattr(inpt, '__dump__'):
            dumper = inpt
            inpt_iter = dumper.__dump__()
            if hasattr(inpt, '__dschema__'):
                dschema = inpt.__dschema__()
        else:
            # if neither `inpt` nor `dumper` actually have a `__dump__`
            # method, we need to find a suitable dumper from `node_class_map`.
            if not hasattr(dumper, '__dump__'):
                node_info = None
                if not isinstance(dumper, NodeInfo):
                    node_info = NodeInfo(dumper)
                else:
                    node_info = copy.copy(dumper)
                    if node_info.class_info is None:
                        node_info.class_info = [type(inpt)]
                dumper = self._resolve_node_class(inpt, node_info, '__dump__')

            inpt_iter = dumper.__dump__(inpt)
            if hasattr(dumper, '__dschema__'):
                dschema = dumper.__dschema__(inpt)

        if dschema is None:
            dschema = self.default_dschema()
        dschema = AttrDict(dschema)

        # if `loader` doesn't actually have a `__load__` method,
        # we need to find a suitable loader from `node_class_map`,
        # or `fallback_map`.
        if not hasattr(loader, '__load__'):
            node_info = None
            if not isinstance(loader, NodeInfo):
                node_info = NodeInfo(loader)
            else:
                node_info = copy.copy(loader)

            # If the NodeInfo doesn't provide any useful `class_info` about
            # the node class, we directly try to find a good fallback.
            if node_info.class_info is None:
                loader = self._get_fallback(inpt, dumper)
            else:
                loader = self._resolve_node_class(inpt, node_info, '__load__')

        if hasattr(loader, '__lschema__'):
            lschema = loader.__lschema__()
        else:
            lschema = self.default_lschema()
        lschema = AttrDict(lschema)

        # Generator iterating over the dumped data, which will be passed to the
        # loader. The cast is applied recursively if the schema has any nesting.
        generator = _Generator(self, inpt_iter, dschema, lschema)

        # Finally, we load the casted object.
        self.log('%s <= %s' % (dumper, inpt))
        casted = loader.__load__(generator)
        self.log('%s => %s' % (loader, casted))
        self._depth_counter -= 1
        return casted
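For orientation, the dispatcher above only relies on the __dump__/__load__ protocol (plus the optional __dschema__/__lschema__ hooks); a toy pair of node classes satisfying it could look like this (both class names are illustrative, not part of the excerpt):

class DictDumper(object):
    # toy dumper: serializes a plain dict as an iterator of (key, value) pairs
    def __dump__(self, inpt):
        return iter(inpt.items())

class ListLoader(object):
    # toy loader: collects the values produced by the generator into a list
    def __load__(self, items):
        return [value for key, value in items]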
Example 5
    def __init__(cls, name, bases, dict):
        gobject.GObjectMeta.__init__(cls, name, bases, dict)

        # merge all the properties from the base classes
        properties = []
        properties_by_name = {}
        for base in bases + (cls,):
            if hasattr(base, "properties"):
                for p in base.properties:
                    existing_p = properties_by_name.get(p.name)
                    if existing_p is None:
                        tmp = copy.copy(p)
                        properties.append(tmp)
                        properties_by_name[p.name] = tmp
                    else:
                        if existing_p.default is not None:
                            p.default = existing_p.default
                        if existing_p.fset is not None:
                            p.fset = existing_p.fset
                        if existing_p.fget is not None:
                            p.fget = existing_p.fget
        for p in properties:
            p.signal = False
            p.signal_name = None
        if "properties" in dict:
            for p in cls.properties:
                properties_by_name[p.name].signal = p.signal
        cls.properties = properties
        cls.properties_by_name = properties_by_name

        # create signals
        for p in cls.properties:
            if p.signal:
                # a signal is only created when explicitly mentioned in the
                # class itself (and not when it is inherited).
                p.signal_name = ("on-%s-changed" % p.name).replace("_", "-")
                # avoid creating the signal twice. This happens when a plugin
                # is loaded twice (for example in the unit tests).
                if gobject.signal_lookup(p.signal_name, cls) == 0:
                    gobject.signal_new(p.signal_name, cls, gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ())

        # assign the derived default, get and set functions
        for p in cls.properties:
            # default
            if p.fdefault is not None:
                derived_fdefault = dict.get(p.fdefault.__name__)
                if derived_fdefault is not None:
                    p.fdefault = derived_fdefault
            # get
            derived_fget = dict.get(p.fget.__name__)
            if derived_fget is not None:
                p.fget = derived_fget
            # set
            derived_fset = dict.get(p.fset.__name__)
            if derived_fset is not None:
                p.fset = derived_fset

        # merge the edit fields with those from the ancestors
        if not hasattr(cls, "dialog_fields"):
            cls.dialog_fields = set([])
        for base in bases:
            if hasattr(base, "dialog_fields"):
                cls.dialog_fields |= base.dialog_fields

        # create a nodeinfo if needed
        if not hasattr(cls, "info"):
            from node import NodeInfo
            cls.info = NodeInfo()
        # merge the node info with that from the ancestors
        d = {}
        for base in bases:
            if hasattr(base, "info"):
                d.update(base.info.__dict__)
        d.update(cls.info.__dict__)
        cls.info.__dict__ = d
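The final merge behaves like an ordinary dictionary update, ancestors first and the class's own info last, so a subclass only overrides the attributes it sets itself; a toy illustration with plain objects standing in for NodeInfo:

class _Info(object):
    pass                                   # stand-in for NodeInfo, illustration only

base_info, child_info = _Info(), _Info()
base_info.__dict__ = {"icon": "node.svg", "default_action": "select"}
child_info.__dict__ = {"icon": "special.svg"}

merged = {}
merged.update(base_info.__dict__)          # attributes inherited from the base
merged.update(child_info.__dict__)         # the subclass's own attributes win
# merged == {"icon": "special.svg", "default_action": "select"}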
Example 6
    def _monitor_lrms_nodes(self):
        lrms_nodelist = self._platform.get_nodeinfolist()
        now = cpyutils.eventloop.now()

        if lrms_nodelist is None:
            err_time = now - self._timestamp_nodelist
            if err_time < _CONFIGURATION_MONITORING.PERIOD_MONITORING_NODES_FAIL_GRACE:
                _LOGGER.debug(
                    "could not obtain the node list from the platform in the last %s seconds, but we are in grace time"
                    % err_time)
                return

            _LOGGER.warning(
                "could not obtain the node list from the platform in the last %s seconds"
                % err_time)
            lrms_nodelist = {}

        if self._lrms_nodelist is None:
            self._lrms_nodelist = {}

            node_str = ""
            for n_id, node in lrms_nodelist.items():
                self._lrms_nodelist[n_id] = Node.create_from_nodeinfo(
                    node.get_nodeinfo())
                node_str = "%s%s\n" % (node_str, str(
                    self._lrms_nodelist[n_id]))

            # Now we'll get all the remaining nodes in the DB

            existing_hosts = self._db_system.get_hosts()
            latest_monitoring_data = self._db_system.retrieve_latest_monitoring_data()
            for h_id in existing_hosts:
                if h_id not in self._lrms_nodelist:
                    if h_id in latest_monitoring_data:
                        ni = latest_monitoring_data[h_id].get_nodeinfo()
                    else:
                        ni = NodeInfo(h_id, -1, -1, -1, -1)
                    unknown_node = Node.create_from_nodeinfo(ni)
                    unknown_node.set_state(Node.UNKNOWN)
                    self._lrms_nodelist[h_id] = unknown_node

            _LOGGER.debug(
                "\nFirst monitoring of the LRMS:\nList of nodes:\n%s" %
                node_str)

        # Now update OUR information with the information obtained from the node list
        nodes_new = []
        nodes_changed = []

        for n_id, node in lrms_nodelist.items():
            nodeinfo = node.get_nodeinfo()

            if n_id in self._lrms_nodelist:
                _, state_changed = self._lrms_nodelist[n_id].update_info(
                    nodeinfo)
                if state_changed:
                    nodes_changed.append(n_id)
            else:
                _LOGGER.warning("node %s has just appeared" % n_id)
                self._lrms_nodelist[n_id] = Node.create_from_nodeinfo(nodeinfo)
                nodes_new.append(n_id)

        for n_id, node in self._lrms_nodelist.items():
            if (node.state != Node.UNKNOWN) and (n_id not in lrms_nodelist):
                _LOGGER.warning("node %s has dissapeared!" % n_id)
                # TODO: should delete the node?
                node.set_state(Node.UNKNOWN)
                nodes_changed.append(n_id)

        self._update_disabled_nodes()
        self._timestamp_nodelist = now

        for n_id, node in self._lrms_nodelist.items():
            if (node.state == Node.POW_ON) and (
                (now - node.timestamp_state) >
                    _CONFIGURATION_MONITORING.MAX_WAIT_POWERON):
                node.set_state(Node.OFF_ERR)
                nodes_changed.append(n_id)
            if (node.state == Node.POW_OFF) and (
                (now - node.timestamp_state) >
                    _CONFIGURATION_MONITORING.MAX_WAIT_POWEROFF):
                node.set_state(Node.ON_ERR)
                nodes_changed.append(n_id)

        # Now we should update the DB
        for n_id in nodes_changed + nodes_new:
            _LOGGER.debug("node %s changed its state" % n_id)
            self._db_system.store_node_info(self._lrms_nodelist[n_id])
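From the way lrms_nodelist is consumed above, get_nodeinfolist() is expected to return either None (platform failure) or a dict mapping node ids to objects exposing get_nodeinfo(); a minimal test stub under those assumptions (both class names are hypothetical):

class _StubLRMSNode(object):
    # hypothetical stand-in for a platform node, wrapping a NodeInfo
    def __init__(self, nodeinfo):
        self._nodeinfo = nodeinfo

    def get_nodeinfo(self):
        return self._nodeinfo

class _StubPlatform(object):
    # hypothetical platform: returns a fixed node list, or None to simulate failure
    def __init__(self, nodes_by_id):
        self._nodes_by_id = nodes_by_id    # dict: node id -> NodeInfo, or None

    def get_nodeinfolist(self):
        if self._nodes_by_id is None:
            return None
        return dict((n_id, _StubLRMSNode(ni))
                    for n_id, ni in self._nodes_by_id.items())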
Example 7
    signal.signal(signal.SIGABRT, receiveSignal)

    #id ip port of Node
    id = sys.argv[1]
    ip = sys.argv[2]
    port = sys.argv[3]

    node = Node(int(id), ip, int(port), 'boostrap')

    #id ip port for join point Node

    if len(sys.argv) > 6:
        id = sys.argv[4]
        ip = sys.argv[5]
        port = sys.argv[6]
        node.start_serving(NodeInfo(int(id), ip, int(port)))
    else:
        uri = sys.argv[4]
        node.start_serving(uri=uri)
    # the try is needed because, when the node is deleted (i.e. when this
    # process is killed), the while loop raises an exception
    try:
        while node:
            command = input('>>> ')

            if command == '' or command == 'show':
                node.print_info()
            if command == 'quit':
                sys.exit()
            if command.startswith('save'):
                keys = command.split()
                for key in keys[1:]: