Example 1
    def disconnection_request(self, payload):
        """ A request from a peer to disconnect a port"""
        if not ('peer_port_id' in payload or
                ('peer_actor_id' in payload and 'peer_port_name' in payload
                 and 'peer_port_dir' in payload)):
            # Not enough info to find port
            return response.CalvinResponse(response.BAD_REQUEST)
        # Check if port actually is local
        try:
            port = self._get_local_port(payload.get('peer_actor_id'),
                                        payload.get('peer_port_name'),
                                        payload.get('peer_port_dir'),
                                        payload.get('peer_port_id'))
        except:
            # We don't have the port
            return response.CalvinResponse(response.NOT_FOUND)
        else:
            # Disconnect and destroy endpoints
            endpoints = port.disconnect()
            for ep in endpoints:
                if isinstance(ep, endpoint.TunnelOutEndpoint):
                    self.monitor.unregister_out_endpoint(ep)
                ep.destroy()

            return response.CalvinResponse(True)
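Every example on this page builds or consumes a response.CalvinResponse, so a short, hedged sketch of how a caller might inspect the value returned by disconnection_request may help. It assumes calvin-base is installed and imported the way these snippets suggest; handle_reply is a made-up helper, and only behaviour visible in the examples is relied on: truthiness on success, comparison against status constants such as NOT_FOUND, and a printable status.

    # Minimal sketch (assumes calvin-base is installed); handle_reply is a
    # hypothetical helper, not part of Calvin.
    from calvin.utilities import calvinresponse as response

    def handle_reply(reply):
        if reply == response.NOT_FOUND:
            print("peer does not have the port locally")
        elif reply:
            print("disconnect acknowledged")
        else:
            print("disconnect failed: %s" % reply)

    handle_reply(response.CalvinResponse(True))
    handle_reply(response.CalvinResponse(response.NOT_FOUND))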
Example 2
    def migrate(self, actor_id, node_id, callback=None):
        """ Migrate an actor actor_id to peer node node_id """
        if actor_id not in self.actors:
            # Can only migrate actors from our node
            if callback:
                callback(status=response.CalvinResponse(False))
            return
        if node_id == self.node.id:
            # No need to migrate to ourself
            if callback:
                callback(status=response.CalvinResponse(True))
            return

        actor = self.actors[actor_id]
        actor._migrating_to = node_id
        actor.will_migrate()
        actor_type = actor._type
        ports = actor.connections(self.node.id)
        # Disconnect ports and continue in _migrate_disconnect
        self.node.pm.disconnect(callback=CalvinCB(self._migrate_disconnected,
                                                  actor=actor,
                                                  actor_type=actor_type,
                                                  ports=ports,
                                                  node_id=node_id,
                                                  callback=callback),
                                actor_id=actor_id)
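The migrate flow above does nothing synchronously after requesting the disconnect; all context travels with the CalvinCB partial into _migrate_disconnected. A rough stdlib analogue of that callback-chaining pattern, using functools.partial in place of CalvinCB (all names below are made up for illustration):

    from functools import partial

    def _migrate_disconnected(status, actor, node_id, callback=None):
        # Continuation: runs once the port manager reports the disconnect status.
        if callback:
            callback(status=status, actor=actor, node_id=node_id)

    def pm_disconnect(callback):
        # Stand-in for node.pm.disconnect(): report success immediately.
        callback(status=True)

    def report(**kwargs):
        print(kwargs)

    # Bind the migration context up front, the way CalvinCB does above.
    pm_disconnect(partial(_migrate_disconnected,
                          actor="actor-1", node_id="peer-node", callback=report))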
Example 3
    def _connect_by_peer_actor_id(self, key, value, **state):
        """ Gets called when storage responds with peer actor information"""
        _log.analyze(self.node.id,
                     "+",
                     {k: state[k]
                      for k in state.keys() if k != 'callback'},
                     peer_node_id=state['peer_node_id'])
        if not isinstance(value, dict):
            if state['callback']:
                state['callback'](status=response.CalvinResponse(
                    response.BAD_REQUEST,
                    "Storage return invalid information"),
                                  **state)
                return
            else:
                raise Exception("Storage return invalid information")

        if not state['peer_node_id'] and value.get('node_id'):
            state['peer_node_id'] = value['node_id']
        else:
            if state['callback']:
                state['callback'](status=response.CalvinResponse(
                    response.BAD_REQUEST,
                    "Storage return invalid information"),
                                  **state)
                return
            else:
                raise Exception("Storage return invalid information")

        self._connect(**state)
Example 4
    def deployment_add_requirements(self, application_id, reqs, cb):
        app = None
        try:
            app = self.applications[application_id]
        except:
            _log.debug("deployment_add_requirements did not find app %s" %
                       (application_id, ))
            cb(status=response.CalvinResponse(False))
            return
        _log.debug("deployment_add_requirements(app=%s,\n reqs=%s)" %
                   (self.applications[application_id], reqs))

        # TODO extract groups

        if "requirements" not in reqs:
            # No requirements then we are happy
            cb(status=response.CalvinResponse(True))
            return

        if hasattr(app, '_org_cb'):
            # application deployment requirements ongoing, abort
            cb(status=response.CalvinResponse(True))
            return
        app._org_cb = cb
        name_map = app.get_actor_name_map(ns=app.ns)
        # Take a copy of the app's actor list; each actor is removed when done
        app._track_actor_cb = app.get_actors()[:]
        app.actor_placement = {}  # Clean placement slate
        _log.analyze(self._node.id,
                     "+ APP REQ", {'requirements': reqs["requirements"]},
                     tb=True)
        rr = reqs["requirements"].copy()
        for actor_name, req in rr.iteritems():
            # A component maps to a list of actor ids; a plain actor maps to a list with a single id
            actor_ids = name_map.get(
                (app.ns + ":" if app.ns else "") + actor_name, None)
            # Apply the same rule to all actors in a component; the rule gets the component information and can act accordingly
            for actor_id in actor_ids:
                if actor_id not in self._node.am.actors.keys():
                    _log.debug("Only apply requirements to local actors")
                    continue
                actor = self._node.am.actors[actor_id]
                actor.deployment_add_requirements(
                    req, component=(actor_ids if len(actor_ids) > 1 else None))
                _log.analyze(self._node.id,
                             "+ ACTOR REQ", {
                                 'actor_id': actor_id,
                                 'actor_ids': actor_ids
                             },
                             tb=True)
                self.actor_requirements(app, actor_id)
            _log.analyze(self._node.id,
                         "+ ACTOR REQ DONE", {'actor_ids': actor_ids},
                         tb=True)
        _log.analyze(self._node.id,
                     "+ DONE", {'application_id': application_id},
                     tb=True)
Example 5
    def _connect_via_tunnel(self, status=None, **state):
        """ All information and hopefully (status OK) a tunnel to the peer is available for a port connect"""
        port = self._get_local_port(state['actor_id'], state['port_name'],
                                    state['port_dir'], state['port_id'])
        _log.analyze(
            self.node.id,
            "+ " + str(status),
            dict({k: state[k]
                  for k in state.keys() if k != 'callback'},
                 port_is_connected=port.is_connected_to(
                     state['peer_port_id'])),
            peer_node_id=state['peer_node_id'])
        if port.is_connected_to(state['peer_port_id']):
            # The other end beat us to connecting the port, let's just report success and return
            _log.analyze(
                self.node.id,
                "+ IS CONNECTED",
                {k: state[k]
                 for k in state.keys() if k != 'callback'},
                peer_node_id=state['peer_node_id'])
            if state['callback']:
                state['callback'](status=response.CalvinResponse(True),
                                  **state)
            return None

        if not status:
            # Failed getting a tunnel, just inform the one wanting to connect
            if state['callback']:
                state['callback'](status=response.CalvinResponse(
                    response.INTERNAL_ERROR),
                                  **state)
                return None
        # Finally we have all information and a tunnel
        # Let's ask the peer if it can connect our port.
        tunnel = self.tunnels[state['peer_node_id']]
        _log.analyze(
            self.node.id,
            "+ SENDING",
            dict({k: state[k]
                  for k in state.keys() if k != 'callback'},
                 tunnel_status=self.tunnels[state['peer_node_id']].status),
            peer_node_id=state['peer_node_id'])
        if 'retries' not in state:
            state['retries'] = 0
        self.proto.port_connect(callback=CalvinCB(self._connected_via_tunnel,
                                                  **state),
                                port_id=state['port_id'],
                                peer_node_id=state['peer_node_id'],
                                peer_port_id=state['peer_port_id'],
                                peer_actor_id=state['peer_actor_id'],
                                peer_port_name=state['peer_port_name'],
                                peer_port_dir=state['peer_port_dir'],
                                tunnel=tunnel)
Example 6
 def tunnel_destroy_handler(self, payload):
     """ Destroy tunnel (response side) """
     try:
         self.network.link_check(payload['from_rt_uuid'])
     except:
         raise Exception("ERROR_UNKNOWN_RUNTIME")
     try:
         tunnel = self.tunnels[payload['from_rt_uuid']][
             payload['tunnel_id']]
     except:
         _log.analyze(self.rt_id,
                      "+ ERROR_UNKNOWN_TUNNEL",
                      payload,
                      peer_node_id=payload['from_rt_uuid'])
         raise Exception("ERROR_UNKNOWN_TUNNEL")
     # We have the tunnel so close it
     tunnel.close(local_only=True)
     ok = False
     try:
         # Hope the tunnel doesn't mind,
         # TODO since the requester likely doesn't know what to do and we have already closed it
         ok = tunnel.down_handler()
     except:
         pass
     msg = {
         'cmd': 'REPLY',
         'msg_uuid': payload['msg_uuid'],
         'value': response.CalvinResponse(ok).encode()
     }
     self.network.links[payload['from_rt_uuid']].send(msg)
Example 7
 def reply_handler(self, payload):
     """ Gets called when a REPLY messages arrives on this link """
     try:
         # Call the registered callback, for the reply message id, with the reply data as argument
         self.replies.pop(payload['msg_uuid'])(
             response.CalvinResponse(encoded=payload['value']))
     except:
         # We ignore unknown replies
         return
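reply_handler pairs with the REPLY messages built in the tunnel handlers: the sender stores a callback under the message's msg_uuid, and this handler pops and calls it with the decoded CalvinResponse. A hedged sketch of that round trip in isolation (assumes calvin-base; expect_reply and on_reply are made-up names):

    from calvin.utilities import calvinresponse as response

    replies = {}

    def expect_reply(msg_uuid, callback):
        # Remember which callback is waiting for this message id.
        replies[msg_uuid] = callback

    def reply_handler(payload):
        # Pop-and-call; unknown message ids are silently ignored.
        try:
            replies.pop(payload['msg_uuid'])(
                response.CalvinResponse(encoded=payload['value']))
        except KeyError:
            pass

    def on_reply(reply):
        print("reply ok: %s" % bool(reply))

    expect_reply("msg-1", on_reply)
    reply_handler({'msg_uuid': "msg-1",
                   'value': response.CalvinResponse(True).encode()})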
Example 8
    def new(self,
            actor_type,
            args,
            state=None,
            prev_connections=None,
            connection_list=None,
            callback=None,
            signature=None):
        """
        Instantiate an actor of type 'actor_type'. Parameters are passed in 'args',
        'name' is an optional parameter in 'args', specifying a human readable name.
        Returns actor id on success and raises an exception if anything goes wrong.
        Optionally applies a serialized state to the actor; the supplied args are then ignored and the args
        from the state are used instead.
        Optionally reconnects the ports, using either
          1) an unmodified connections structure obtained by the connections command supplied as
             prev_connections or,
          2) a mangled list of tuples with (in_node_id, in_port_id, out_node_id, out_port_id) supplied as
             connection_list
        """
        _log.debug("class: %s args: %s state: %s", actor_type, args, state)
        _log.analyze(self.node.id, "+", {
            'actor_type': actor_type,
            'state': state
        })

        try:
            if state:
                a = self._new_from_state(actor_type, state)
            else:
                a = self._new(actor_type, args)
        except Exception:
            _log.exception("Actor creation failed")
            raise

        # Store the actor signature to enable GlobalStore lookup
        a._signature = signature

        self.actors[a.id] = a

        self.node.storage.add_actor(a, self.node.id)

        if prev_connections:
            # Convert prev_connections to connection_list format
            connection_list = self._prev_connections_to_connection_list(
                prev_connections)

        if connection_list:
            # Migrated actor
            self.connect(a.id, connection_list, callback=callback)
        else:
            # Nothing to connect, so we are OK
            if callback:
                callback(status=response.CalvinResponse(True), actor_id=a.id)
            else:
                return a.id
Example 9
 def destroy_request(self, application_id, actor_ids):
     """ Request from peer of local application parts destruction and related actors """
     _log.debug("Destroy request, app: %s, actors: %s" %
                (application_id, actor_ids))
     _log.analyze(self._node.id, "+", {
         'application_id': application_id,
         'actor_ids': actor_ids
     })
     reply = response.CalvinResponse(True)
     for actor_id in actor_ids:
         if actor_id in self._node.am.list_actors():
             self._node.am.destroy(actor_id)
         else:
             reply = response.CalvinResponse(False)
     if application_id in self.applications:
         del self.applications[application_id]
     _log.debug("Destroy request reply %s" % reply)
     _log.analyze(self._node.id, "+ RESPONSE", {'reply': str(reply)})
     return reply
Example 10
 def _disconnected_port(self, reply, **state):
     """ Get called for each peer port when diconnecting but callback should only be called once"""
     try:
         # Remove this peer from the list of remote peer ports
         self.disconnecting_ports[state['port_id']].remove(state['peer_id'])
     except:
         pass
     if not reply:
         # Got a failed response: do the callback, but also remove the port from the dictionary
         # to indicate that we have sent the callback
         self.disconnecting_ports.pop(state['port_id'])
         if state['callback']:
             state['callback'](status=response.CalvinResponse(False),
                               **state)
     if state['port_id'] in self.disconnecting_ports:
         if not self.disconnecting_ports[state['port_id']]:
             # The port is still in the dictionary and its peer list is now empty, so send OK
             self.disconnecting_ports.pop(state['port_id'])
             if state['callback']:
                 state['callback'](status=response.CalvinResponse(True),
                                   **state)
Example 11
    def link_request_finished(self, key, value, callback):
        """ Called by storage when the node is (not) found """
        _log.analyze(self.node.id, "+", {'value': value}, peer_node_id=key)
        # Test if value is None or False indicating node does not currently exist in storage
        if not value:
            # the peer_id did not exist in storage
            callback(status=response.CalvinResponse(response.NOT_FOUND,
                                                    {'peer_node_id': key}))
            return

        # join the peer node
        self.join([value['uri']], callback, [key])
Example 12
 def _disconnecting_actor_cb(self, status, _callback, port_ids, **state):
     """ Get called for each of the actor's ports when disconnecting, but callback should only be called once
         status: OK or not
         _callback: original callback
         port_ids: list of port ids kept in context between calls when *changed* by this function, do not replace it
         state: dictionary keeping disconnect information
     """
     # Send a negative response if not already done
     if not status and port_ids:
         if _callback:
             del port_ids[:]
             _callback(status=response.CalvinResponse(False),
                       actor_id=state['actor_id'])
     if state['port_id'] in port_ids:
         # Remove this port from list
         port_ids.remove(state['port_id'])
         # If all ports done send positive response
         if not port_ids:
             if _callback:
                 _callback(status=response.CalvinResponse(True),
                           actor_id=state['actor_id'])
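The docstring's warning to change port_ids in place rather than replace it is what makes the "callback only once" promise work: every per-port invocation closes over the same list object, so emptying it with del port_ids[:] or shrinking it with remove() is visible to all of them. A minimal stdlib sketch of that convention (illustrative names only):

    def make_handlers(port_ids, done):
        def one_port_done(port_id):
            if port_id in port_ids:
                port_ids.remove(port_id)
                if not port_ids:        # last port out reports success
                    done(True)
        def abort():
            if port_ids:
                del port_ids[:]         # empty in place; every closure sees it
                done(False)
        return one_port_done, abort

    def done(ok):
        print("all ports handled, ok=%s" % ok)

    one_port_done, abort = make_handlers(["p1", "p2"], done)
    one_port_done("p1")
    one_port_done("p2")   # prints: all ports handled, ok=True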
Example 13
 def tunnel_up(self, tunnel):
     """ Callback that the tunnel is working """
     tunnel_peer_id = tunnel.peer_node_id
     # If a port connect has ordered a tunnel, it has a callback in pending
     # which wants to continue with the connection
     if tunnel_peer_id in self.pending_tunnels:
         for cb in self.pending_tunnels[tunnel_peer_id]:
             try:
                 cb(status=response.CalvinResponse(True))
             except:
                 pass
         self.pending_tunnels.pop(tunnel_peer_id)
Example 14
 def _actor_connected(self, status, peer_port_id, actor_id, peer_port_ids,
                      _callback, **kwargs):
     """ Get called for each of the actor's ports when connecting, but callback should only be called once
         status: success or not
         _callback: original callback
         peer_port_ids: list of port ids kept in context between calls when *changed* by this function,
                        do not replace it
     """
     # Send a negative response if not already done
     if not status and peer_port_ids:
         if _callback:
             del peer_port_ids[:]
             _callback(status=response.CalvinResponse(False),
                       actor_id=actor_id)
     if peer_port_id in peer_port_ids:
         # Remove this port from list
         peer_port_ids.remove(peer_port_id)
         # If all ports done send OK
         if not peer_port_ids:
             if _callback:
                 _callback(status=response.CalvinResponse(True),
                           actor_id=actor_id)
Example 15
    def tunnel_down(self, tunnel):
        """ Callback that the tunnel is not accepted or is going down """
        tunnel_peer_id = tunnel.peer_node_id
        try:
            self.tunnels.pop(tunnel_peer_id)
        except:
            pass

        # If a port connect has ordered a tunnel, it has a callback in pending
        # which wants information on the failure
        if tunnel_peer_id in self.pending_tunnels:
            for cb in self.pending_tunnels[tunnel_peer_id]:
                try:
                    cb(status=response.CalvinResponse(False))
                except:
                    pass
            self.pending_tunnels.pop(tunnel_peer_id)
        # Always return True, which sends an OK on the destruction of the tunnel
        return True
Example 16
 def app_destroy(self, to_rt_uuid, callback, app_id, actor_ids):
     """ Destroys an application with remote actors on to_rt_uuid node
         callback: called when finished, with the peer's response as argument
         app_id: the application to destroy
         actor_ids: optional list of actors to destroy
     """
     if self.network.link_request(to_rt_uuid):
         # Already have link just continue in _app_destroy
         self._app_destroy(to_rt_uuid,
                           callback,
                           app_id,
                           actor_ids,
                           status=response.CalvinResponse(True))
     else:
         # Request a link before continuing in _app_destroy
         self.node.network.link_request(
             to_rt_uuid,
             CalvinCB(self._app_destroy,
                      to_rt_uuid=to_rt_uuid,
                      callback=callback,
                      app_id=app_id,
                      actor_ids=actor_ids))
Example 17
 def actor_new(self, to_rt_uuid, callback, actor_type, state,
               prev_connections):
     """ Creates a new actor on to_rt_uuid node, but is only intended for migrating actors 
         callback: called when finished with the peers respons as argument
         actor_type: see actor manager
         state: see actor manager
         prev_connections: see actor manager
     """
     if self.node.network.link_request(
             to_rt_uuid,
             CalvinCB(self._actor_new,
                      to_rt_uuid=to_rt_uuid,
                      callback=callback,
                      actor_type=actor_type,
                      state=state,
                      prev_connections=prev_connections)):
         # Already have link just continue in _actor_new
         self._actor_new(to_rt_uuid,
                         callback,
                         actor_type,
                         state,
                         prev_connections,
                         status=response.CalvinResponse(True))
Example 18
    def _app_requirements(self, app):
        _log.debug("_app_requirements(app=%s)" % (app, ))
        _log.analyze(
            self._node.id,
            "+ ACTOR PLACEMENT", {
                'placement':
                {k: list(v)
                 for k, v in app.actor_placement.iteritems()}
            },
            tb=True)
        if any([not n for n in app.actor_placement.values()]):
            # At least one actor has no possible placement
            app._org_cb(status=response.CalvinResponse(False))
            del app._org_cb
            _log.analyze(self._node.id,
                         "+ NO PLACEMENT", {'app_id': app.id},
                         tb=True)
            return

        # Collect an actor-by-actor matrix stipulating a weighting 0.0 - 1.0 for their connectivity
        actor_ids, actor_matrix = self._actor_connectivity(app)

        # Get list of all possible nodes
        node_ids = set([])
        for possible_nodes in app.actor_placement.values():
            node_ids |= possible_nodes
        node_ids = list(node_ids)
        _log.analyze(self._node.id,
                     "+ ACTOR MATRIX", {
                         'actor_ids': actor_ids,
                         'actor_matrix': actor_matrix,
                         'node_ids': node_ids
                     },
                     tb=True)

        # Weight the actors possible placement with their connectivity matrix
        weighted_actor_placement = {}
        for actor_id in actor_ids:
            # The actor matrix is symmetric, so it reads the same by row or by column
            actor_weights = actor_matrix[actor_ids.index(actor_id)]
            # Sum the actor weights over each actor's possible nodes: matrix multiplication AA * AN,
            # where AA holds the actor weights and AN is the actor x node matrix with 1 when placement is possible
            weights = [
                sum([
                    actor_weights[actor_ids.index(_id)]
                    if node_id in app.actor_placement[_id] else 0
                    for _id in actor_ids
                ]) for node_id in node_ids
            ]
            # Get first node with highest weight
            # FIXME should verify that the node actually exists also
            # TODO should select from a resource sharing perspective also, instead of picking first max
            _log.analyze(self._node.id, "+ WEIGHTS", {
                'actor_id': actor_id,
                'weights': weights
            })
            weighted_actor_placement[actor_id] = node_ids[weights.index(
                max(weights))]

        for actor_id, node_id in weighted_actor_placement.iteritems():
            # TODO could add callback to try another possible node if the migration fails
            _log.debug("Actor deployment %s \t-> %s" %
                       (app.actors[actor_id], node_id))
            self._node.am.migrate(actor_id, node_id)

        app._org_cb(status=response.CalvinResponse(True),
                    placement=weighted_actor_placement)
        del app._org_cb
        _log.analyze(self._node.id, "+ DONE", {'app_id': app.id}, tb=True)
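To make the AA * AN weighting concrete, here is a small self-contained sketch with two actors and two nodes (all data is made up; in the real code the matrix comes from _actor_connectivity and the placements from storage):

    # Hypothetical connectivity matrix (AA, symmetric) and possible placements (AN).
    actor_ids = ["a1", "a2"]
    actor_matrix = [[1.0, 0.5],   # a1's affinity to a1, a2
                    [0.5, 1.0]]   # a2's affinity to a1, a2
    actor_placement = {"a1": set(["n1", "n2"]), "a2": set(["n2"])}
    node_ids = ["n1", "n2"]

    weighted_actor_placement = {}
    for actor_id in actor_ids:
        actor_weights = actor_matrix[actor_ids.index(actor_id)]
        # Weight of a node = sum of affinities to actors that can run on that node
        weights = [sum(actor_weights[actor_ids.index(_id)]
                       if node_id in actor_placement[_id] else 0
                       for _id in actor_ids)
                   for node_id in node_ids]
        weighted_actor_placement[actor_id] = node_ids[weights.index(max(weights))]

    # Both actors end up on n2, the node that maximizes their mutual connectivity.
    print(weighted_actor_placement)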
Example 19
    def join_finished(self, tp_link, peer_id, uri, is_orginator):
        """ Peer join is (not) accepted, called by transport plugin.
            This may be initiated by us (is_orginator=True) or by the peer, 
            i.e. both nodes get called.
            When inititated by us pending_joins likely have a callback

            tp_link: the transport plugins object for the link (have send etc)
            peer_id: the node id we joined
            uri: the uri used for the join
            is_orginator: did this node request the join True/False
        """
        # while a link is pending it is the responsibility of the transport layer, since
        # higher layers don't have any use for it anyway
        _log.analyze(self.node.id,
                     "+", {
                         'uri': uri,
                         'peer_id': peer_id
                     },
                     peer_node_id=peer_id)
        if tp_link is None:
            # This is a failed join, let's send it upwards
            if uri in self.pending_joins:
                cbs = self.pending_joins.pop(uri)
                if cbs:
                    for cb in cbs:
                        cb(status=response.CalvinResponse(
                            response.SERVICE_UNAVAILABLE),
                           uri=uri,
                           peer_node_id=peer_id)
            return
        # Only support for one RT to RT communication link per peer
        if peer_id in self.links:
            # Likely simultaneous join requests, use the one requested by the node with the highest id
            if is_orginator and self.node.id > peer_id:
                # We requested it and we have the highest node id, hence the one in links is the peer's and we replace it
                _log.analyze(self.node.id,
                             "+ REPLACE ORGINATOR", {
                                 'uri': uri,
                                 'peer_id': peer_id
                             },
                             peer_node_id=peer_id)
                self.links[peer_id] = CalvinLink(self.node.id, peer_id,
                                                 tp_link, self.links[peer_id])
            elif is_orginator and self.node.id < peer_id:
                # We requested it and the peer has the highest node id, hence the one in links is the peer's and we close this new one
                _log.analyze(self.node.id,
                             "+ DROP ORGINATOR", {
                                 'uri': uri,
                                 'peer_id': peer_id
                             },
                             peer_node_id=peer_id)
                tp_link.disconnect()
            elif not is_orginator and self.node.id > peer_id:
                # The peer requested it and we have the highest node id, hence the one in links is ours and we close this new one
                _log.analyze(self.node.id,
                             "+ DROP", {
                                 'uri': uri,
                                 'peer_id': peer_id
                             },
                             peer_node_id=peer_id)
                tp_link.disconnect()
            elif not is_orginator and self.node.id < peer_id:
                # The peer requested it and the peer has the highest node id, hence the one in links is ours and we replace it
                _log.analyze(self.node.id,
                             "+ REPLACE", {
                                 'uri': uri,
                                 'peer_id': peer_id
                             },
                             peer_node_id=peer_id)
                self.links[peer_id] = CalvinLink(self.node.id, peer_id,
                                                 tp_link, self.links[peer_id])
        else:
            # No simultaneous join detected, just add the link
            _log.analyze(self.node.id,
                         "+ INSERT", {
                             'uri': uri,
                             'peer_id': peer_id
                         },
                         peer_node_id=peer_id)
            self.links[peer_id] = CalvinLink(self.node.id, peer_id, tp_link)

        # Find and call any callbacks registered for the uri or peer id
        _log.debug(
            "%s: peer_id: %s, uri: %s\npending_joins_by_id: %s\npending_joins: %s"
            % (self.node.id, peer_id, uri, self.pending_joins_by_id,
               self.pending_joins))
        if peer_id in self.pending_joins_by_id:
            peer_uri = self.pending_joins_by_id.pop(peer_id)
            if peer_uri in self.pending_joins:
                cbs = self.pending_joins.pop(peer_uri)
                if cbs:
                    for cb in cbs:
                        cb(status=response.CalvinResponse(True),
                           uri=peer_uri,
                           peer_node_id=peer_id)

        if uri in self.pending_joins:
            cbs = self.pending_joins.pop(uri)
            if cbs:
                for cb in cbs:
                    cb(status=response.CalvinResponse(True),
                       uri=uri,
                       peer_node_id=peer_id)

        return
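The four REPLACE/DROP branches above follow one rule: when two links are set up simultaneously, both nodes keep the link whose originator has the higher node id, so they converge on the same link. A compact, illustrative restatement of just that decision (not Calvin code):

    def keep_new_link(is_originator, my_id, peer_id):
        """Should the newly established link replace the one already stored?"""
        i_win = my_id > peer_id
        # The new link is ours if we originated it; keep it only if we win the tie.
        return is_originator == i_win

    assert keep_new_link(True, "B", "A")        # REPLACE ORGINATOR branch
    assert not keep_new_link(True, "A", "B")    # DROP ORGINATOR branch
    assert not keep_new_link(False, "B", "A")   # DROP branch
    assert keep_new_link(False, "A", "B")       # REPLACE branch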
Example 20
    def join(self, uris, callback=None, corresponding_peer_ids=None):
        """ Join the peers accessable from list of URIs
            It is possible to have a list of corresponding peer_ids,
            which is used to filter the uris list for already connected
            or pending peer's connections.
            URI can't be used for matching since not neccessarily the same if peer connect to us
            uris: list of uris
            callback: will get called for each uri with arguments status, peer_node_id and uri
            corresponding_peer_ids: list of node ids matching the list of uris

            TODO: If corresponding_peer_ids is not specified it is possible that the callback is never called
            when a simultaneous join happens, since it cannot be detected by URI alone.
            Should add a timeout that cleans out callbacks with failed status replies and lets the client retry.
        """
        # For each URI and, when available, a corresponding peer id
        peer_ids = (corresponding_peer_ids
                    if corresponding_peer_ids and len(uris) == len(corresponding_peer_ids)
                    else [None] * len(uris))
        for uri, peer_id in zip(uris, peer_ids):
            if not (uri in self.pending_joins or peer_id
                    in self.pending_joins_by_id or peer_id in self.links):
                # No simultaneous join detected
                schema = uri.split(":", 1)[0]
                _log.analyze(self.node.id,
                             "+", {
                                 'uri': uri,
                                 'peer_id': peer_id,
                                 'schema': schema,
                                 'transports': self.transports.keys()
                             },
                             peer_node_id=peer_id)
                if schema in self.transports.keys():
                    # store we have a pending join and its callback
                    if peer_id:
                        self.pending_joins_by_id[peer_id] = uri
                    if callback:
                        self.pending_joins[uri] = [callback]
                    # Ask the transport plugin to do the join
                    _log.analyze(self.node.id,
                                 "+ TRANSPORT", {
                                     'uri': uri,
                                     'peer_id': peer_id
                                 },
                                 peer_node_id=peer_id)
                    self.transports[schema].join(uri)
            else:
                # We have simultaneous joins
                _log.analyze(self.node.id,
                             "+ SIMULTANEOUS", {
                                 'uri': uri,
                                 'peer_id': peer_id
                             },
                             peer_node_id=peer_id)
                if callback:
                    if peer_id in self.links:
                        # Link already established, so call the callback now
                        callback(status=response.CalvinResponse(True), uri=uri)
                        continue
                    # Otherwise we also want to be called when the ongoing link setup finishes
                    if uri in self.pending_joins:
                        self.pending_joins[uri].append(callback)
                    else:
                        self.pending_joins[uri] = [callback]
Example 21
    def connect(self,
                callback=None,
                actor_id=None,
                port_name=None,
                port_dir=None,
                port_id=None,
                peer_node_id=None,
                peer_actor_id=None,
                peer_port_name=None,
                peer_port_dir=None,
                peer_port_id=None):
        """ Obtain any missing information to enable making a connection and make actual connect 
            callback: an optional callback that gets called with status when finished
            local port identified by:
                actor_id, port_name and port_dir='in'/'out' or
                port_id
            peer_node_id: an optional node id that the peer port is located on; storage is used to find it if not supplied
            peer port (remote or local) identified by:
                peer_actor_id, peer_port_name and peer_port_dir='in'/'out' or
                peer_port_id
                
            connect -----------------------------> _connect -> _connect_via_tunnel -> _connected_via_tunnel -!
                    \> _connect_by_peer_port_id /           \-> _connect_via_local -!
                    \-> _connect_by_actor_id ---/
        """
        # Collect all parameters into a state that we keep between the chain of callbacks needed to complete a connection
        state = {
            'callback': callback,
            'actor_id': actor_id,
            'port_name': port_name,
            'port_dir': port_dir,
            'port_id': port_id,
            'peer_node_id': peer_node_id,
            'peer_actor_id': peer_actor_id,
            'peer_port_name': peer_port_name,
            'peer_port_dir': peer_port_dir,
            'peer_port_id': peer_port_id
        }
        _log.analyze(self.node.id,
                     "+",
                     {k: state[k]
                      for k in state.keys() if k != 'callback'},
                     peer_node_id=state['peer_node_id'])
        try:
            port = self._get_local_port(actor_id, port_name, port_dir, port_id)
        except:
            # not local
            if port_id:
                status = response.CalvinResponse(
                    response.BAD_REQUEST,
                    "First port %s must be local" % (port_id))
            else:
                status = response.CalvinResponse(
                    response.BAD_REQUEST,
                    "First port %s on actor %s must be local" %
                    (port_name, actor_id))
            if callback:
                callback(status=status,
                         actor_id=actor_id,
                         port_name=port_name,
                         port_id=port_id,
                         peer_node_id=peer_node_id,
                         peer_actor_id=peer_actor_id,
                         peer_port_name=peer_port_name,
                         peer_port_id=peer_port_id)
                return
            else:
                raise Exception(str(status))
        else:
            # Found locally
            state['port_id'] = port.id

        # Check if the peer port is local even if peer_node_id is missing
        if not peer_node_id and peer_actor_id in self.node.am.actors:
            state['peer_node_id'] = self.node.id

        if not state['peer_node_id'] and state['peer_port_id']:
            try:
                self._get_local_port(None, None, None, peer_port_id)
            except:
                # not local
                pass
            else:
                # Found locally
                state['peer_node_id'] = self.node.id

        # Still no peer node id? ...
        if not state['peer_node_id']:
            if state['peer_port_id']:
                # ... but we have a port id, let's ask storage for more info
                self.node.storage.get_port(
                    state['peer_port_id'],
                    CalvinCB(self._connect_by_peer_port_id, **state))
                return
            elif state['peer_actor_id'] and state['peer_port_name']:
                # ... but we have an actor id, let's ask storage for more info
                self.node.storage.get_actor(
                    state['peer_actor_id'],
                    CalvinCB(self._connect_by_peer_actor_id, **state))
                return
            else:
                # ... and no info on how to get more info, abort
                status = response.CalvinResponse(
                    response.BAD_REQUEST,
                    "Need peer_node_id (%s), peer_actor_id(%s) and/or peer_port_id(%s)"
                    % (peer_node_id, peer_actor_id, peer_port_id))
                if callback:
                    callback(status=status,
                             actor_id=actor_id,
                             port_name=port_name,
                             port_id=port_id,
                             peer_node_id=peer_node_id,
                             peer_actor_id=peer_actor_id,
                             peer_port_name=peer_port_name,
                             peer_port_id=peer_port_id)
                    return
                else:
                    raise Exception(str(status))
        else:
            if not ((peer_actor_id and peer_port_name) or peer_port_id):
                # We lack the information needed to find the peer port
                status = response.CalvinResponse(
                    response.BAD_REQUEST,
                    "Need peer_port_name (%s), peer_actor_id(%s) and/or peer_port_id(%s)"
                    % (peer_port_name, peer_actor_id, peer_port_id))
                if callback:
                    callback(status=status,
                             actor_id=actor_id,
                             port_name=port_name,
                             port_id=port_id,
                             peer_node_id=peer_node_id,
                             peer_actor_id=peer_actor_id,
                             peer_port_name=peer_port_name,
                             peer_port_id=peer_port_id)
                    return
                else:
                    raise Exception(str(status))

        self._connect(**state)
Example 22
    def _disconnect_port(self, callback=None, port_id=None):
        """ Obtain any missing information to enable disconnecting one port and make the disconnect"""
        # Collect all parameters into a state that we keep for the sub functions and callback
        state = {'callback': callback, 'port_id': port_id, 'peer_ids': None}
        # Check if port actually is local
        try:
            port = self._get_local_port(None, None, None, port_id)
        except:
            # not local
            status = response.CalvinResponse(
                response.NOT_FOUND, "Port %s must be local" % (port_id))
            if callback:
                callback(status=status, port_id=port_id)
                return
            else:
                raise Exception(str(status))
        else:
            # Found locally
            state['port_name'] = port.name
            state['port_dir'] = "in" if isinstance(port, InPort) else "out"
            state['actor_id'] = port.owner.id if port.owner else None

        port = self.ports[state['port_id']]
        # Now check the peer port, peer_ids is list of (peer_node_id, peer_port_id) tuples
        peer_ids = []
        if isinstance(port, InPort):
            # An inport has only one possible peer
            peer_ids = [port.get_peer()]
        else:
            # An outport can have several peers
            peer_ids = port.get_peers()

        # Disconnect and destroy the endpoints
        endpoints = port.disconnect()
        for ep in endpoints:
            if isinstance(ep, endpoint.TunnelOutEndpoint):
                self.monitor.unregister_out_endpoint(ep)
            ep.destroy()

        ok = True
        for peer_node_id, peer_port_id in peer_ids:
            if peer_node_id == 'local':
                # Use the disconnection request function since it does not matter whether the request is local or remote
                if not self.disconnection_request(
                    {'peer_port_id': peer_port_id}):
                    ok = False

        # Inform all the remote ports of the disconnect
        remote_peers = [pp for pp in peer_ids if pp[0] and pp[0] != 'local']
        # Keep track of disconnection of remote peer ports
        self.disconnecting_ports[state['port_id']] = remote_peers
        for peer_node_id, peer_port_id in remote_peers:
            self.proto.port_disconnect(callback=CalvinCB(
                self._disconnected_port,
                peer_id=(peer_node_id, peer_port_id),
                **state),
                                       port_id=state['port_id'],
                                       peer_node_id=peer_node_id,
                                       peer_port_id=peer_port_id)

        # Done disconnecting the port
        if not remote_peers or not ok:
            self.disconnecting_ports.pop(state['port_id'])
            if state['callback']:
                _log.analyze(
                    self.node.id, "+ DONE",
                    {k: state[k]
                     for k in state.keys() if k != 'callback'})
                state['callback'](status=response.CalvinResponse(ok), **state)
Example 23
    def connection_request(self, payload):
        """ A request from a peer to connect a port"""
        _log.analyze(self.node.id,
                     "+",
                     payload,
                     peer_node_id=payload['from_rt_uuid'])
        if not ('peer_port_id' in payload or
                ('peer_actor_id' in payload and 'peer_port_name' in payload
                 and 'peer_port_dir' in payload)):
            # Not enough info to find port
            _log.analyze(self.node.id,
                         "+ NOT ENOUGH DATA",
                         payload,
                         peer_node_id=payload['from_rt_uuid'])
            return response.CalvinResponse(response.BAD_REQUEST)
        try:
            port = self._get_local_port(payload.get('peer_actor_id'),
                                        payload.get('peer_port_name'),
                                        payload.get('peer_port_dir'),
                                        payload.get('peer_port_id'))
        except:
            # We don't have the port
            _log.analyze(self.node.id,
                         "+ PORT NOT FOUND",
                         payload,
                         peer_node_id=payload['from_rt_uuid'])
            return response.CalvinResponse(response.NOT_FOUND)
        else:
            if 'tunnel_id' not in payload:
                # TODO implement connection requests not via tunnel
                raise NotImplementedError()
            tunnel = self.tunnels[payload['from_rt_uuid']]
            if tunnel.id != payload['tunnel_id']:
                # For some reason the tunnel id does not match the one we have for connecting to the peer
                # Likely we have not yet received the tunnel request from the peer that replaces our tunnel id
                # Can happen in a race of simultaneous link setup, when commands are received out of order
                _log.analyze(self.node.id,
                             "+ WRONG TUNNEL",
                             payload,
                             peer_node_id=payload['from_rt_uuid'])
                return response.CalvinResponse(response.GONE)

            if isinstance(port, InPort):
                endp = endpoint.TunnelInEndpoint(port, tunnel,
                                                 payload['from_rt_uuid'],
                                                 payload['port_id'],
                                                 self.node.sched.trigger_loop)
            else:
                endp = endpoint.TunnelOutEndpoint(port, tunnel,
                                                  payload['from_rt_uuid'],
                                                  payload['port_id'],
                                                  self.node.sched.trigger_loop)
                self.monitor.register_out_endpoint(endp)

            invalid_endpoint = port.attach_endpoint(endp)
            # Remove previous endpoint
            if invalid_endpoint:
                if isinstance(invalid_endpoint, endpoint.TunnelOutEndpoint):
                    self.monitor.unregister_out_endpoint(invalid_endpoint)
                invalid_endpoint.destroy()

            # Update storage
            if isinstance(port, InPort):
                self.node.storage.add_port(port, self.node.id, port.owner.id,
                                           "in")
            else:
                self.node.storage.add_port(port, self.node.id, port.owner.id,
                                           "out")

            _log.analyze(self.node.id,
                         "+ OK",
                         payload,
                         peer_node_id=payload['from_rt_uuid'])
            return response.CalvinResponse(response.OK, {'port_id': port.id})
Example 24
    def _connected_via_tunnel(self, reply, **state):
        """ Gets called when remote responds to our request for port connection """
        _log.analyze(self.node.id,
                     "+ " + str(reply),
                     {k: state[k]
                      for k in state.keys() if k != 'callback'},
                     peer_node_id=state['peer_node_id'])
        if reply in [response.BAD_REQUEST, response.NOT_FOUND]:
            # Other end did not accept our port connection request
            if state['retries'] == 0 and state['peer_node_id']:
                # Maybe it is on another node now, let's retry and look up the port
                state['peer_node_id'] = None
                state['retries'] += 1
                self.node.storage.get_port(
                    state['peer_port_id'],
                    CalvinCB(self._connect_by_peer_port_id, **state))
                return None
            if state['callback']:
                state['callback'](status=response.CalvinResponse(
                    response.NOT_FOUND),
                                  **state)
                return None

        if reply == response.GONE:
            # The other end did not accept our port connection request, likely because it has not
            # received the message about the tunnel in time
            _log.analyze(
                self.node.id,
                "+ RETRY",
                {k: state[k]
                 for k in state.keys() if k != 'callback'},
                peer_node_id=state['peer_node_id'])
            if state['retries'] < 2:
                state['retries'] += 1
                self._connect_via_tunnel(**state)
                return None

        # Set up the port's endpoint
        tunnel = self.tunnels[state['peer_node_id']]
        port = self.ports[state['port_id']]
        if isinstance(port, InPort):
            endp = endpoint.TunnelInEndpoint(port, tunnel,
                                             state['peer_node_id'],
                                             reply.data['port_id'],
                                             self.node.sched.trigger_loop)
        else:
            endp = endpoint.TunnelOutEndpoint(port, tunnel,
                                              state['peer_node_id'],
                                              reply.data['port_id'],
                                              self.node.sched.trigger_loop)
            # register into main loop
            self.monitor.register_out_endpoint(endp)
        invalid_endpoint = port.attach_endpoint(endp)
        # remove previous endpoint
        if invalid_endpoint:
            if isinstance(invalid_endpoint, endpoint.TunnelOutEndpoint):
                self.monitor.unregister_out_endpoint(invalid_endpoint)
            invalid_endpoint.destroy()

        # Done connecting the port
        if state['callback']:
            state['callback'](status=response.CalvinResponse(True), **state)

        # Update storage
        if isinstance(port, InPort):
            self.node.storage.add_port(port, self.node.id, port.owner.id, "in")
        else:
            self.node.storage.add_port(port, self.node.id, port.owner.id,
                                       "out")
Example 25
    def tunnel_new_handler(self, payload):
        """ Create a new tunnel (response side) """
        tunnel = self._get_tunnel(payload['from_rt_uuid'], payload['type'])
        ok = False
        _log.analyze(self.rt_id,
                     "+",
                     payload,
                     peer_node_id=payload['from_rt_uuid'])
        if tunnel:
            _log.analyze(self.rt_id,
                         "+ PENDING",
                         payload,
                         peer_node_id=payload['from_rt_uuid'])
            # Got a tunnel new request while we already have one pending;
            # it is not allowed to send a new request while a tunnel is working
            if tunnel.status != CalvinTunnel.STATUS.WORKING:
                ok = True
                # The one with the lowest tunnel id loses
                if tunnel.id < payload['tunnel_id']:
                    # Our tunnel has the lowest id: change our tunnel's id,
                    # update status and call the proper callbacks,
                    # but send the tunnel reply first to get everything in order
                    msg = {
                        'cmd': 'REPLY',
                        'msg_uuid': payload['msg_uuid'],
                        'value': response.CalvinResponse(
                            ok, data={'tunnel_id': payload['tunnel_id']}).encode()
                    }
                    self.network.links[payload['from_rt_uuid']].send(msg)
                    tunnel._setup_ack(
                        response.CalvinResponse(
                            True, data={'tunnel_id': payload['tunnel_id']}))
                    _log.analyze(self.rt_id,
                                 "+ CHANGE ID",
                                 payload,
                                 peer_node_id=payload['from_rt_uuid'])
                else:
                    # Our tunnel has the highest id: keep our id,
                    # update status and call the proper callbacks,
                    # but send the tunnel reply first to get everything in order
                    msg = {
                        'cmd': 'REPLY',
                        'msg_uuid': payload['msg_uuid'],
                        'value': response.CalvinResponse(
                            ok, data={'tunnel_id': tunnel.id}).encode()
                    }
                    self.network.links[payload['from_rt_uuid']].send(msg)
                    tunnel._setup_ack(
                        response.CalvinResponse(True,
                                                data={'tunnel_id': tunnel.id}))
                    _log.analyze(self.rt_id,
                                 "+ KEEP ID",
                                 payload,
                                 peer_node_id=payload['from_rt_uuid'])
            else:
                # FIXME if this happens need to decide what to do
                _log.analyze(self.rt_id,
                             "+ DROP FIXME",
                             payload,
                             peer_node_id=payload['from_rt_uuid'])
            return
        else:
            # No simultaneous tunnel requests, let's create it ...
            tunnel = CalvinTunnel(self.network.links,
                                  self.tunnels,
                                  payload['from_rt_uuid'],
                                  payload['type'],
                                  payload['policy'],
                                  rt_id=self.node.id,
                                  id=payload['tunnel_id'])
            _log.analyze(self.rt_id,
                         "+ NO SMASH",
                         payload,
                         peer_node_id=payload['from_rt_uuid'])
            try:
                # ... and see if the handler wants it
                ok = self.tunnel_handlers[payload['type']](tunnel)
            except:
                pass
        # Send the response
        msg = {
            'cmd': 'REPLY',
            'msg_uuid': payload['msg_uuid'],
            'value': response.CalvinResponse(ok, data={'tunnel_id': tunnel.id}).encode()
        }
        self.network.links[payload['from_rt_uuid']].send(msg)

        # If the handler did not want it, close it again
        if not ok:
            tunnel.close(local_only=True)
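The simultaneous-request handling above boils down to a symmetric tie break: both runtimes have created a tunnel and sent a request, and both sides keep the one with the higher tunnel id, so they converge on the same id. A tiny illustrative sketch of that rule (ids are made up):

    def surviving_tunnel_id(our_id, peer_id):
        # Lowest id loses: both peers independently arrive at the same winner.
        return max(our_id, peer_id)

    assert surviving_tunnel_id("tunnel-aaa", "tunnel-bbb") == "tunnel-bbb"
    assert surviving_tunnel_id("tunnel-bbb", "tunnel-aaa") == "tunnel-bbb"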
Example 26
    def disconnect(self,
                   callback=None,
                   actor_id=None,
                   port_name=None,
                   port_dir=None,
                   port_id=None):
        """ Do disconnect for port(s)
            callback: an optional callback that gets called with status when finished
            ports identified by only local actor_id:
                actor_id: the actor that all ports will be disconnected on
                callback will be called once when all ports are disconnected or first failed
            local port identified by:
                actor_id, port_name and port_dir='in'/'out' or
                port_id
                callback will be called once when all peer ports (fanout) are disconnected or first failed

            disconnect -*> _disconnect_port -*> _disconnected_port (-*> _disconnecting_actor_cb) -> !
        """
        port_ids = []
        if actor_id and not (port_id or port_name or port_dir):
            # We disconnect all ports on an actor
            try:
                actor = self.node.am.actors[actor_id]
            except:
                # actor not found
                status = response.CalvinResponse(
                    response.NOT_FOUND, "Actor %s must be local" % (actor_id))
                if callback:
                    callback(status=status,
                             actor_id=actor_id,
                             port_name=port_name,
                             port_id=port_id)
                    return
                else:
                    raise Exception(str(status))
            else:
                port_ids.extend([p.id for p in actor.inports.itervalues()])
                port_ids.extend([p.id for p in actor.outports.itervalues()])
                # Need to collect all callbacks into one
                if callback:
                    callback = CalvinCB(self._disconnecting_actor_cb,
                                        _callback=callback,
                                        port_ids=port_ids)
        else:
            # Just one port to disconnect
            if port_id:
                port_ids.append(port_id)
            else:
                # Awkward, but let's get the port id from the name etc. so that the rest can loop over port ids
                try:
                    port = self._get_local_port(actor_id, port_name, port_dir,
                                                port_id)
                except:
                    # not local
                    status = response.CalvinResponse(
                        response.NOT_FOUND,
                        "Port %s on actor %s must be local" %
                        (port_name if port_name else port_id,
                         actor_id if actor_id else "some"))
                    if callback:
                        callback(status=status,
                                 actor_id=actor_id,
                                 port_name=port_name,
                                 port_id=port_id)
                        return
                    else:
                        raise Exception(str(status))
                else:
                    # Found locally
                    port_ids.append(port.id)

        _log.analyze(self.node.id, "+", {'port_ids': port_ids})

        # Run over a copy of the list of ports since it is modified inside the loop
        for port_id in port_ids[:]:
            self._disconnect_port(callback, port_id)
Example 27
    def _connect(self, **state):
        """ Do the connection of ports, all neccessary information supplied but
            maybe not all pre-requisites for remote connections.
        """
        _log.analyze(self.node.id,
                     "+",
                     {k: state[k]
                      for k in state.keys() if k != 'callback'},
                     peer_node_id=state['peer_node_id'])
        # Local connect
        if self.node.id == state['peer_node_id']:
            _log.analyze(
                self.node.id,
                "+ LOCAL",
                {k: state[k]
                 for k in state.keys() if k != 'callback'},
                peer_node_id=state['peer_node_id'])
            port1 = self._get_local_port(state['actor_id'], state['port_name'],
                                         state['port_dir'], state['port_id'])
            port2 = self._get_local_port(state['peer_actor_id'],
                                         state['peer_port_name'],
                                         state['peer_port_dir'],
                                         state['peer_port_id'])
            # Local connect wants the first port to be an inport
            inport, outport = (port1, port2) if isinstance(port1, InPort) else (port2, port1)
            self._connect_via_local(inport, outport)
            if state['callback']:
                state['callback'](status=response.CalvinResponse(True),
                                  **state)
            return None

        # Remote connection
        # TODO Currently we only have support for setting up a remote connection via tunnel
        tunnel = None
        if state['peer_node_id'] not in self.tunnels:
            # No tunnel to peer, get one first
            _log.analyze(
                self.node.id,
                "+ GET TUNNEL",
                {k: state[k]
                 for k in state.keys() if k != 'callback'},
                peer_node_id=state['peer_node_id'])
            tunnel = self.proto.tunnel_new(state['peer_node_id'], 'token', {})
            tunnel.register_tunnel_down(CalvinCB(self.tunnel_down, tunnel))
            tunnel.register_tunnel_up(CalvinCB(self.tunnel_up, tunnel))
            tunnel.register_recv(CalvinCB(self.tunnel_recv_handler, tunnel))
            self.tunnels[state['peer_node_id']] = tunnel
        else:
            tunnel = self.tunnels[state['peer_node_id']]

        if tunnel.status == CalvinTunnel.STATUS.PENDING:
            if state['peer_node_id'] not in self.pending_tunnels:
                self.pending_tunnels[state['peer_node_id']] = []
            # call _connect_via_tunnel when we get the response of the tunnel
            self.pending_tunnels[state['peer_node_id']].append(
                CalvinCB(self._connect_via_tunnel, **state))
            return
        elif tunnel.status == CalvinTunnel.STATUS.TERMINATED:
            # TODO should we retry at this level?
            if state['callback']:
                state['callback'](status=response.CalvinResponse(
                    response.INTERNAL_ERROR),
                                  **state)
            return

        _log.analyze(
            self.node.id,
            "+ HAD TUNNEL",
            dict({k: state[k]
                  for k in state.keys() if k != 'callback'},
                 tunnel_status=self.tunnels[state['peer_node_id']].status),
            peer_node_id=state['peer_node_id'])
        self._connect_via_tunnel(status=response.CalvinResponse(True), **state)