def _migrate_got_app(self, key, value, app_id, deploy_info, move, cb):
     if response.isfailresponse(value):
         if cb:
             cb(status=response.CalvinResponse(response.NOT_FOUND))
         return
     app = Application(app_id, value['name'], value['origin_node_id'],
                                           self._node.am, actors=value['actors_name_map'], deploy_info=deploy_info)
     app.group_components()
     app._migrated_actors = {a: None for a in app.actors}
     for actor_id, actor_name in app.actors.iteritems():
         req = app.get_req(actor_name)
         if not req:
             _log.analyze(self._node.id, "+ NO REQ", {'actor_id': actor_id, 'actor_name': actor_name})
             # No requirement then leave as is.
             self._migrated_cb(response.CalvinResponse(True), app, actor_id, cb)
             continue
         if actor_id in self._node.am.actors:
             _log.analyze(self._node.id, "+ OWN ACTOR", {'actor_id': actor_id, 'actor_name': actor_name})
             self._node.am.update_requirements(actor_id, req, False, move,
                                              callback=CalvinCB(self._migrated_cb, app=app,
                                                                actor_id=actor_id, cb=cb))
         else:
             _log.analyze(self._node.id, "+ OTHER NODE", {'actor_id': actor_id, 'actor_name': actor_name})
             self.storage.get_actor(actor_id, cb=CalvinCB(self._migrate_from_rt, app=app,
                                                               actor_id=actor_id, req=req,
                                                               move=move, cb=cb))
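The CalvinCB wrappers above pre-bind keyword arguments so the storage and actor-manager layers only have to supply the remaining arguments when they complete. A minimal standalone sketch of the same pattern (Python 3, functools.partial as a stand-in for CalvinCB; the aggregation in migrated_cb is a guess at what the real _migrated_cb might do, not taken from this snippet):

from functools import partial

def migrated_cb(status, app, actor_id, cb):
    # Hypothetical aggregation: record each actor's outcome and fire the
    # final callback once every actor has reported back.
    app['_migrated_actors'][actor_id] = status
    if cb and all(s is not None for s in app['_migrated_actors'].values()):
        cb(status=all(app['_migrated_actors'].values()))

def report(status):
    print("application migration finished:", status)

app = {'_migrated_actors': {'actor-1': None, 'actor-2': None}}
for actor_id in list(app['_migrated_actors']):
    done = partial(migrated_cb, app=app, actor_id=actor_id, cb=report)
    done(True)  # the actor manager would invoke this when the migration finishes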
    def _update_cache_request_finished(self, key, value, callback, force=False):
        """ Called by storage when the node is (not) found """
        _log.debug("Got response from storage key = %s, value = %s, callback = %s, force = %s", key, value, callback, force)
        _log.analyze(self.node.id, "+", {'value': value}, peer_node_id=key, tb=True)

        if response.isfailresponse(value):
            # the peer_id did not exist in storage
            _log.info("Failed to get node %s info from storage", key)
            if key in self._peer_cache:
                self._peer_cache.pop(key)
            if callback:
                callback(status=response.CalvinResponse(response.NOT_FOUND, {'peer_node_id': key}))
            return

        matching = [s for s in value['attributes']['indexed_public'] if "node_name" in s]
        if matching:
            first_match = matching[0].split("node_name/")[1]
            server_node_name_as_str = first_match.replace("/","-")
        else:
            server_node_name_as_str = None

        # Set values from storage
        self._peer_cache[key]['uris'] = value['uris']
        self._peer_cache[key]['timestamp'] = time.time()
        self._peer_cache[key]['server_name'] = server_node_name_as_str

        if 'proxy' not in value:
            # join the peer node
            self._link_request(key, callback=callback, force=True)
        else:
            if value['proxy'] == self.node.id:
                _log.error("No link to proxy client '%s'", key)
            else:
                self.link_request(value['proxy'], CalvinCB(self._routing_link_finished, dest_peer_id=key, callback=callback))
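The node_name handling above takes the first indexed_public attribute containing "node_name" and flattens the trailing path into a dash-separated server name. A small standalone illustration; the attribute string below is a made-up example of the indexed_public format, and only the split/replace logic mirrors the code:

indexed_public = ['/node/attribute/node_name/com/example//runtime-0']

matching = [s for s in indexed_public if "node_name" in s]
if matching:
    # Everything after "node_name/" is the hierarchical name; slashes become
    # dashes so the result can be used as a flat server name.
    server_node_name_as_str = matching[0].split("node_name/")[1].replace("/", "-")
else:
    server_node_name_as_str = None

print(server_node_name_as_str)  # com-example--runtime-0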
 def _got_response(status):
     if calvinresponse.isfailresponse(status) and status.status == calvinresponse.NOT_FOUND:
         # Leader moved? Retry
         del self.leaders_cache[peer_replication_id]
         if retry < 3:
             self.call_peer_leader(peer_replication_id, func, callback, retry=retry+1)
             return
     callback(status=status)
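_got_response above reads like a closure inside call_peer_leader: on NOT_FOUND it drops the cached leader and re-issues the lookup, giving up after three retries and propagating the status to the caller. A generic, self-contained sketch of that bounded-retry shape, with illustrative names:

def lookup_with_retry(cache, key, fetch, callback, retry=0, max_retries=3):
    def _got_response(status):
        if status == "NOT_FOUND" and retry < max_retries:
            cache.pop(key, None)  # the leader may have moved; drop the stale entry
            lookup_with_retry(cache, key, fetch, callback, retry + 1, max_retries)
            return
        callback(status=status)
    fetch(key, _got_response)

# Example: a fetch that fails twice before succeeding.
attempts = {'count': 0}
def fetch(key, cb):
    attempts['count'] += 1
    cb("NOT_FOUND" if attempts['count'] < 3 else "OK")

lookup_with_retry({}, "leader-1", fetch, callback=lambda status: print(status))  # prints OK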
 def _migrate_from_rt(self, key, value, app, actor_id, req, move, cb):
     if response.isfailresponse(value):
         self._migrated_cb(response.CalvinResponse(response.NOT_FOUND), app, actor_id, cb)
         return
     _log.analyze(self._node.id, "+", {'actor_id': actor_id, 'node_id': value['node_id']},
                                                             peer_node_id=value['node_id'])
     self._node.proto.actor_migrate(value['node_id'], CalvinCB(self._migrated_cb, app=app, actor_id=actor_id, cb=cb),
                                  actor_id, req, False, move)
    def replicate(self, replication_id, dst_node_id, callback, status=None, locked=False):
        """ Can't be called directly, only via replicate_by_requirements or replicate_by_known_placement
            due to READY check is handled in these functions
            Will perform the actual replication after aquiring a lock towards connected peers'
            potential replication manager.
        """
        if not locked:
            # Retry when we have lock
            return self.lock_peer_replication(replication_id,
                CalvinCB(self.replicate, replication_id, dst_node_id, callback, locked=True))
        elif locked and calvinresponse.isfailresponse(status):
            # Lock acquisition failed, abort the replication
            if callback:
                callback(calvinresponse.CalvinResponse(calvinresponse.BAD_REQUEST))
            return
        try:
            replication_data = self.managed_replications[replication_id]
        except KeyError:
            if callback:
                callback(calvinresponse.CalvinResponse(calvinresponse.BAD_REQUEST))
            return
        _log.analyze(self.node.id, "+", {'replication_id': replication_id, 'dst_node_id': dst_node_id})
        # TODO make name a property that combine name and counter in actor
        new_id = uuid("ACTOR")
        # FIXME change this time stuff when changing replication_loop
        replication_data.check_instances = time.time()
        replication_data.add_replica(new_id)
        new_name = replication_data.actor_state['private']["_name"] + "/{}".format(replication_data.counter)
        state = copy.deepcopy(replication_data.actor_state)
        state['private']['_name'] = new_name
        state['private']['_id'] = new_id
        state['private']['_replication_id']['index'] = replication_data.counter
        # Remove the replication port states; _got_port below repopulates the standard private port states
        del state['replication']['inports']
        del state['replication']['outports']
        rep_state = replication_data.actor_state['replication']
        # Need to first build connection_list from previous connections
        connection_list = []
        ports = [p['id'] for p in rep_state['inports'].values() + rep_state['outports'].values()]
        _log.debug("REPLICA CONNECT %s " % ports)

        def _got_port(key, value, port, dir):
            ports.remove(port['id'])
            _log.debug("REPLICA CONNECT got port %s %s" % (port['id'], value))
            if calvinresponse.isnotfailresponse(value) and 'peers' in value:
                new_port_id = uuid("PORT")
                connection_list.extend(
                    [(dst_node_id, new_port_id, self.node.id if p[0] == 'local' else p[0], p[1]) for p in value['peers']])
                state['private'][dir + 'ports'][port['name']] = copy.deepcopy(port)
                state['private'][dir + 'ports'][port['name']]['id'] = new_port_id
            else:
                # TODO Don't know how to handle this, retry? Why would the original port be gone from the registry?
                # Possibly happens when the application is destroyed while we replicate; seems OK to ignore.
                _log.warning("During replication failed to find original port in repository")
            if not ports:
                # Got all responses
                self._replicate_cont(replication_data, state, connection_list, dst_node_id, callback=callback)

        for port in rep_state['inports'].values():
            self.node.storage.get_port(port['id'], cb=CalvinCB(_got_port, port=port, dir="in"))
        for port in rep_state['outports'].values():
            self.node.storage.get_port(port['id'], cb=CalvinCB(_got_port, port=port, dir="out"))
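The _got_port closure above is a fan-in: the shared ports list is trimmed as each asynchronous storage reply arrives, and _replicate_cont only runs once the last port has answered. A stripped-down, self-contained sketch of that pattern with illustrative names:

def gather(port_ids, fetch_port, on_all_done):
    pending = list(port_ids)
    results = {}

    def _got_port(key, value):
        pending.remove(key)
        results[key] = value
        if not pending:  # the last outstanding reply has arrived
            on_all_done(results)

    for port_id in port_ids:
        fetch_port(port_id, _got_port)

# Example with a synchronous stand-in for storage.get_port.
gather(['p1', 'p2'],
       fetch_port=lambda pid, cb: cb(pid, {'peers': []}),
       on_all_done=lambda results: print("all ports fetched:", results))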
def storage_cb(self, key, value, handle, connection):
    missing = calvinresponse.isfailresponse(value)
    self.send_response(handle, connection, None if missing else json.dumps(value),
                       status=calvinresponse.NOT_FOUND if missing else calvinresponse.OK)
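storage_cb above maps a storage miss to an empty body with NOT_FOUND and a hit to the JSON-encoded value with OK. A tiny standalone illustration of that mapping; the numeric constants follow CalvinResponse's HTTP-style codes, and to_body_and_status is a hypothetical helper, not part of the API:

import json

OK, NOT_FOUND = 200, 404  # CalvinResponse status codes mirror HTTP

def to_body_and_status(value):
    missing = value is None  # stands in for calvinresponse.isfailresponse(value)
    return (None if missing else json.dumps(value),
            NOT_FOUND if missing else OK)

print(to_body_and_status(None))           # (None, 404)
print(to_body_and_status({'id': 'n1'}))   # ('{"id": "n1"}', 200)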