Example 1
    @defer.inlineCallbacks
    def terminate(self, header, connection_id):
        # return defer.fail( error.InternalNRMError('test termination failure') )

        log.msg('Terminate request from %s. Connection ID: %s' %
                (header.requester_nsa, connection_id),
                system=self.log_system)

        conn = yield self._getConnection(connection_id, header.requester_nsa)

        if conn.lifecycle_state == state.TERMINATED:
            defer.returnValue(conn.connection_id)

        self.scheduler.cancelCall(
            conn.connection_id)  # cancel end time tear down

        yield state.terminating(conn)
        self.logStateUpdate(conn, 'TERMINATING')

        yield self._doFreeResource(conn)

        # here the reply will practically always come before the ack
        header = nsa.NSIHeader(
            conn.requester_nsa, conn.requester_nsa
        )  # The NSA is both requester and provider in the backend, but this might be problematic without an aggregator
        yield self.parent_requester.terminateConfirmed(header,
                                                       conn.connection_id)

        yield state.terminated(conn)
        self.logStateUpdate(conn, 'TERMINATED')
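
All of these methods are Twisted inlineCallbacks generators: each yield waits on a Deferred and defer.returnValue hands the result back to the caller. Below is a minimal, self-contained sketch of that idiom only; terminate_connection, get_connection and free_resource are hypothetical stand-ins, not OpenNSA/NSI code.

from twisted.internet import defer, reactor

@defer.inlineCallbacks
def terminate_connection(get_connection, free_resource, connection_id):
    # Each yield suspends until the Deferred fires, mirroring the flow above:
    # fetch the connection, free its resources, then return the connection id.
    conn = yield get_connection(connection_id)
    yield free_resource(conn)
    defer.returnValue(connection_id)

def _demo():
    # Stand-in asynchronous helpers returning already-fired Deferreds.
    get_connection = lambda cid: defer.succeed({'connection_id': cid})
    free_resource  = lambda conn: defer.succeed(None)
    d = terminate_connection(get_connection, free_resource, 'UC-123456')
    d.addCallback(lambda cid: print('terminated', cid))
    d.addBoth(lambda _: reactor.stop())

if __name__ == '__main__':
    reactor.callWhenRunning(_demo)
    reactor.run()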
Example 2
    @defer.inlineCallbacks
    def terminate(self, header, connection_id, request_info=None):
        # return defer.fail( error.InternalNRMError('test termination failure') )

        log.msg('Terminate request from %s. Connection ID: %s' % (header.requester_nsa, connection_id), system=self.log_system)

        conn = yield self._getConnection(connection_id, header.requester_nsa)
        self._authorize(conn.source_port, conn.dest_port, header, request_info)

        if conn.lifecycle_state == state.TERMINATED:
            defer.returnValue(conn.connection_id)

        self.scheduler.cancelCall(conn.connection_id) # cancel end time tear down

        # if we passed end time, resources have already been freed
        free_resources = True
        if conn.lifecycle_state == state.PASSED_ENDTIME:
            free_resources = False

        yield state.terminating(conn)
        self.logStateUpdate(conn, 'TERMINATING')

        if free_resources:
            yield self._doFreeResource(conn)

        # here the reply will practically always come before the ack
        header = nsa.NSIHeader(conn.requester_nsa, conn.requester_nsa) # The NSA is both requester and provider in the backend, but this might be problematic without an aggregator
        yield self.parent_requester.terminateConfirmed(header, conn.connection_id)

        yield state.terminated(conn)
        self.logStateUpdate(conn, 'TERMINATED')
Example 3
    @defer.inlineCallbacks
    def terminate(self, header, connection_id):

        log.msg('', system=LOG_SYSTEM)
        log.msg('Terminate request. NSA: %s. Connection ID: %s' % (header.requester_nsa, connection_id), system=LOG_SYSTEM)

        conn = yield self.getConnection(header.requester_nsa, connection_id)

        if conn.lifecycle_state == state.TERMINATED:
            defer.returnValue(connection_id) # all good

        yield state.terminating(conn)

        defs = []
        sub_connections = yield conn.SubConnections.get()
        for sc in sub_connections:
            # we assume a provider is available
            provider = self.getProvider(sc.provider_nsa)
            header = nsa.NSIHeader(self.nsa_.urn(), sc.provider_nsa)
            d = provider.terminate(header, sc.connection_id)
            defs.append(d)

        results = yield defer.DeferredList(defs, consumeErrors=True)

        successes = [ r[0] for r in results ]
        if all(successes):
            yield state.terminated(conn)
            log.msg('Connection %s: Terminate succeeded' % conn.connection_id, system=LOG_SYSTEM)
            log.msg('Connection %s: All sub connections (%i) terminated' % (conn.connection_id, len(defs)), system=LOG_SYSTEM)
        else:
            # we are now in an inconsistent state...
            n_success = sum( [ 1 for s in successes if s ] )
            log.msg('Connection %s. Only %i of %i connections successfully terminated' % (conn.connection_id, n_success, len(defs)), system=LOG_SYSTEM)
            raise _createAggregateException(connection_id, 'terminate', results, error.ConnectionError)

        defer.returnValue(connection_id)
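
The aggregator's terminate fans out to every sub-connection and then inspects the (success, result) pairs that defer.DeferredList(defs, consumeErrors=True) returns, proceeding only when all succeeded. A short sketch of just that aggregation step; fake_terminate and the connection ids are made up for illustration.

from twisted.internet import defer

def fake_terminate(connection_id, ok=True):
    # Stand-in for provider.terminate(): an already-fired success or failure.
    if ok:
        return defer.succeed(connection_id)
    return defer.fail(RuntimeError('termination failed for %s' % connection_id))

@defer.inlineCallbacks
def terminate_all(connection_ids):
    defs = [ fake_terminate(cid, ok=(cid != 'bad')) for cid in connection_ids ]
    # consumeErrors=True turns failures into (False, Failure) entries instead of
    # letting them errback; the DeferredList itself always fires successfully.
    results = yield defer.DeferredList(defs, consumeErrors=True)
    successes = [ success for success, _ in results ]
    if all(successes):
        defer.returnValue('all %i sub connections terminated' % len(defs))
    n_success = sum( 1 for s in successes if s )
    raise RuntimeError('only %i of %i terminations succeeded' % (n_success, len(defs)))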
Example 4
    @defer.inlineCallbacks
    def terminateConfirmed(self, header, connection_id):

        sub_connection = yield self.getSubConnection(header.provider_nsa, connection_id)
        sub_connection.reservation_state = state.TERMINATED
        yield sub_connection.save()

        conn = yield sub_connection.ServiceConnection.get()
        sub_conns = yield conn.SubConnections.get()

        if all( [ sc.reservation_state == state.TERMINATED for sc in sub_conns ] ):
            yield state.terminated(conn) # we always allow, even though the canonical NSI state machine does not
            header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn())
            self.parent_requester.terminateConfirmed(header, conn.connection_id)
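
terminateConfirmed only propagates the confirmation upwards once every sub-connection has reached TERMINATED. A tiny sketch of that "mark one child, check all children" step, using plain dictionaries and a callback instead of the database models and parent requester:

TERMINATED = 'Terminated'

def confirm_termination(sub_connections, confirmed_id, notify_parent):
    # Mark the confirmed sub-connection, then notify upwards only when every
    # sibling is terminated as well (mirrors the all(...) check above).
    for sc in sub_connections:
        if sc['connection_id'] == confirmed_id:
            sc['state'] = TERMINATED
    if all( sc['state'] == TERMINATED for sc in sub_connections ):
        notify_parent()

subs = [ {'connection_id': 'A', 'state': 'Terminating'},
         {'connection_id': 'B', 'state': 'Terminating'} ]
confirm_termination(subs, 'A', lambda: print('parent notified'))  # no output yet
confirm_termination(subs, 'B', lambda: print('parent notified'))  # notifies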
Example 5
    @defer.inlineCallbacks
    def terminate(self, header, connection_id):
        # return defer.fail( error.InternalNRMError('test termination failure') )

        conn = yield self._getConnection(connection_id, header.requester_nsa)

        if conn.lifecycle_state == state.TERMINATED:
            defer.returnValue(conn.connection_id)

        if conn.lifecycle_state == state.CREATED:
            yield self._doEndtime(conn)

        yield state.terminating(conn)
        self.logStateUpdate(conn, 'TERMINATING')

        # here the reply will practically always come before the ack
        header = nsa.NSIHeader(conn.requester_nsa, conn.requester_nsa) # The NSA is both requester and provider in the backend, but this might be problematic without an aggregator
        yield self.parent_requester.terminateConfirmed(header, conn.connection_id)

        yield state.terminated(conn)
        self.logStateUpdate(conn, 'TERMINATED')
Example 6
    @defer.inlineCallbacks
    def terminate(self, header, connection_id):
        # return defer.fail( error.InternalNRMError('test termination failure') )

        log.msg('Terminate request from %s. Connection ID: %s' % (header.requester_nsa, connection_id), system=self.log_system)

        conn = yield self._getConnection(connection_id, header.requester_nsa)

        if conn.lifecycle_state == state.TERMINATED:
            defer.returnValue(conn.connection_id)

        self.scheduler.cancelCall(conn.connection_id) # cancel end time tear down

        yield state.terminating(conn)
        self.logStateUpdate(conn, 'TERMINATING')

        yield self._doFreeResource(conn)

        # here the reply will practically always come before the ack
        header = nsa.NSIHeader(conn.requester_nsa, conn.requester_nsa) # The NSA is both requester and provider in the backend, but this might be problematic without an aggregator
        yield self.parent_requester.terminateConfirmed(header, conn.connection_id)

        yield state.terminated(conn)
        self.logStateUpdate(conn, 'TERMINATED')
Example 7
    @defer.inlineCallbacks
    def terminate(self, header, connection_id, request_info=None):
        # return defer.fail( error.InternalNRMError('test termination failure') )

        log.msg('Terminate request from %s. Connection ID: %s' %
                (header.requester_nsa, connection_id),
                system=self.log_system)

        conn = yield self._getConnection(connection_id, header.requester_nsa)
        self._authorize(conn.source_port, conn.dest_port, header, request_info)

        if conn.lifecycle_state == state.TERMINATED:
            defer.returnValue(conn.connection_id)

        self.scheduler.cancelCall(
            conn.connection_id)  # cancel end time tear down

        # if we passed end time, resources have already been freed
        free_resources = True
        if conn.lifecycle_state == state.PASSED_ENDTIME:
            free_resources = False

        yield state.terminating(conn)
        self.logStateUpdate(conn, 'TERMINATING')

        if free_resources:
            yield self._doFreeResource(conn)

        # here the reply will practically always come before the ack
        header = nsa.NSIHeader(
            conn.requester_nsa, conn.requester_nsa
        )  # The NSA is both requester and provider in the backend, but this might be problematic without an aggregator
        yield self.parent_requester.terminateConfirmed(header,
                                                       conn.connection_id)

        yield state.terminated(conn)
        self.logStateUpdate(conn, 'TERMINATED')
Example 8
    @defer.inlineCallbacks
    def reserve(self, header, connection_id, global_reservation_id, description, criteria):

        log.msg('', system=LOG_SYSTEM)
        log.msg('Reserve request. NSA: %s. Connection ID: %s' % (header.requester_nsa, connection_id), system=LOG_SYSTEM)

        # rethink with modify
        if connection_id is not None:
            connection_exists = yield database.ServiceConnection.exists(['connection_id = ?', connection_id])
            if connection_exists:
                raise error.ConnectionExistsError('Connection with id %s already exists' % connection_id)
            raise NotImplementedError('Cannot handle modification of existing connections yet')

        connection_id = self.conn_prefix + ''.join( [ random.choice(string.hexdigits[:16]) for _ in range(12) ] )

        sd = criteria.service_def
        source_stp = sd.source_stp
        dest_stp   = sd.dest_stp

        # policy check: one endpoint must be in local network
        if not (source_stp.network == self.network or dest_stp.network == self.network):
            raise error.ConnectionCreateError('The connection does not terminate in the network, rejecting request')

        # check that we know the networks
        #self.topology.getNetwork(source_stp.network)
        #self.topology.getNetwork(dest_stp.network)

        # check that we have path vectors to topologies
        if self.route_vectors.vector(source_stp.network) is None:
            raise error.ConnectionCreateError('No known routes to network %s' % source_stp.network)
        if self.route_vectors.vector(dest_stp.network) is None:
            raise error.ConnectionCreateError('No known routes to network %s' % dest_stp.network)

        # if the link terminates at our network, check that ports exists
        if source_stp.network == self.network:
            self.topology.getNetwork(self.network).getPort(source_stp.port)
        if dest_stp.network == self.network:
            self.topology.getNetwork(self.network).getPort(dest_stp.port)

        conn = database.ServiceConnection(connection_id=connection_id, revision=0, global_reservation_id=global_reservation_id, description=description,
                            requester_nsa=header.requester_nsa, requester_url=header.reply_to, reserve_time=datetime.datetime.utcnow(),
                            reservation_state=state.RESERVE_START, provision_state=state.RELEASED, lifecycle_state=state.CREATED,
                            source_network=source_stp.network, source_port=source_stp.port, source_label=source_stp.label,
                            dest_network=dest_stp.network, dest_port=dest_stp.port, dest_label=dest_stp.label,
                            start_time=criteria.schedule.start_time, end_time=criteria.schedule.end_time,
                            symmetrical=sd.symmetric, directionality=sd.directionality, bandwidth=sd.capacity, connection_trace=header.connection_trace)
        yield conn.save()

        # Here we should return / callback and spawn off the path creation

        # Note: At this point STP Labels are candidates and they will need to be changed later

    #    def reserveRequestsDone(results):
    #        successes = [ r[0] for r in results ]
    #        if all(successes):
    #            state.reserved(conn)
    #            log.msg('Connection %s: Reserve succeeded' % self.connection_id, system=LOG_SYSTEM)
    #            self.scheduler.scheduleTransition(self.service_parameters.start_time, scheduled, state.RELEASED)
    #            return self
    #
    #        else:
    #            # terminate non-failed connections
    #            # currently we don't try and be too clever about cleaning, just do it, and switch state
    #            defs = []
    #            reserved_connections = [ conn for success,conn in results if success ]
    #            for rc in reserved_connections:
    #                d = rc.terminate()
    #                d.addCallbacks(
    #                    lambda c : log.msg('Successfully terminated sub connection after partial reservation failure %s %s' % (c.curator(), connPath(c)), system=LOG_SYSTEM),
    #                    lambda f : log.msg('Error terminating connection after partial-reservation failure: %s' % str(f), system=LOG_SYSTEM)
    #                )
    #                defs.append(d)
    #            dl = defer.DeferredList(defs)
    #            dl.addCallback( self.state.terminatedFailed )
    #
    #            err = self._createAggregateException(results, 'reservations', error.ConnectionCreateError)
    #            raise err

        yield state.reserveChecking(conn) # this also acts a lock

        if conn.source_network == self.network and conn.dest_network == self.network:
            path_info = ( conn.connection_id, self.network, conn.source_port, shortLabel(conn.source_label), conn.dest_port, shortLabel(conn.dest_label) )
            log.msg('Connection %s: Local link creation: %s %s#%s -> %s#%s' % path_info, system=LOG_SYSTEM)
            paths = [ [ nsa.Link(self.network, conn.source_port, conn.dest_port, conn.source_label, conn.dest_label) ] ]

        else:
            # log about creation and the connection type
            log.msg('Connection %s: Aggregate path creation: %s -> %s' % (conn.connection_id, str(source_stp), str(dest_stp)), system=LOG_SYSTEM)
            # making the connection is the same for all though :-)

#            paths = self.topology.findPaths(source_stp, dest_stp, conn.bandwidth)
#            # error out if we could not find a path
#            if not paths:
#                error_msg = 'Could not find a path for route %s/%s -> %s/%s' % (source_stp.network, source_stp.port, dest_stp.network, dest_stp.port)
#                log.msg(error_msg, system=LOG_SYSTEM)
#                raise error.TopologyError(error_msg)
#            paths.sort(key=lambda e : len(e))

            # -- vector chain path selection

            # how to do this with a path vector:
            # 1. find topology to use from vector
            # 2. create abstracted path: local link + rest

            if source_stp.network == self.network:
                local_stp      = source_stp
                remote_stp     = dest_stp
            else:
                local_stp      = dest_stp
                remote_stp     = source_stp

            vector = self.route_vectors.vector(remote_stp.network)
            log.msg('Vector to %s via %s' % (remote_stp.network, vector), system=LOG_SYSTEM)
            ports = self.network_topology.findPorts(True)
            demarc_ports = []
            for p in ports:
                if p.outbound_port.remote_port is None or p.inbound_port.remote_port is None:
                    continue # filter out local termination ports
                if p.outbound_port.remote_port.startswith(remote_stp.network) and p.inbound_port.remote_port.startswith(remote_stp.network):
                    demarc_ports.append(p)

            if not demarc_ports:
                raise error.ConnectionCreateError('Could not find a demarcation port to network topology %s' % remote_stp.network)

            ldp = demarc_ports[0] # most of the time we will only have one anyway, should iterate and build multiple paths

            rd = self.topology.findDemarcationPort(ldp)
            if not rd:
                raise error.ConnectionCreateError('Could not find a demarcation port for port %s' % ldp)
            rdn, rdp = rd

            paths = [ [ nsa.Link(local_stp.network, local_stp.port, ldp.id_, local_stp.label, ldp.label()),
                        nsa.Link(remote_stp.network, rdp, remote_stp.port, ldp.label(), remote_stp.label) ] ] # the ldp label here isn't quite correct


        selected_path = paths[0] # shortest path
        log_path = ' -> '.join( [ str(p) for p in selected_path ] )
        log.msg('Attempting to create path %s' % log_path, system=LOG_SYSTEM)

        for link in selected_path:
            try:
                self.topology.getNSA(link.network)
            except error.TopologyError:
                raise error.ConnectionCreateError('No provider for network %s. Cannot create link' % link.network)

        conn_info = []
        for idx, link in enumerate(selected_path):

            provider_nsa = self.topology.getNSA(link.network)
            provider     = self.getProvider(provider_nsa.urn())

            conn_trace = (header.connection_trace or []) + [ self.nsa_.urn() + ':' + conn.connection_id ]
            c_header = nsa.NSIHeader(self.nsa_.urn(), provider_nsa.urn(), connection_trace=conn_trace)

            # this has to be done more generic sometime
            sd = nsa.Point2PointService(nsa.STP(link.network, link.src_port, link.src_label),
                                        nsa.STP(link.network, link.dst_port, link.dst_label),
                                        conn.bandwidth, sd.directionality, sd.symmetric)

            # save info for db saving
            self.reservations[c_header.correlation_id] = {
                                                        'provider_nsa'  : provider_nsa.urn(),
                                                        'service_connection_id' : conn.id,
                                                        'order_id'       : idx,
                                                        'source_network' : link.network,
                                                        'source_port'    : link.src_port,
                                                        'dest_network'   : link.network,
                                                        'dest_port'      : link.dst_port }

            crt = nsa.Criteria(criteria.revision, criteria.schedule, sd)

            d = provider.reserve(c_header, None, conn.global_reservation_id, conn.description, crt)
            conn_info.append( (d, provider_nsa) )

            # Don't bother trying to save connection here, wait for reserveConfirmed

#            @defer.inlineCallbacks
#            def reserveResponse(connection_id, link_provider_nsa, order_id):
#                # need to collapse the label values when getting reserveConfirm
#                log.msg('Connection reservation for %s via %s acked' % (connection_id, link_provider_nsa), debug=True, system=LOG_SYSTEM)
#                # should probably do some sanity checks here
#                sp = service_params
#                local_link = True if link_provider_nsa == self.nsa_ else False
#                sc = database.SubConnection(provider_nsa=link_provider_nsa.urn(),
#                                            connection_id=connection_id, local_link=local_link, revision=0, service_connection_id=conn.id, order_id=order_id,
#                                            global_reservation_id=global_reservation_id, description=description,
#                                            reservation_state=state.RESERVE_START, provision_state=state.RELEASED, lifecycle_state=state.CREATED, data_plane_active=False,
#                                            source_network=sp.source_stp.network, source_port=sp.source_stp.port, source_label=sp.source_stp.label,
#                                            dest_network=sp.dest_stp.network, dest_port=sp.dest_stp.port, dest_label=sp.dest_stp.label,
#                                            start_time=sp.start_time.isoformat(), end_time=sp.end_time.isoformat(), bandwidth=sp.bandwidth)
#                yield sc.save()
#                defer.returnValue(sc)
#
#            d.addCallback(reserveResponse, provider_nsa, idx)

        results = yield defer.DeferredList( [ c[0] for c in conn_info ], consumeErrors=True) # doesn't errback
        successes = [ r[0] for r in results ]

        if all(successes):
            log.msg('Connection %s: Reserve acked' % conn.connection_id, system=LOG_SYSTEM)
            defer.returnValue(connection_id)

        else:
            # terminate non-failed connections
            # currently we don't try and be too clever about cleaning, just do it, and switch state
            yield state.terminating(conn)
            defs = []
            reserved_connections = [ (sc_id, provider_nsa) for (success,sc_id),(_,provider_nsa) in zip(results, conn_info) if success ]
            for (sc_id, provider_nsa) in reserved_connections:

                provider = self.getProvider(provider_nsa.urn())
                t_header = nsa.NSIHeader(self.nsa_.urn(), provider_nsa.urn())

                d = provider.terminate(t_header, sc_id)
                d.addCallbacks(
                    lambda c, sc_id=sc_id, provider_nsa=provider_nsa: log.msg('Successfully terminated sub connection %s at %s after partial reservation failure.' % (sc_id, provider_nsa.urn()), system=LOG_SYSTEM),
                    lambda f : log.msg('Error terminating connection after partial-reservation failure: %s' % str(f), system=LOG_SYSTEM)
                )
                defs.append(d)
            dl = defer.DeferredList(defs)
            yield dl
            yield state.terminated(conn)

            err = _createAggregateException(connection_id, 'reservations', results, error.ConnectionCreateError)
            raise err
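
On a partial reservation failure the aggregator zips the DeferredList results with the providers it contacted, terminates the sub-connections that did reserve, and only then raises the aggregate error. A compact sketch of that pairing-and-cleanup pattern; FakeProvider and the connection ids are hypothetical.

from twisted.internet import defer

class FakeProvider:
    # Stand-in for a remote provider: reserve() may fail, terminate() succeeds.
    def __init__(self, name, reserve_ok=True):
        self.name = name
        self.reserve_ok = reserve_ok
    def reserve(self, connection_id):
        if self.reserve_ok:
            return defer.succeed(connection_id)
        return defer.fail(RuntimeError('reserve failed at %s' % self.name))
    def terminate(self, connection_id):
        return defer.succeed(connection_id)

@defer.inlineCallbacks
def reserve_path(providers):
    conn_info = [ (p.reserve('SC-%i' % idx), p) for idx, p in enumerate(providers) ]
    results = yield defer.DeferredList([ d for d, _ in conn_info ], consumeErrors=True)

    if all( success for success, _ in results ):
        defer.returnValue([ value for _, value in results ])

    # Tear down the reservations that did go through before reporting failure.
    cleanup = [ provider.terminate(sc_id)
                for (success, sc_id), (_, provider) in zip(results, conn_info)
                if success ]
    yield defer.DeferredList(cleanup, consumeErrors=True)
    raise RuntimeError('partial reservation failure; %i sub connections cleaned up' % len(cleanup))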