Example #1
    @defer.inlineCallbacks
    def _doTeardown(self, conn):
        # this one is not used as a stand-alone, just a utility function

        src_target = self.connection_manager.getTarget(conn.source_port,
                                                       conn.source_label)
        dst_target = self.connection_manager.getTarget(conn.dest_port,
                                                       conn.dest_label)
        try:
            log.msg('Connection %s: Deactivating data plane...' %
                    conn.connection_id,
                    system=self.log_system)
            yield self.connection_manager.teardownLink(conn.connection_id,
                                                       src_target, dst_target,
                                                       conn.bandwidth)
        except Exception as e:
            # We need to mark failure in state machine here somehow....
            log.msg('Connection %s: Error deactivating data plane: %s' %
                    (conn.connection_id, str(e)),
                    system=self.log_system)
            # should include stack trace
            conn.data_plane_active = False  # technically we don't know, but for NSI that means not active
            yield conn.save()

            header = nsa.NSIHeader(
                conn.requester_nsa, conn.requester_nsa
            )  # The NSA is both requester and provider in the backend, but this might be problematic without aggregator
            now = datetime.datetime.utcnow()
            service_ex = None
            self.parent_requester.errorEvent(header, conn.connection_id,
                                             self.getNotificationId(), now,
                                             'deactivateFailed', None,
                                             service_ex)

            defer.returnValue(None)

        try:
            conn.data_plane_active = False  # technically we don't know, but for NSI that means not active
            yield conn.save()
            log.msg('Connection %s: Data plane deactivated' %
                    conn.connection_id,
                    system=self.log_system)

            now = datetime.datetime.utcnow()
            data_plane_status = (False, conn.revision, True)  # active, version, consistent
            header = nsa.NSIHeader(
                conn.requester_nsa, conn.requester_nsa
            )  # The NSA is both requester and provider in the backend, but this might be problematic without aggregator
            self.parent_requester.dataPlaneStateChange(
                header, conn.connection_id, self.getNotificationId(), now,
                data_plane_status)

        except Exception as e:
            log.msg('Error in post-deactivation: %s' % e)
            log.err(e)
Example #2
    @defer.inlineCallbacks
    def terminate(self, header, connection_id):
        # return defer.fail( error.InternalNRMError('test termination failure') )

        log.msg('Terminate request from %s. Connection ID: %s' %
                (header.requester_nsa, connection_id),
                system=self.log_system)

        conn = yield self._getConnection(connection_id, header.requester_nsa)

        if conn.lifecycle_state == state.TERMINATED:
            defer.returnValue(conn.cid)

        self.scheduler.cancelCall(
            conn.connection_id)  # cancel end time tear down

        yield state.terminating(conn)
        self.logStateUpdate(conn, 'TERMINATING')

        yield self._doFreeResource(conn)

        # here the reply will practically always come before the ack
        header = nsa.NSIHeader(
            conn.requester_nsa, conn.requester_nsa
        )  # The NSA is both requester and provider in the backend, but this might be problematic without aggregator
        yield self.parent_requester.terminateConfirmed(header,
                                                       conn.connection_id)

        yield state.terminated(conn)
        self.logStateUpdate(conn, 'TERMINATED')
Example #3
    @defer.inlineCallbacks
    def _doActivate(self, conn):

        src_target = self.connection_manager.getTarget(
            conn.source_port, conn.source_label.type_,
            conn.source_label.labelValue())
        dst_target = self.connection_manager.getTarget(
            conn.dest_port, conn.dest_label.type_,
            conn.dest_label.labelValue())
        try:
            log.msg('Connection %s: Activating data plane...' %
                    conn.connection_id,
                    system=self.log_system)
            yield self.connection_manager.setupLink(conn.connection_id,
                                                    src_target, dst_target,
                                                    conn.bandwidth)
        except Exception as e:
            # We need to mark failure in state machine here somehow....
            log.msg('Connection %s: Error activating data plane: %s' %
                    (conn.connection_id, str(e)),
                    system=self.log_system)
            # should include stack trace
            conn.data_plane_active = False
            yield conn.save()

            header = nsa.NSIHeader(
                conn.requester_nsa, conn.requester_nsa
            )  # The NSA is both requester and provider in the backend, but this might be problematic without aggregator
            now = datetime.datetime.utcnow()
            service_ex = None
            self.parent_requester.errorEvent(header, conn.connection_id,
                                             self.getNotificationId(), now,
                                             'activateFailed', None,
                                             service_ex)

            defer.returnValue(None)
Example #4
    @defer.inlineCallbacks
    def reserveAbort(self, header, connection_id):

        log.msg('ReserveAbort request from %s. Connection ID: %s' %
                (header.requester_nsa, connection_id),
                system=self.log_system)

        try:
            log.msg('ReserveAbort request. Connection ID: %s' % connection_id,
                    system=self.log_system)

            conn = yield self._getConnection(connection_id,
                                             header.requester_nsa)
            if conn.lifecycle_state in (state.TERMINATING, state.TERMINATED):
                raise error.ConnectionGoneError(
                    'Connection %s has been terminated' % connection_id)

            yield self._doReserveRollback(conn)

            header = nsa.NSIHeader(
                conn.requester_nsa, conn.requester_nsa
            )  # The NSA is both requester and provider in the backend, but this might be problematic without aggregator
            self.parent_requester.reserveAbortConfirmed(
                header, conn.connection_id)

        except Exception as e:
            log.msg('Error in reserveAbort: %s: %s' % (type(e), e),
                    system=self.log_system)
Example #5
class DUDBackendTest(GenericProviderTest, unittest.TestCase):

    requester_agent = nsa.NetworkServiceAgent('test-requester:nsa',
                                              'dud_endpoint1')
    provider_agent = nsa.NetworkServiceAgent(GenericProviderTest.base + ':nsa',
                                             'dud_endpoint2')

    header = nsa.NSIHeader(requester_agent.urn(), provider_agent.urn())

    def setUp(self):

        self.clock = task.Clock()

        self.requester = common.DUDRequester()

        nrm_ports = nrm.parsePortSpec(
            StringIO.StringIO(topology.ARUBA_TOPOLOGY))

        self.backend = dud.DUDNSIBackend(self.network, nrm_ports,
                                         self.requester, {})

        self.provider = self.backend
        self.provider.scheduler.clock = self.clock
        self.provider.startService()

        tcf = os.path.expanduser('~/.opennsa-test.json')
        tc = json.load(open(tcf))
        database.setupDatabase(tc['database'], tc['database-user'],
                               tc['database-password'])

        # request stuff
        self.start_time = datetime.datetime.utcnow() + datetime.timedelta(
            seconds=2)
        self.end_time = datetime.datetime.utcnow() + datetime.timedelta(
            seconds=10)

        self.schedule = nsa.Schedule(self.start_time, self.end_time)
        self.sd = nsa.Point2PointService(self.source_stp, self.dest_stp,
                                         self.bandwidth, cnt.BIDIRECTIONAL,
                                         False, None)
        self.criteria = nsa.Criteria(0, self.schedule, self.sd)

        return self.backend.restore_defer

    @defer.inlineCallbacks
    def tearDown(self):
        from opennsa.backends.common import genericbackend
        yield self.provider.stopService()
        # delete all connections from test database
        yield genericbackend.GenericBackendConnections.deleteAll()

        # close database connections, so we don't run out
        from twistar.registry import Registry
        Registry.DBPOOL.close()
Example #6
    def render_POST(self, request):

        allowed, msg, request_info = requestauthz.checkAuthz(
            request, self.allowed_hosts)
        if not allowed:
            payload = msg + RN
            return _requestResponse(request, 401, payload)  # Unauthorized

        state_command = request.content.read()
        state_command = state_command.upper()
        if state_command not in ('COMMIT', 'ABORT', 'PROVISION', 'RELEASE',
                                 'TERMINATE'):
            payload = 'Invalid state command specified' + RN
            return _requestResponse(request, 400, payload)  # Client Error

        header = nsa.NSIHeader('rest-dud-requester',
                               'rest-dud-provider')  # completely bogus header

        if state_command == 'COMMIT':
            d = self.provider.reserveCommit(header, self.connection_id,
                                            request_info)
        elif state_command == 'ABORT':
            d = self.provider.reserveAbort(header, self.connection_id,
                                           request_info)
        elif state_command == 'PROVISION':
            d = self.provider.provision(header, self.connection_id,
                                        request_info)
        elif state_command == 'RELEASE':
            d = self.provider.release(header, self.connection_id, request_info)
        elif state_command == 'TERMINATE':
            d = self.provider.terminate(header, self.connection_id,
                                        request_info)
        else:
            payload = 'Unrecognized command (should not happen)' + RN
            return _requestResponse(request, 500, payload)  # Server Error

        def commandDone(_):
            payload = 'ACK' + RN
            _finishRequest(request, 200, payload)  # OK

        def commandError(err):
            log.msg('Error during state switch: %s' % str(err),
                    system=LOG_SYSTEM)
            payload = str(err.getErrorMessage()) + RN
            if isinstance(err.value, error.NSIError):
                _finishRequest(request, 400, payload)  # Client Error
            else:
                log.err(err)
                _finishRequest(request, 500, payload)  # Server Error

        d.addCallbacks(commandDone, commandError)
        return server.NOT_DONE_YET
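
A hedged client-side sketch of driving this state resource (the URL, port, and mount path are hypothetical; the body is one of the commands checked above, matched case-insensitively):

# Hypothetical driver for the state resource above (Python 2 stdlib).
import urllib2

url = 'http://localhost:9080/NSI/connections/<connection-id>/state'  # hypothetical mount point
req = urllib2.Request(url, data='provision')  # supplying data makes this a POST
print urllib2.urlopen(req).read()  # 'ACK' on success, error text otherwise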
Example #7
    @defer.inlineCallbacks
    def _doReserve(self, conn, correlation_id):

        # we have already checked resource availability, so can progress directly through checking
        state.reserveMultiSwitch(conn, state.RESERVE_CHECKING,
                                 state.RESERVE_HELD)
        yield conn.save()
        self.logStateUpdate(conn, 'RESERVE CHECKING/HELD')

        # schedule 2PC timeout
        if self.scheduler.hasScheduledCall(conn.connection_id):
            # this means that the build scheduler made a call while we yielded
            self.scheduler.cancelCall(conn.connection_id)

        now = datetime.datetime.utcnow()
        timeout_time = min(now + datetime.timedelta(seconds=self.TPC_TIMEOUT),
                           conn.end_time)

        self.scheduler.scheduleCall(conn.connection_id, timeout_time,
                                    self._doReserveTimeout, conn)
        td = timeout_time - datetime.datetime.utcnow()
        log.msg(
            'Connection %s: reserve abort scheduled for %s UTC (%i seconds)' %
            (conn.connection_id, timeout_time.replace(microsecond=0),
             td.total_seconds()),
            system=self.log_system)

        schedule = nsa.Schedule(conn.start_time, conn.end_time)
        sc_source_stp = nsa.STP(conn.source_network, conn.source_port,
                                conn.source_label)
        sc_dest_stp = nsa.STP(conn.dest_network, conn.dest_port,
                              conn.dest_label)
        sd = nsa.Point2PointService(
            sc_source_stp, sc_dest_stp, conn.bandwidth, cnt.BIDIRECTIONAL,
            False, None)  # we fake some things due to db limitations
        crit = nsa.Criteria(conn.revision, schedule, sd)

        header = nsa.NSIHeader(
            conn.requester_nsa,
            conn.requester_nsa,
            correlation_id=correlation_id
        )  # The NSA is both requester and provider in the backend, but this might be problematic without aggregator
        yield self.parent_requester.reserveConfirmed(
            header, conn.connection_id, conn.global_reservation_id,
            conn.description, crit)
Example #8
    @defer.inlineCallbacks
    def reserveAbort(self, header, connection_id, request_info=None):

        log.msg('ReserveAbort request from %s. Connection ID: %s' %
                (header.requester_nsa, connection_id),
                system=self.log_system)

        conn = yield self._getConnection(connection_id, header.requester_nsa)
        self._authorize(conn.source_port, conn.dest_port, header, request_info)

        if conn.lifecycle_state in (state.TERMINATING, state.TERMINATED):
            raise error.ConnectionGoneError(
                'Connection %s has been terminated' % connection_id)

        yield self._doReserveRollback(conn)

        header = nsa.NSIHeader(
            conn.requester_nsa, conn.requester_nsa
        )  # The NSA is both requester and provider in the backend, but this might be problematic without aggregator
        self.parent_requester.reserveAbortConfirmed(header, conn.connection_id)
Example #9
    @defer.inlineCallbacks
    def _doReserveTimeout(self, conn):

        try:
            yield state.reserveTimeout(conn)
            self.logStateUpdate(conn, 'RESERVE TIMEOUT')

            yield self._doReserveRollback(conn)

            header = nsa.NSIHeader(
                conn.requester_nsa, conn.requester_nsa
            )  # The NSA is both requester and provider in the backend, but this might be problematic without aggregator
            now = datetime.datetime.utcnow()
            # the conn.requester_nsa is somewhat problematic - the backend should really know its identity
            self.parent_requester.reserveTimeout(header, conn.connection_id,
                                                 self.getNotificationId(), now,
                                                 self.TPC_TIMEOUT,
                                                 conn.connection_id,
                                                 conn.requester_nsa)

        except Exception as e:
            log.msg('Error in reserveTimeout: %s: %s' % (type(e), e),
                    system=self.log_system)
Example #10
def parseRequest(soap_data):

    headers, bodies = minisoap.parseSoapPayload(soap_data)

    if headers is None:
        raise ValueError('No header specified in payload')
    elif len(headers) > 1:
        raise ValueError('Multiple headers specified in payload')

    header = nsiframework.parseElement(headers[0])
    security_attributes = []
    if header.sessionSecurityAttr:
        for ssa in header.sessionSecurityAttr:
            for attr in ssa.Attributes:
                for av in attr.AttributeValue:
                    if av is None:
                        continue
                    security_attributes.append(
                        nsa.SecurityAttribute(attr.Name, av))

    #if header.protocolVersion not in [ cnt.CS2_REQUESTER, cnt.CS2_PROVIDER ]:
    #    raise ValueError('Invalid protocol "%s". Only %s supported' % (header.protocolVersion, cnt.CS2_SERVICE_TYPE))

    if len(bodies) == 0:
        body = None
    elif len(bodies) == 1:
        body = nsiconnection.parseElement(bodies[0])
    else:
        body = [nsiconnection.parseElement(b) for b in bodies]

    nsi_header = nsa.NSIHeader(header.requesterNSA,
                               header.providerNSA,
                               header.correlationId,
                               header.replyTo,
                               security_attributes=security_attributes,
                               connection_trace=header.connectionTrace)

    return nsi_header, body
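
A minimal usage sketch for parseRequest (soap_data is a hypothetical stand-in for the raw bytes of an NSI CS2 SOAP message; the attribute names follow the NSIHeader usage in the other examples):

# Hypothetical driver; soap_data is the raw SOAP payload read off the wire.
nsi_header, body = parseRequest(soap_data)
print nsi_header.requester_nsa, nsi_header.provider_nsa, nsi_header.correlation_id
for sa in nsi_header.security_attributes:
    print sa  # each entry is an nsa.SecurityAttribute(name, value)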
Example #11
    @defer.inlineCallbacks
    def terminate(self, header, connection_id, request_info=None):
        # return defer.fail( error.InternalNRMError('test termination failure') )

        log.msg('Terminate request from %s. Connection ID: %s' %
                (header.requester_nsa, connection_id),
                system=self.log_system)

        conn = yield self._getConnection(connection_id, header.requester_nsa)
        self._authorize(conn.source_port, conn.dest_port, header, request_info)

        if conn.lifecycle_state == state.TERMINATED:
            defer.returnValue(conn.cid)

        self.scheduler.cancelCall(
            conn.connection_id)  # cancel end time tear down

        # if we passed end time, resources have already been freed
        free_resources = True
        if conn.lifecycle_state == state.PASSED_ENDTIME:
            free_resources = False

        yield state.terminating(conn)
        self.logStateUpdate(conn, 'TERMINATING')

        if free_resources:
            yield self._doFreeResource(conn)

        # here the reply will practically always come before the ack
        header = nsa.NSIHeader(
            conn.requester_nsa, conn.requester_nsa
        )  # The NSA is both requester and provider in the backend, but this might be problematic without aggregator
        yield self.parent_requester.terminateConfirmed(header,
                                                       conn.connection_id)

        yield state.terminated(conn)
        self.logStateUpdate(conn, 'TERMINATED')
Example #12
class AggregatorTest(GenericProviderTest, unittest.TestCase):

    requester_agent = nsa.NetworkServiceAgent('test-requester:nsa',
                                              'dud_endpoint1')
    provider_agent = nsa.NetworkServiceAgent(GenericProviderTest.base + ':nsa',
                                             'dud_endpoint2')
    header = nsa.NSIHeader(
        requester_agent.urn(),
        provider_agent.urn(),
        connection_trace=[requester_agent.urn() + ':1'],
        security_attributes=[nsa.SecurityAttribute('user', 'testuser')])

    def setUp(self):

        tcf = os.path.expanduser('~/.opennsa-test.json')
        tc = json.load(open(tcf))
        database.setupDatabase(tc['database'], tc['database-user'],
                               tc['database-password'])

        self.requester = common.DUDRequester()

        self.clock = task.Clock()

        nrm_ports = nrm.parsePortSpec(
            StringIO.StringIO(topology.ARUBA_TOPOLOGY))
        network_topology = nml.createNMLNetwork(nrm_ports, self.network,
                                                self.network)

        self.backend = dud.DUDNSIBackend(self.network, nrm_ports,
                                         self.requester, {})
        self.backend.scheduler.clock = self.clock

        route_vectors = gns.RouteVectors([cnt.URN_OGF_PREFIX + self.network])
        route_vectors.updateVector(self.provider_agent.identity, 0,
                                   [self.network], {})

        pl = plugin.BasePlugin()
        pl.init({config.NETWORK_NAME: self.network}, None)

        pr = provreg.ProviderRegistry(
            {self.provider_agent.urn(): self.backend}, {})
        self.provider = aggregator.Aggregator(self.network,
                                              self.provider_agent,
                                              network_topology, route_vectors,
                                              self.requester, pr, [], pl)

        # set parent for backend, we need to create the aggregator before this can be done
        self.backend.parent_requester = self.provider
        self.backend.startService()

        # request stuff
        self.start_time = datetime.datetime.utcnow() + datetime.timedelta(
            seconds=2)
        self.end_time = datetime.datetime.utcnow() + datetime.timedelta(
            seconds=10)

        self.schedule = nsa.Schedule(self.start_time, self.end_time)
        self.sd = nsa.Point2PointService(self.source_stp, self.dest_stp,
                                         self.bandwidth, cnt.BIDIRECTIONAL,
                                         False, None)
        self.criteria = nsa.Criteria(0, self.schedule, self.sd)

    @defer.inlineCallbacks
    def tearDown(self):
        from opennsa.backends.common import genericbackend
        # keep it simple...
        yield genericbackend.GenericBackendConnections.deleteAll()
        yield database.SubConnection.deleteAll()
        yield database.ServiceConnection.deleteAll()
        # close database connections, so we don't run out
        from twistar.registry import Registry
        Registry.DBPOOL.close()
Example #13
    def render_POST(self, request):

        allowed, msg, request_info = requestauthz.checkAuthz(
            request, self.allowed_hosts)
        if not allowed:
            payload = msg + RN
            return _requestResponse(request, 401, payload)  # Unauthorized

        payload = request.content.read()

        if len(payload) == 0:
            log.msg('No data received in request', system=LOG_SYSTEM)
            payload = 'No data received in request' + RN
            return _requestResponse(request, 400, payload)  # Bad Request

        if len(payload) > 32 * 1024:
            log.msg('Rejecting request, payload too large. Length %i' %
                    len(payload),
                    system=LOG_SYSTEM)
            payload = 'Requests too large' + RN
            return _requestResponse(request, 413, payload)  # Payload Too Large

        try:
            data = json.loads(payload)
        except ValueError:
            log.msg('Invalid JSON data received, returning 400',
                    system=LOG_SYSTEM)
            payload = 'Invalid JSON data' + RN
            return _requestResponse(request, 400, payload)  # Bad Request

        def createResponse(connection_id):

            payload = 'Connection created' + RN
            header = {'location': self.base_path + '/' + connection_id}
            _finishRequest(request, 201, payload, header)  # Created
            return connection_id

        # extract stuffs
        try:
            source = data['source']
            if not source.startswith(cnt.URN_OGF_PREFIX):
                source = cnt.URN_OGF_PREFIX + source

            destination = data['destination']
            if not destination.startswith(cnt.URN_OGF_PREFIX):
                destination = cnt.URN_OGF_PREFIX + destination

            source_stp = helper.createSTP(str(source))
            destination_stp = helper.createSTP(str(destination))

            start_time = xmlhelper.parseXMLTimestamp(
                data[START_TIME]) if START_TIME in data else None
            end_time = xmlhelper.parseXMLTimestamp(
                data[END_TIME]) if END_TIME in data else None
            capacity = data.get('capacity', 0)  # maybe None should just mean best effort

            # auto commit (default true) and auto provision (default false)
            auto_commit = bool(data.get('auto_commit', True))
            auto_provision = bool(data.get('auto_provision', False))

            if auto_provision and not auto_commit:
                msg = 'Cannot have auto-provision without auto-commit'
                log.msg('Rejecting request: ' + msg, system=LOG_SYSTEM)
                return _requestResponse(request, 400, msg + RN)  # Bad Request

            # fillers, we don't really do this in this api
            symmetric = False
            ero = None
            params = None
            version = 0

            service_def = nsa.Point2PointService(source_stp, destination_stp,
                                                 capacity, cnt.BIDIRECTIONAL,
                                                 symmetric, ero, params)
            schedule = nsa.Schedule(start_time, end_time)
            criteria = nsa.Criteria(version, schedule, service_def)

            header = nsa.NSIHeader(
                'rest-dud-requester',
                'rest-dud-provider')  # completely bogus header

            d = self.provider.reserve(
                header, None, None, None, criteria, request_info
            )  # nones are connection_id, global resv id, description
            d.addCallbacks(createResponse,
                           _createErrorResponse,
                           errbackArgs=(request, ))

            if auto_commit:

                @defer.inlineCallbacks
                def connectionCreated(conn_id):
                    if conn_id is None:
                        # error creating connection
                        # not exactly optimal code flow here, but chaining the callback correctly for this is tricky
                        return

                    conn = yield self.provider.getConnection(conn_id)

                    def stateUpdate():
                        log.msg(
                            'stateUpdate reservation_state: %s, provision_state: %s'
                            % (str(conn.reservation_state),
                               str(conn.provision_state)),
                            debug=True,
                            system=LOG_SYSTEM)
                        if conn.reservation_state == state.RESERVE_HELD:
                            self.provider.reserveCommit(
                                header, conn_id, request_info)
                        if conn.reservation_state == state.RESERVE_START and conn.provision_state == state.RELEASED and auto_provision:
                            self.provider.provision(header, conn_id,
                                                    request_info)
                        if conn.provision_state == state.PROVISIONED:
                            state.desubscribe(conn_id, stateUpdate)

                    state.subscribe(conn_id, stateUpdate)

                d.addCallback(connectionCreated)

            return server.NOT_DONE_YET

        except Exception as e:
            #log.err(e, system=LOG_SYSTEM)
            log.msg('Error creating connection: %s' % str(e),
                    system=LOG_SYSTEM)

            error_code = _errorCode(e)
            return _requestResponse(request, error_code, str(e))
Example #14
                end_time = now

            self.scheduler.scheduleCall(conn.connection_id, end_time,
                                        self._doEndtime, conn)
            td = end_time - datetime.datetime.utcnow()
            log.msg(
                'Connection %s: End and teardown scheduled for %s UTC (%i seconds)'
                % (conn.connection_id, end_time.replace(microsecond=0),
                   td.total_seconds()),
                system=self.log_system)

            data_plane_status = (True, conn.revision, True)  # active, version, consistent
            now = datetime.datetime.utcnow()
            header = nsa.NSIHeader(
                conn.requester_nsa, conn.requester_nsa
            )  # The NSA is both requester and provider in the backend, but this might be problematic without aggregator
            self.parent_requester.dataPlaneStateChange(
                header, conn.connection_id, self.getNotificationId(), now,
                data_plane_status)
        except Exception as e:
            log.msg('Error in post-activation: %s: %s' % (type(e), e),
                    system=self.log_system)

    @defer.inlineCallbacks
    def _doTeardown(self, conn):
        # this one is not used as a stand-alone, just a utility function

        src_target = self.connection_manager.getTarget(
            conn.source_port, conn.source_label.type_,
            conn.source_label.labelValue())
Example #15
class AggregatorTest(GenericProviderTest, unittest.TestCase):

    requester_agent = nsa.NetworkServiceAgent('test-requester:nsa', 'dud_endpoint1')
    provider_agent  = nsa.NetworkServiceAgent(GenericProviderTest.base + ':nsa', 'dud_endpoint2')
    header          = nsa.NSIHeader(requester_agent.urn(), provider_agent.urn(), connection_trace= [ requester_agent.urn() + ':1' ],
                                    security_attributes = [ nsa.SecurityAttribute('user', 'testuser') ] )

    def setUp(self):

        db.setupDatabase()

        self.requester = common.DUDRequester()

        self.clock = task.Clock()

        nrm_map = StringIO.StringIO(topology.ARUBA_TOPOLOGY)
        nrm_ports, nml_network, link_vector = setup.setupTopology(nrm_map, self.network, 'aruba.net')

        self.backend = dud.DUDNSIBackend(self.network, nrm_ports, self.requester, {})
        self.backend.scheduler.clock = self.clock

        pl = plugin.BasePlugin()
        pl.init( { config.NETWORK_NAME: self.network }, None )

        pr = provreg.ProviderRegistry( { self.provider_agent.urn() : self.backend }, {} )
        self.provider = aggregator.Aggregator(self.network, self.provider_agent, nml_network, link_vector, self.requester, pr, [], pl)

        # set parent for backend, we need to create the aggregator before this can be done
        self.backend.parent_requester = self.provider
        self.backend.startService()

        # request stuff
        self.start_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=2)
        self.end_time   = datetime.datetime.utcnow() + datetime.timedelta(seconds=10)

        self.schedule = nsa.Schedule(self.start_time, self.end_time)
        self.sd       = nsa.Point2PointService(self.source_stp, self.dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False, None)
        self.criteria = nsa.Criteria(0, self.schedule, self.sd)


    @defer.inlineCallbacks
    def tearDown(self):
        from opennsa.backends.common import genericbackend
        # keep it simple...
        yield genericbackend.GenericBackendConnections.deleteAll()
        yield database.SubConnection.deleteAll()
        yield database.ServiceConnection.deleteAll()
        # close database connections, so we don't run out
        from twistar.registry import Registry
        Registry.DBPOOL.close()


    @defer.inlineCallbacks
    def testHairpinConnectionAllowed(self):

        self.provider.policies.append(cnt.ALLOW_HAIRPIN)

        source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') )
        dest_stp   = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1783') )
        sd = nsa.Point2PointService(source_stp, dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False, None)
        criteria = nsa.Criteria(0, self.schedule, sd)

        self.header.newCorrelationId()
        try:
            acid = yield self.provider.reserve(self.header, None, None, None, criteria)
            yield self.requester.reserve_defer
        except Exception as e:
            self.fail('Should not have raised exception: %s' % str(e))
Example #16
    @defer.inlineCallbacks
    def _doActivate(self, conn):

        src_target = self.connection_manager.getTarget(conn.source_port,
                                                       conn.source_label)
        dst_target = self.connection_manager.getTarget(conn.dest_port,
                                                       conn.dest_label)
        try:
            log.msg('Connection %s: Activating data plane...' %
                    conn.connection_id,
                    system=self.log_system)
            yield self.connection_manager.setupLink(conn.connection_id,
                                                    src_target, dst_target,
                                                    conn.bandwidth)
        except Exception as e:
            # We need to mark failure in state machine here somehow....
            #log.err(e) # note: this causes error in tests
            log.msg('Connection %s: Error activating data plane: %s' %
                    (conn.connection_id, str(e)),
                    system=self.log_system)
            # should include stack trace
            conn.data_plane_active = False
            yield conn.save()

            header = nsa.NSIHeader(
                conn.requester_nsa, conn.requester_nsa
            )  # The NSA is both requester and provider in the backend, but this might be problematic without aggregator
            now = datetime.datetime.utcnow()
            service_ex = None
            self.parent_requester.errorEvent(header, conn.connection_id,
                                             self.getNotificationId(), now,
                                             'activateFailed', None,
                                             service_ex)

            defer.returnValue(None)

        try:
            conn.data_plane_active = True
            yield conn.save()
            log.msg('Connection %s: Data plane activated' %
                    (conn.connection_id),
                    system=self.log_system)

            # we might have passed end time during activation...
            end_time = conn.end_time
            now = datetime.datetime.utcnow()
            if end_time is not None and end_time < now:
                log.msg(
                    'Connection %s: passed end time during activation, scheduling immediate teardown.'
                    % conn.connection_id,
                    system=self.log_system)
                end_time = now

            if end_time is not None:
                self.scheduler.scheduleCall(conn.connection_id, end_time,
                                            self._doEndtime, conn)
                td = end_time - datetime.datetime.utcnow()
                log.msg(
                    'Connection %s: End and teardown scheduled for %s UTC (%i seconds)'
                    % (conn.connection_id, end_time.replace(microsecond=0),
                       td.total_seconds()),
                    system=self.log_system)

            data_plane_status = (True, conn.revision, True)  # active, version, consistent
            now = datetime.datetime.utcnow()
            header = nsa.NSIHeader(
                conn.requester_nsa, conn.requester_nsa
            )  # The NSA is both requester and provider in the backend, but this might be problematic without aggregator
            self.parent_requester.dataPlaneStateChange(
                header, conn.connection_id, self.getNotificationId(), now,
                data_plane_status)
        except Exception as e:
            log.msg('Error in post-activation: %s: %s' % (type(e), e),
                    system=self.log_system)
            log.err(e)
Example #17
class RemoteProviderTest(GenericProviderTest, unittest.TestCase):

    PROVIDER_PORT = 8180
    REQUESTER_PORT = 8280

    requester_agent = nsa.NetworkServiceAgent('test-requester:nsa', 'http://localhost:%i/NSI/services/RequesterService2' % REQUESTER_PORT)
    provider_agent  = nsa.NetworkServiceAgent(GenericProviderTest.base + ':nsa', 'http://localhost:%i/NSI/services/CS2' % PROVIDER_PORT)
    header   = nsa.NSIHeader(requester_agent.urn(), provider_agent.urn(), reply_to=requester_agent.endpoint, connection_trace=[ requester_agent.urn() + ':1' ],
                             security_attributes = [ nsa.SecurityAttribute('user', 'testuser') ] )

    def setUp(self):
        from twisted.web import resource, server
        from twisted.application import internet
        from opennsa.protocols import nsi2
        from opennsa.protocols.shared import soapresource
        from opennsa.protocols.nsi2 import requesterservice, requesterclient

        db.setupDatabase()

        self.requester = common.DUDRequester()

        self.clock = task.Clock()

        nrm_map = StringIO.StringIO(topology.ARUBA_TOPOLOGY)
        nrm_ports, nml_network, link_vector = setup.setupTopology(nrm_map, self.network, 'aruba.net')

        self.backend = dud.DUDNSIBackend(self.network, nrm_ports, None, {}) # we set the parent later
        self.backend.scheduler.clock = self.clock

        pl = plugin.BasePlugin()
        pl.init( { config.NETWORK_NAME: self.network }, None )

        pr = provreg.ProviderRegistry( { self.provider_agent.urn() : self.backend }, {} )
        self.aggregator = aggregator.Aggregator(self.network, self.provider_agent, nml_network, link_vector, None, pr, [], pl) # we set the parent later

        self.backend.parent_requester = self.aggregator

        # provider protocol
        http_top_resource = resource.Resource()

        cs2_prov = nsi2.setupProvider(self.aggregator, http_top_resource)
        self.aggregator.parent_requester = cs2_prov

        provider_factory = server.Site(http_top_resource)
        self.provider_service = internet.TCPServer(self.PROVIDER_PORT, provider_factory)

        # requester protocol

        requester_top_resource = resource.Resource()
        soap_resource = soapresource.setupSOAPResource(requester_top_resource, 'RequesterService2')

        self.provider = requesterclient.RequesterClient(self.provider_agent.endpoint, self.requester_agent.endpoint)

        requester_service = requesterservice.RequesterService(soap_resource, self.requester) # this is the important part
        requester_factory = server.Site(requester_top_resource, logPath='/dev/null')

        # start engines!
        self.backend.startService()
        self.provider_service.startService()
        self.requester_iport = reactor.listenTCP(self.REQUESTER_PORT, requester_factory)

        # request stuff
        self.start_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=2)
        self.end_time   = datetime.datetime.utcnow() + datetime.timedelta(seconds=10)

        self.schedule = nsa.Schedule(self.start_time, self.end_time)
        self.sd = nsa.Point2PointService(self.source_stp, self.dest_stp, self.bandwidth)
        self.criteria = nsa.Criteria(0, self.schedule, self.sd)


    @defer.inlineCallbacks
    def tearDown(self):

        self.backend.stopService()
        self.provider_service.stopService()
        self.requester_iport.stopListening()

        from opennsa.backends.common import genericbackend
        # keep it simple...
        yield genericbackend.GenericBackendConnections.deleteAll()
        yield database.SubConnection.deleteAll()
        yield database.ServiceConnection.deleteAll()

        # close database connections, so we don't run out
        from twistar.registry import Registry
        Registry.DBPOOL.close()


    @defer.inlineCallbacks
    def testQuerySummarySync(self):
        # sync is only available remotely

        self.header.newCorrelationId()
        acid = yield self.provider.reserve(self.header, None, 'gid-123', 'desc2', self.criteria)
        yield self.requester.reserve_defer

        yield self.provider.reserveCommit(self.header, acid)
        yield self.requester.reserve_commit_defer

        reservations = yield self.provider.querySummarySync(self.header, connection_ids = [ acid ] )

        self.failUnlessEquals(len(reservations), 1)

        ci = reservations[0]

        self.failUnlessEquals(ci.connection_id, acid)
        self.failUnlessEquals(ci.global_reservation_id, 'gid-123')
        self.failUnlessEquals(ci.description, 'desc2')

        self.failUnlessEquals(ci.requester_nsa, self.requester_agent.urn())
        self.failUnlessEquals(len(ci.criterias), 1)
        crit = ci.criterias[0]
        sd = crit.service_def

        src_stp = sd.source_stp
        dst_stp = sd.dest_stp

        self.failUnlessEquals(src_stp.network, self.network)
        self.failUnlessEquals(src_stp.port,    self.source_port)
        self.failUnlessEquals(src_stp.label.type_, cnt.ETHERNET_VLAN)
        self.failUnlessIn(src_stp.label.labelValue(), ('1781', '1782') )

        self.failUnlessEquals(dst_stp.network, self.network)
        self.failUnlessEquals(dst_stp.port,    self.dest_port)
        self.failUnlessEquals(dst_stp.label.type_, cnt.ETHERNET_VLAN)
        self.failUnlessIn(dst_stp.label.labelValue(), ('1782', '1783') )

        self.failUnlessEqual(sd.capacity, self.bandwidth)
        self.failUnlessEqual(crit.revision,   0)

        from opennsa import state
        rsm, psm, lsm, dps = ci.states
        self.failUnlessEquals(rsm, state.RESERVE_START)
        self.failUnlessEquals(psm, state.RELEASED)
        self.failUnlessEquals(lsm, state.CREATED)
        self.failUnlessEquals(dps[:2], (False, 0) )  # we cannot really expect a consistent result for consistent here


    @defer.inlineCallbacks
    def testQueryRecursive(self):
        # only available on aggregator and remote, we just do remote for now

        self.header.newCorrelationId()
        acid = yield self.provider.reserve(self.header, None, 'gid-123', 'desc2', self.criteria)
        yield self.requester.reserve_defer

        yield self.provider.reserveCommit(self.header, acid)
        yield self.requester.reserve_commit_defer

        self.header.newCorrelationId()
        yield self.provider.queryRecursive(self.header, connection_ids = [ acid ] )
        header, reservations = yield self.requester.query_recursive_defer

        self.failUnlessEquals(len(reservations), 1)
        ci = reservations[0]

        self.failUnlessEquals(ci.connection_id, acid)
        self.failUnlessEquals(ci.global_reservation_id, 'gid-123')
        self.failUnlessEquals(ci.description, 'desc2')

        self.failUnlessEquals(ci.requester_nsa, self.requester_agent.urn())
        self.failUnlessEquals(len(ci.criterias), 1)
        crit = ci.criterias[0]

        src_stp = crit.service_def.source_stp
        dst_stp = crit.service_def.dest_stp

        self.failUnlessEquals(src_stp.network, self.network)
        self.failUnlessEquals(src_stp.port,    self.source_port)
        self.failUnlessEquals(src_stp.label.type_, cnt.ETHERNET_VLAN)
        self.failUnlessIn(src_stp.label.labelValue(), ('1781', '1782') )

        self.failUnlessEquals(dst_stp.network, self.network)
        self.failUnlessEquals(dst_stp.port,    self.dest_port)
        self.failUnlessEquals(dst_stp.label.type_, cnt.ETHERNET_VLAN)
        self.failUnlessIn(dst_stp.label.labelValue(), ('1782', '1783') )

        self.failUnlessEqual(crit.service_def.capacity, self.bandwidth)
        self.failUnlessEqual(crit.revision,   0)

        from opennsa import state
        rsm, psm, lsm, dps = ci.states
        self.failUnlessEquals(rsm, state.RESERVE_START)
        self.failUnlessEquals(psm, state.RELEASED)
        self.failUnlessEquals(lsm, state.CREATED)
        self.failUnlessEquals(dps[:2], (False, 0) )  # we cannot really expect a consistent result for consistent here

        self.failUnlessEqual(len(crit.children), 1)
        child = crit.children[0]

        rsm, psm, lsm, dps = child.states  # now check the child's states
        self.failUnlessEquals(rsm, state.RESERVE_START)
        self.failUnlessEquals(psm, state.RELEASED)
        self.failUnlessEquals(lsm, state.CREATED)
        self.failUnlessEquals(dps[:2], (False, 0) )  # we cannot really expect a consistent result for consistent here


    @defer.inlineCallbacks
    def testQueryRecursiveNoStartTime(self):
        # only available on aggregator and remote, we just do remote for now

        start_time = None
        criteria   = nsa.Criteria(0, nsa.Schedule(start_time, self.end_time), self.sd)

        self.header.newCorrelationId()
        acid = yield self.provider.reserve(self.header, None, 'gid-123', 'desc2', criteria)
        yield self.requester.reserve_defer

        yield self.provider.reserveCommit(self.header, acid)
        yield self.requester.reserve_commit_defer

        self.header.newCorrelationId()
        yield self.provider.queryRecursive(self.header, connection_ids = [ acid ] )
        header, reservations = yield self.requester.query_recursive_defer

        self.failUnlessEquals(len(reservations), 1)
        ci = reservations[0]

        self.failUnlessEquals(ci.connection_id, acid)
        self.failUnlessEquals(ci.global_reservation_id, 'gid-123')
        self.failUnlessEquals(ci.description, 'desc2')

        self.failUnlessEquals(ci.requester_nsa, self.requester_agent.urn())
        self.failUnlessEquals(len(ci.criterias), 1)
        crit = ci.criterias[0]

        src_stp = crit.service_def.source_stp
        dst_stp = crit.service_def.dest_stp

        self.failUnlessEquals(src_stp.network, self.network)
        self.failUnlessEquals(src_stp.port,    self.source_port)
        self.failUnlessEquals(src_stp.label.type_, cnt.ETHERNET_VLAN)
        self.failUnlessIn(src_stp.label.labelValue(), ('1781', '1782') )

        self.failUnlessEquals(dst_stp.network, self.network)
        self.failUnlessEquals(dst_stp.port,    self.dest_port)
        self.failUnlessEquals(dst_stp.label.type_, cnt.ETHERNET_VLAN)
        self.failUnlessIn(dst_stp.label.labelValue(), ('1782', '1783') )

        self.failUnlessEqual(crit.service_def.capacity, self.bandwidth)
        self.failUnlessEqual(crit.revision,   0)

        from opennsa import state
        rsm, psm, lsm, dps = ci.states
        self.failUnlessEquals(rsm, state.RESERVE_START)
        self.failUnlessEquals(psm, state.RELEASED)
        self.failUnlessEquals(lsm, state.CREATED)
        self.failUnlessEquals(dps[:2], (False, 0) )  # we cannot really expect a consistent result for consistent here

        self.failUnlessEqual(len(crit.children), 1)
        child = crit.children[0]

        rsm, psm, lsm, dps = child.states  # now check the child's states
        self.failUnlessEquals(rsm, state.RESERVE_START)
        self.failUnlessEquals(psm, state.RELEASED)
        self.failUnlessEquals(lsm, state.CREATED)
        self.failUnlessEquals(dps[:2], (False, 0) )  # we cannot really expect a consistent result for consistent here
Example #18
class GenericBackend(service.Service):

    implements(INSIProvider)

    # This is how long a reservation will be kept in reserved, but not committed state.
    # Two minutes (120 seconds) is the recommended value from the NSI group
    # Yeah, it should be much less, but some NRMs are that slow
    TPC_TIMEOUT = 120  # seconds
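    # Worked example: with TPC_TIMEOUT = 120, a reservation held at
    # 12:00:00 UTC times out at 12:02:00 UTC unless it is committed first;
    # if the reservation end time falls earlier, the end time wins
    # (see the min() over timeout_time in _doReserve).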

    def __init__(self,
                 network,
                 nrm_ports,
                 connection_manager,
                 parent_requester,
                 log_system,
                 minimum_duration=60):

        self.network = network
        self.nrm_ports = nrm_ports
        self.connection_manager = connection_manager
        self.parent_requester = parent_requester
        self.log_system = log_system
        self.minimum_duration = minimum_duration

        self.notification_id = 0

        self.scheduler = scheduler.CallScheduler()
        self.calendar = calendar.ReservationCalendar()
        # need to build the calendar as well

        # need to build schedule here
        self.restore_defer = defer.Deferred()
        reactor.callWhenRunning(self.buildSchedule)

    def startService(self):
        service.Service.startService(self)

    def stopService(self):
        service.Service.stopService(self)
        if self.restore_defer.called:
            self.scheduler.cancelAllCalls()
            return defer.succeed(None)
        else:
            return self.restore_defer.addCallback(
                lambda _: self.scheduler.cancelAllCalls())

    def getNotificationId(self):
        nid = self.notification_id
        self.notification_id += 1
        return nid

    @defer.inlineCallbacks
    def buildSchedule(self):

        conns = yield GenericBackendConnections.find(
            where=['lifecycle_state <> ?', state.TERMINATED])
        for conn in conns:
            # avoid race with newly created connections
            if self.scheduler.hasScheduledCall(conn.connection_id):
                continue

            now = datetime.datetime.utcnow()

            if conn.lifecycle_state in (state.PASSED_ENDTIME,
                                        state.TERMINATED):
                continue  # This connection has already lived its life to the fullest :-)

            if conn.reservation_state == state.RESERVE_START and not conn.allocated:
                # This happens when a connection was reserved, but never committed and abort/timeout happened
                log.msg(
                    'Connection %s: Was never committed, not putting entry into calendar'
                    % conn.connection_id,
                    debug=True,
                    system=self.log_system)
                continue

            # add reservation, some of the following code will remove the reservation again
            src_resource = self.connection_manager.getResource(
                conn.source_port, conn.source_label)
            dst_resource = self.connection_manager.getResource(
                conn.dest_port, conn.dest_label)
            self.calendar.addReservation(src_resource, conn.start_time,
                                         conn.end_time)
            self.calendar.addReservation(dst_resource, conn.start_time,
                                         conn.end_time)

            if conn.end_time is not None and conn.end_time < now and conn.lifecycle_state not in (
                    state.PASSED_ENDTIME, state.TERMINATED):
                log.msg('Connection %s: Immediate end during buildSchedule' %
                        conn.connection_id,
                        system=self.log_system)
                yield self._doEndtime(conn)
                continue

            elif conn.reservation_state == state.RESERVE_HELD:
                abort_time = conn.reserve_time + datetime.timedelta(
                    seconds=self.TPC_TIMEOUT)
                timeout_time = min(abort_time, conn.end_time
                                   or abort_time)  # or to handle None case
                if timeout_time < now:
                    # have passed the time when timeout should occur
                    log.msg(
                        'Connection %s: Reservation Held, but timeout has passed, doing rollback'
                        % conn.connection_id,
                        system=self.log_system)
                    yield self._doReserveRollback(
                        conn)  # will remove reservation
                else:
                    td = timeout_time - now
                    log.msg(
                        'Connection %s: Reservation Held, scheduling timeout in %i seconds'
                        % (conn.connection_id, td.total_seconds()),
                        system=self.log_system)
                    self.scheduler.scheduleCall(conn.connection_id,
                                                timeout_time,
                                                self._doReserveTimeout, conn)

            elif conn.start_time is None or conn.start_time < now:
                # start time has passed; we must either activate, schedule deactivation, or schedule termination
                if conn.provision_state == state.PROVISIONED:
                    if conn.data_plane_active:
                        if conn.end_time is None:
                            log.msg(
                                'Connection %s: already active, no scheduled end time'
                                % conn.connection_id,
                                system=self.log_system)
                        else:
                            self.scheduler.scheduleCall(
                                conn.connection_id, conn.end_time,
                                self._doEndtime, conn)
                            td = conn.end_time - now
                            log.msg(
                                'Connection %s: already active, scheduling end for %s UTC (%i seconds) (buildSchedule)'
                                % (conn.connection_id,
                                   conn.end_time.replace(microsecond=0),
                                   td.total_seconds()),
                                system=self.log_system)
                    else:
                        log.msg(
                            'Connection %s: Immediate activate during buildSchedule'
                            % conn.connection_id,
                            system=self.log_system)
                        yield self._doActivate(conn)
                elif conn.provision_state == state.RELEASED:
                    if conn.end_time is None:
                        log.msg(
                            'Connection %s: Currently released, no end scheduled'
                            % conn.connection_id,
                            system=self.log_system)
                    else:
                        self.scheduler.scheduleCall(conn.connection_id,
                                                    conn.end_time,
                                                    self._doEndtime, conn)
                        td = conn.end_time - now
                        log.msg(
                            'Connection %s: End scheduled for %s UTC (%i seconds) (buildSchedule)'
                            % (conn.connection_id,
                               conn.end_time.replace(microsecond=0),
                               td.total_seconds()),
                            system=self.log_system)
                else:
                    log.msg(
                        'Unhandled provision state %s for connection %s in scheduler building'
                        % (conn.provision_state, conn.connection_id))

            elif conn.start_time > now:
                # start time has not yet passed; schedule activation or end depending on state
                if conn.provision_state == state.PROVISIONED and not conn.data_plane_active:
                    self.scheduler.scheduleCall(conn.connection_id,
                                                conn.start_time,
                                                self._doActivate, conn)
                    td = conn.start_time - now
                    log.msg(
                        'Connection %s: activate scheduled for %s UTC (%i seconds) (buildSchedule)'
                        % (conn.connection_id,
                           conn.start_time.replace(microsecond=0),
                           td.total_seconds()),
                        system=self.log_system)
                elif conn.provision_state == state.RELEASED:
                    self.scheduler.scheduleCall(conn.connection_id,
                                                conn.end_time, self._doEndtime,
                                                conn)
                    td = conn.end_time - now
                    log.msg(
                        'Connection %s: End scheduled for %s UTC (%i seconds) (buildSchedule)'
                        % (conn.connection_id,
                           conn.end_time.replace(microsecond=0),
                           td.total_seconds()),
                        system=self.log_system)
                else:
                    log.msg(
                        'Unhandled provision state %s for connection %s in scheduler building'
                        % (conn.provision_state, conn.connection_id))

            else:
                log.msg(
                    'Unhandled start/end time configuration for connection %s'
                    % conn.connection_id,
                    system=self.log_system)

        log.msg('Scheduled calls restored', system=self.log_system)
        self.restore_defer.callback(None)

    @defer.inlineCallbacks
    def _getConnection(self, connection_id, requester_nsa):
        # add security check sometime

        conns = yield GenericBackendConnections.findBy(
            connection_id=connection_id)
        if len(conns) == 0:
            raise error.ConnectionNonExistentError('No connection with id %s' %
                                                   connection_id)
        defer.returnValue(conns[0])  # we only get one, unique in db

    def _authorize(self,
                   source_port,
                   destination_port,
                   header,
                   request_info,
                   start_time=None,
                   end_time=None):
        """
        Checks if port usage is allowed from the credentials provided in the
        NSI header or information from the request.
        """
        nrm_source_port = self.nrm_ports[source_port]
        nrm_dest_port = self.nrm_ports[destination_port]

        source_authz = authz.isAuthorized(nrm_source_port,
                                          header.security_attributes,
                                          request_info, nrm_source_port,
                                          start_time, end_time)
        if not source_authz:
            stp_name = cnt.URN_OGF_PREFIX + self.network + ':' + nrm_source_port.name
            raise error.UnauthorizedError(
                'Request does not have any valid credentials for STP %s' %
                stp_name)

        dest_authz = authz.isAuthorized(nrm_dest_port,
                                        header.security_attributes,
                                        request_info, nrm_dest_port,
                                        start_time, end_time)
        if not dest_authz:
            stp_name = cnt.URN_OGF_PREFIX + self.network + ':' + nrm_dest_port.name
            raise error.UnauthorizedError(
                'Request does not have any valid credentials for STP %s' %
                stp_name)
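    # A minimal usage sketch (port names are hypothetical; real keys come from
    # self.nrm_ports): _authorize raises on failure and returns nothing on
    # success, so callers simply invoke it inline:
    #
    #   try:
    #       self._authorize('ge-1/0/1', 'ge-1/0/2', header, request_info)
    #   except error.UnauthorizedError as e:
    #       log.msg('Request rejected: %s' % e, system=self.log_system)
    #       raise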

    def logStateUpdate(self, conn, state_msg):
        src_target = self.connection_manager.getTarget(conn.source_port,
                                                       conn.source_label)
        dst_target = self.connection_manager.getTarget(conn.dest_port,
                                                       conn.dest_label)
        log.msg('Connection %s: %s -> %s %s' %
                (conn.connection_id, src_target, dst_target, state_msg),
                system=self.log_system)

    @defer.inlineCallbacks
    def reserve(self,
                header,
                connection_id,
                global_reservation_id,
                description,
                criteria,
                request_info=None):

        # return defer.fail( error.InternalNRMError('test reservation failure') )

        sd = criteria.service_def

        if type(sd) is not nsa.Point2PointService:
            raise ValueError(
                'Cannot handle service of type %s, only Point2PointService is currently supported'
                % type(sd))

        # should perhaps verify nsa, but not that important
        log.msg('Reserve request. Connection ID: %s' % connection_id,
                system=self.log_system)

        if connection_id:
            # if a connection id is specified, it must not already be in use
            try:
                yield self._getConnection(connection_id, header.requester_nsa)
                raise ValueError('GenericBackend cannot handle modify (yet)')
            except error.ConnectionNonExistentError:
                pass  # expected: the connection id is free

        source_stp = sd.source_stp
        dest_stp = sd.dest_stp

        # check network and ports exist

        if source_stp.network != self.network:
            raise error.ConnectionCreateError(
                'Source network does not match network this NSA is managing (%s != %s)'
                % (source_stp.network, self.network))
        if dest_stp.network != self.network:
            raise error.ConnectionCreateError(
                'Destination network does not match network this NSA is managing (%s != %s)'
                % (dest_stp.network, self.network))

        # ensure that the ports actually exist
        if source_stp.port not in self.nrm_ports:
            raise error.STPUnavailableError(
                'No STP named %s (ports: %s)' %
                (source_stp.baseURN(), str(self.nrm_ports.keys())))
        if dest_stp.port not in self.nrm_ports:
            raise error.STPUnavailableError(
                'No STP named %s (ports: %s)' %
                (dest_stp.baseURN(), str(self.nrm_ports.keys())))

        start_time = criteria.schedule.start_time  # or datetime.datetime.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=1)  # no start time = now (well, in 1 second)
        end_time = criteria.schedule.end_time

        if start_time is not None and end_time is not None:
            duration = (end_time - start_time).total_seconds()
            if duration < self.minimum_duration:
                raise error.ConnectionCreateError(
                    'Duration too short, minimum duration is %i seconds (%i specified)'
                    % (self.minimum_duration, duration), self.network)

        nrm_source_port = self.nrm_ports[source_stp.port]
        nrm_dest_port = self.nrm_ports[dest_stp.port]

        self._authorize(source_stp.port, dest_stp.port, header, request_info,
                        start_time, end_time)

        # transit restriction
        if nrm_source_port.transit_restricted and nrm_dest_port.transit_restricted:
            raise error.ConnectionCreateError(
                'Cannot connect two transit restricted STPs.')

        # check that we are not connecting an STP to itself (hairpin)
        if source_stp.port == dest_stp.port and source_stp.label == dest_stp.label:
            raise error.ServiceError('Cannot connect STP %s to itself.' %
                                     source_stp)

        def labelType(stp):
            return None if stp.label is None else stp.label.type_

        # have the backend check if the ports/labels can be connected
        # this is needed for cases of cross-label switching like ethernet-mpls encapsulation
        if hasattr(self.connection_manager, 'canConnect'):
            if not self.connection_manager.canConnect(
                    source_stp.port, dest_stp.port, source_stp.label,
                    dest_stp.label):
                raise error.TopologyError('Cannot connect STP %s to %s.' %
                                          (source_stp, dest_stp))
        elif labelType(source_stp) != labelType(dest_stp):
            # if backend doesn't implement canConnect, we assume only the same label can be connected (old default)
            raise error.TopologyError(
                'Cannot connect ports with different label types')
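        # e.g. an MPLS-capable backend might implement canConnect() and accept
        # connecting a VLAN-labeled port to an MPLS-labeled one; backends
        # without canConnect() fall back to the strict same-label-type rule
        # above (illustrative, not a guarantee about any specific backend)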

        # now check that the ports have (some of) the specified label values
        if not nsa.Label.canMatch(nrm_source_port.label, source_stp.label):
            raise error.TopologyError(
                'Source port %s cannot match label set %s' %
                (nrm_source_port.name, source_stp.label))
        if not nsa.Label.canMatch(nrm_dest_port.label, dest_stp.label):
            raise error.TopologyError(
                'Destination port %s cannot match label set %s' %
                (nrm_dest_port.name, dest_stp.label))

        def labelEnum(label):
            if label is None:
                return [None]
            return [nsa.Label(label.type_, lv) for lv in label.enumerateValues()]
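        # Illustrative expansion (hypothetical VLAN range, assuming
        # enumerateValues() yields the individual values of a label set):
        #
        #   labelEnum(nsa.Label(vlan_type, '1780-1782'))
        #   -> [Label(vlan, 1780), Label(vlan, 1781), Label(vlan, 1782)]
        #
        #   labelEnum(None) -> [None]   # unlabeled: a single "no label" candidate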

        # find a usable label value for each end (the "label dance")
        if self.connection_manager.canSwapLabel(labelType(
                source_stp)) and self.connection_manager.canSwapLabel(
                    labelType(dest_stp)):
            for lv in labelEnum(source_stp.label):
                src_resource = self.connection_manager.getResource(
                    source_stp.port, lv)
                try:
                    self.calendar.checkReservation(src_resource, start_time,
                                                   end_time)
                    src_label = lv
                    break
                except error.STPUnavailableError:
                    pass
            else:
                raise error.STPUnavailableError(
                    'STP %s not available in specified time span' % source_stp)

            for lv in labelEnum(dest_stp.label):
                dst_resource = self.connection_manager.getResource(
                    dest_stp.port, lv)
                try:
                    self.calendar.checkReservation(dst_resource, start_time,
                                                   end_time)
                    dst_label = lv
                    break
                except error.STPUnavailableError:
                    pass
            else:
                raise error.STPUnavailableError(
                    'STP %s not available in specified time span' % dest_stp)

            # only add the reservations once both source and destination STPs are available
            self.calendar.addReservation(src_resource, start_time, end_time)
            self.calendar.addReservation(dst_resource, start_time, end_time)

        else:
            if source_stp.label is None:
                label_candidate = dest_stp.label
            elif dest_stp.label is None:
                label_candidate = source_stp.label
            else:
                try:
                    label_candidate = source_stp.label.intersect(
                        dest_stp.label)
                except nsa.EmptyLabelSet:
                    raise error.VLANInterchangeNotSupportedError(
                        'VLAN re-write not supported and no possible label intersection'
                    )
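            # Intersection sketch (hypothetical ranges): without label swapping
            # both ends must agree on common label values, e.g.
            #
            #   Label(vlan, '1780-1790').intersect(Label(vlan, '1785-1800'))
            #   -> Label(vlan, '1785-1790')
            #
            # while an empty overlap raises nsa.EmptyLabelSet, handled above.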

            for lv in labelEnum(label_candidate):
                src_resource = self.connection_manager.getResource(
                    source_stp.port, lv)
                dst_resource = self.connection_manager.getResource(
                    dest_stp.port, lv)
                try:
                    self.calendar.checkReservation(src_resource, start_time,
                                                   end_time)
                    self.calendar.checkReservation(dst_resource, start_time,
                                                   end_time)
                    self.calendar.addReservation(src_resource, start_time,
                                                 end_time)
                    self.calendar.addReservation(dst_resource, start_time,
                                                 end_time)
                    src_label = lv
                    dst_label = lv
                    break
                except error.STPUnavailableError:
                    continue
            else:
                raise error.STPUnavailableError(
                    'Link %s and %s not available in specified time span' %
                    (source_stp, dest_stp))

        now = datetime.datetime.utcnow()

        source_target = self.connection_manager.getTarget(
            source_stp.port, src_label)
        dest_target = self.connection_manager.getTarget(
            dest_stp.port, dst_label)
        if connection_id is None:
            connection_id = self.connection_manager.createConnectionId(
                source_target, dest_target)

        # we should check the schedule here

        # should we save the requester or provider here?
        conn = GenericBackendConnections(
            connection_id=connection_id,
            revision=0,
            global_reservation_id=global_reservation_id,
            description=description,
            requester_nsa=header.requester_nsa,
            reserve_time=now,
            reservation_state=state.RESERVE_START,
            provision_state=state.RELEASED,
            lifecycle_state=state.CREATED,
            data_plane_active=False,
            source_network=source_stp.network,
            source_port=source_stp.port,
            source_label=src_label,
            dest_network=dest_stp.network,
            dest_port=dest_stp.port,
            dest_label=dst_label,
            start_time=start_time,
            end_time=end_time,
            symmetrical=sd.symmetric,
            directionality=sd.directionality,
            bandwidth=sd.capacity,
            allocated=False)
        yield conn.save()
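        # defer the actual reservation work (state switching and the
        # reserveConfirmed message) to the reactor loop, so the connection id
        # can be returned to the caller first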
        reactor.callWhenRunning(self._doReserve, conn, header.correlation_id)
        defer.returnValue(connection_id)

    @defer.inlineCallbacks
    def reserveCommit(self, header, connection_id, request_info=None):

        log.msg('ReserveCommit request from %s. Connection ID: %s' %
                (header.requester_nsa, connection_id),
                system=self.log_system)

        conn = yield self._getConnection(connection_id, header.requester_nsa)
        self._authorize(conn.source_port, conn.dest_port, header, request_info)

        if conn.lifecycle_state in (state.TERMINATING, state.TERMINATED):
            raise error.ConnectionGoneError(
                'Connection %s has been terminated' % connection_id)

        # the switch to reserve start and allocated must be in same transaction
        # state.reserveMultiSwitch will save the state, including the allocated flag
        conn.allocated = True
        yield state.reserveMultiSwitch(conn, state.RESERVE_COMMITTING,
                                       state.RESERVE_START)

        self.logStateUpdate(conn, 'COMMIT/RESERVED')

        # cancel abort and schedule end time call
        self.scheduler.cancelCall(connection_id)
        if conn.end_time is not None:
            self.scheduler.scheduleCall(conn.connection_id, conn.end_time,
                                        self._doEndtime, conn)
            td = conn.end_time - datetime.datetime.utcnow()
            log.msg(
                'Connection %s: End and teardown scheduled for %s UTC (%i seconds)'
                % (conn.connection_id, conn.end_time.replace(microsecond=0),
                   td.total_seconds()),
                system=self.log_system)

        yield self.parent_requester.reserveCommitConfirmed(
            header, connection_id)

        defer.returnValue(connection_id)
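    # Sketch of the two-phase reservation flow as wired up above:
    #
    #   reserve()       -> RESERVE_CHECKING -> RESERVE_HELD -> reserveConfirmed
    #   reserveCommit() -> RESERVE_COMMITTING -> RESERVE_START -> reserveCommitConfirmed
    #   no commit within TPC_TIMEOUT -> _doReserveTimeout -> _doReserveRollback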

    @defer.inlineCallbacks
    def reserveAbort(self, header, connection_id, request_info=None):

        log.msg('ReserveAbort request from %s. Connection ID: %s' %
                (header.requester_nsa, connection_id),
                system=self.log_system)

        conn = yield self._getConnection(connection_id, header.requester_nsa)
        self._authorize(conn.source_port, conn.dest_port, header, request_info)

        if conn.lifecycle_state in (state.TERMINATING, state.TERMINATED):
            raise error.ConnectionGoneError(
                'Connection %s has been terminated' % connection_id)

        yield self._doReserveRollback(conn)

        header = nsa.NSIHeader(
            conn.requester_nsa, conn.requester_nsa
        )  # The NSA is both requester and provider in the backend, but this might be problematic without aggregator
        self.parent_requester.reserveAbortConfirmed(header, conn.connection_id)

    @defer.inlineCallbacks
    def provision(self, header, connection_id, request_info=None):

        log.msg('Provision request from %s. Connection ID: %s' %
                (header.requester_nsa, connection_id),
                system=self.log_system)

        conn = yield self._getConnection(connection_id, header.requester_nsa)
        self._authorize(conn.source_port, conn.dest_port, header, request_info)

        if not conn.allocated:
            raise error.ConnectionError(
                'No resource allocated to the connection, cannot provision')

        if conn.lifecycle_state in (state.TERMINATING, state.TERMINATED):
            raise error.ConnectionGoneError(
                'Connection %s has been terminated' % connection_id)

        if conn.reservation_state != state.RESERVE_START:
            raise error.InvalidTransitionError(
                'Cannot provision connection in a non-reserved state')

        now = datetime.datetime.utcnow()
        if conn.end_time is not None and conn.end_time <= now:
            raise error.ConnectionGoneError(
                'Cannot provision connection after end time (end time: %s, current time: %s).'
                % (conn.end_time, now))

        yield state.provisioning(conn)
        self.logStateUpdate(conn, 'PROVISIONING')

        self.scheduler.cancelCall(connection_id)

        if conn.start_time is None or conn.start_time <= now:
            self._doActivate(conn)  # returns a deferred, but it isn't used
        else:
            self.scheduler.scheduleCall(connection_id, conn.start_time,
                                        self._doActivate, conn)
            td = conn.start_time - now
            log.msg(
                'Connection %s: activate scheduled for %s UTC (%i seconds) (provision)'
                % (conn.connection_id,
                   conn.start_time.replace(microsecond=0),
                   td.total_seconds()),
                system=self.log_system)

        yield state.provisioned(conn)
        self.logStateUpdate(conn, 'PROVISIONED')

        self.parent_requester.provisionConfirmed(header, connection_id)

        defer.returnValue(conn.connection_id)

    @defer.inlineCallbacks
    def release(self, header, connection_id, request_info=None):

        log.msg('Release request from %s. Connection ID: %s' %
                (header.requester_nsa, connection_id),
                system=self.log_system)

        conn = yield self._getConnection(connection_id, header.requester_nsa)
        self._authorize(conn.source_port, conn.dest_port, header, request_info)

        if conn.lifecycle_state in (state.TERMINATING, state.TERMINATED):
            raise error.ConnectionGoneError(
                'Connection %s has been terminated' % connection_id)

        yield state.releasing(conn)
        self.logStateUpdate(conn, 'RELEASING')

        self.scheduler.cancelCall(connection_id)

        if conn.data_plane_active:
            try:
                yield self._doTeardown(conn)  # blocking is not strictly required here, but lets us log teardown errors
            except Exception as e:
                log.msg('Connection %s: Error tearing down link: %s' %
                        (conn.connection_id, e),
                        system=self.log_system)

        if conn.end_time is not None:
            self.scheduler.scheduleCall(connection_id, conn.end_time,
                                        self._doEndtime, conn)
            td = conn.end_time - datetime.datetime.utcnow()
            log.msg(
                'Connection %s: terminate scheduled for %s UTC (%i seconds)' %
                (conn.connection_id, conn.end_time.replace(microsecond=0),
                 td.total_seconds()),
                system=self.log_system)

        yield state.released(conn)
        self.logStateUpdate(conn, 'RELEASED')

        self.parent_requester.releaseConfirmed(header, connection_id)

        defer.returnValue(conn.connection_id)

    @defer.inlineCallbacks
    def terminate(self, header, connection_id, request_info=None):
        # return defer.fail( error.InternalNRMError('test termination failure') )

        log.msg('Terminate request from %s. Connection ID: %s' %
                (header.requester_nsa, connection_id),
                system=self.log_system)

        conn = yield self._getConnection(connection_id, header.requester_nsa)
        self._authorize(conn.source_port, conn.dest_port, header, request_info)

        if conn.lifecycle_state == state.TERMINATED:
            defer.returnValue(conn.connection_id)

        self.scheduler.cancelCall(
            conn.connection_id)  # cancel end time tear down

        # if we passed end time, resources have already been freed
        free_resources = True
        if conn.lifecycle_state == state.PASSED_ENDTIME:
            free_resources = False

        yield state.terminating(conn)
        self.logStateUpdate(conn, 'TERMINATING')

        if free_resources:
            yield self._doFreeResource(conn)

        # here the reply will practically always come before the ack
        header = nsa.NSIHeader(
            conn.requester_nsa, conn.requester_nsa
        )  # The NSA is both requester and provider in the backend, but this might be problematic without aggregator
        yield self.parent_requester.terminateConfirmed(header,
                                                       conn.connection_id)

        yield state.terminated(conn)
        self.logStateUpdate(conn, 'TERMINATED')

    @defer.inlineCallbacks
    def querySummary(self,
                     header,
                     connection_ids=None,
                     global_reservation_ids=None,
                     request_info=None):

        reservations = yield self._query(header, connection_ids,
                                         global_reservation_ids)
        self.parent_requester.querySummaryConfirmed(header, reservations)

    @defer.inlineCallbacks
    def queryRecursive(self,
                       header,
                       connection_ids,
                       global_reservation_ids,
                       request_info=None):

        reservations = yield self._query(header, connection_ids,
                                         global_reservation_ids)
        self.parent_requester.queryRecursiveConfirmed(header, reservations)

    @defer.inlineCallbacks
    def _query(self,
               header,
               connection_ids,
               global_reservation_ids,
               request_info=None):
        # generic query mechanism for summary and recursive

        # TODO: Match stps/ports that can be used with credentials and return connections using these STPs
        if connection_ids:
            conns = yield GenericBackendConnections.find(where=[
                'requester_nsa = ? AND connection_id IN ?',
                header.requester_nsa,
                tuple(connection_ids)
            ])
        elif global_reservation_ids:
            conns = yield GenericBackendConnections.find(where=[
                'requester_nsa = ? AND global_reservation_id IN ?',
                header.requester_nsa,
                tuple(global_reservation_ids)
            ])
        else:
            raise error.MissingParameterError(
                'Must specify connectionId or globalReservationId')

        reservations = []
        for c in conns:
            source_stp = nsa.STP(c.source_network, c.source_port,
                                 c.source_label)
            dest_stp = nsa.STP(c.dest_network, c.dest_port, c.dest_label)
            schedule = nsa.Schedule(c.start_time, c.end_time)
            sd = nsa.Point2PointService(source_stp, dest_stp, c.bandwidth,
                                        cnt.BIDIRECTIONAL, False, None)
            criteria = nsa.QueryCriteria(c.revision, schedule, sd)
            data_plane_status = (c.data_plane_active, c.revision, True)
            states = (c.reservation_state, c.provision_state,
                      c.lifecycle_state, data_plane_status)
            notification_id = self.getNotificationId()
            result_id = notification_id  # result ids are not tracked separately, so reuse the notification id
            provider_nsa = cnt.URN_OGF_PREFIX + self.network.replace(
                'topology', 'nsa')  # hack: derive the provider NSA urn from the topology urn
            reservations.append(
                nsa.ConnectionInfo(c.connection_id, c.global_reservation_id,
                                   c.description, cnt.EVTS_AGOLE, [criteria],
                                   provider_nsa, c.requester_nsa, states,
                                   notification_id, result_id))

        defer.returnValue(reservations)
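    # Usage sketch (hypothetical connection id; attribute names on
    # nsa.ConnectionInfo are assumed from the constructor arguments above):
    #
    #   reservations = yield self._query(header, ['conn-123'], None)
    #   for ci in reservations:
    #       log.msg('Connection %s: states %s' % (ci.connection_id, ci.states))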

    def queryNotification(self,
                          header,
                          connection_id,
                          start_notification=None,
                          end_notification=None):
        # raises synchronously; there is no deferred work here
        raise NotImplementedError(
            'QueryNotification not implemented in generic backend.')

    # --

    @defer.inlineCallbacks
    def _doReserve(self, conn, correlation_id):

        # we have already checked resource availability, so can progress directly through checking
        yield state.reserveMultiSwitch(conn, state.RESERVE_CHECKING,
                                       state.RESERVE_HELD)
        self.logStateUpdate(conn, 'RESERVE CHECKING/HELD')

        # schedule 2PC timeout
        if self.scheduler.hasScheduledCall(conn.connection_id):
            # this means that the build scheduler made a call while we yielded
            self.scheduler.cancelCall(conn.connection_id)

        abort_timestamp = datetime.datetime.utcnow() + datetime.timedelta(
            seconds=self.TPC_TIMEOUT)
        timeout_time = min(abort_timestamp, conn.end_time or abort_timestamp)
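        # `conn.end_time or abort_timestamp` substitutes the abort timestamp
        # when no end time is set, so min() never compares against None:
        #   end_time = None              -> timeout_time = abort_timestamp
        #   end_time before 2PC timeout  -> timeout_time = end_time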

        self.scheduler.scheduleCall(conn.connection_id, timeout_time,
                                    self._doReserveTimeout, conn)
        td = timeout_time - datetime.datetime.utcnow()
        log.msg(
            'Connection %s: reserve abort scheduled for %s UTC (%i seconds)' %
            (conn.connection_id, timeout_time.replace(microsecond=0),
             td.total_seconds()),
            system=self.log_system)

        schedule = nsa.Schedule(conn.start_time, conn.end_time)
        sc_source_stp = nsa.STP(conn.source_network, conn.source_port,
                                conn.source_label)
        sc_dest_stp = nsa.STP(conn.dest_network, conn.dest_port,
                              conn.dest_label)
        sd = nsa.Point2PointService(
            sc_source_stp, sc_dest_stp, conn.bandwidth, cnt.BIDIRECTIONAL,
            False, None)  # we fake some things due to db limitations
        crit = nsa.Criteria(conn.revision, schedule, sd)

        header = nsa.NSIHeader(
            conn.requester_nsa,
            conn.requester_nsa,
            correlation_id=correlation_id
        )  # The NSA is both requester and provider in the backend, but this might be problematic without aggregator
        yield self.parent_requester.reserveConfirmed(
            header, conn.connection_id, conn.global_reservation_id,
            conn.description, crit)

    @defer.inlineCallbacks
    def _doReserveTimeout(self, conn):

        try:
            yield state.reserveTimeout(conn)
            self.logStateUpdate(conn, 'RESERVE TIMEOUT')

            yield self._doReserveRollback(conn)

            header = nsa.NSIHeader(
                conn.requester_nsa, conn.requester_nsa
            )  # The NSA is both requester and provider in the backend, but this might be problematic without aggregator
            now = datetime.datetime.utcnow()
            # the conn.requester_nsa is somewhat problematic - the backend should really know its identity
            self.parent_requester.reserveTimeout(header, conn.connection_id,
                                                 self.getNotificationId(), now,
                                                 self.TPC_TIMEOUT,
                                                 conn.connection_id,
                                                 conn.requester_nsa)

        except Exception as e:
            log.msg('Error in reserveTimeout: %s: %s' % (type(e), e),
                    system=self.log_system)
            log.err(e)

    @defer.inlineCallbacks
    def _doReserveRollback(self, conn):

        try:
            yield state.reserveAbort(conn)
            self.logStateUpdate(conn, 'RESERVE ABORTING')

            self.scheduler.cancelCall(
                conn.connection_id
            )  # we only have this for non-timeout calls, but just cancel

            # release the resources
            src_resource = self.connection_manager.getResource(
                conn.source_port, conn.source_label)
            dst_resource = self.connection_manager.getResource(
                conn.dest_port, conn.dest_label)

            self.calendar.removeReservation(src_resource, conn.start_time,
                                            conn.end_time)
            self.calendar.removeReservation(dst_resource, conn.start_time,
                                            conn.end_time)

            yield state.reserved(
                conn
            )  # the state update is only logged below when end time has not passed, as logging start+end together looks wonky

            now = datetime.datetime.utcnow()
            if conn.end_time is not None and now > conn.end_time:
                log.msg(
                    'Connection %s: passed end time during rollback, ending connection'
                    % conn.connection_id,
                    system=self.log_system)
                yield self._doEndtime(conn)
            elif conn.end_time is not None:
                self.logStateUpdate(conn, 'RESERVE START')
                self.scheduler.scheduleCall(conn.connection_id, conn.end_time,
                                            self._doEndtime, conn)
                td = conn.end_time - datetime.datetime.utcnow()
                log.msg(
                    'Connection %s: terminate scheduled for %s UTC (%i seconds)'
                    %
                    (conn.connection_id, conn.end_time.replace(microsecond=0),
                     td.total_seconds()),
                    system=self.log_system)

        except Exception as e:
            log.msg('Error in doReserveRollback: %s: %s' % (type(e), e),
                    system=self.log_system)
            log.err(e)

    @defer.inlineCallbacks
    def _doActivate(self, conn):

        src_target = self.connection_manager.getTarget(conn.source_port,
                                                       conn.source_label)
        dst_target = self.connection_manager.getTarget(conn.dest_port,
                                                       conn.dest_label)
        try:
            log.msg('Connection %s: Activating data plane...' %
                    conn.connection_id,
                    system=self.log_system)
            yield self.connection_manager.setupLink(conn.connection_id,
                                                    src_target, dst_target,
                                                    conn.bandwidth)
        except Exception as e:
            # We need to mark failure in state machine here somehow....
            # log.err(e)  # note: this causes errors in tests
            log.msg('Connection %s: Error activating data plane: %s' %
                    (conn.connection_id, str(e)),
                    system=self.log_system)
            # should include stack trace
            conn.data_plane_active = False
            yield conn.save()

            header = nsa.NSIHeader(
                conn.requester_nsa, conn.requester_nsa
            )  # The NSA is both requester and provider in the backend, but this might be problematic without aggregator
            now = datetime.datetime.utcnow()
            service_ex = None
            self.parent_requester.errorEvent(header, conn.connection_id,
                                             self.getNotificationId(), now,
                                             'activateFailed', None,
                                             service_ex)

            defer.returnValue(None)

        try:
            conn.data_plane_active = True
            yield conn.save()
            log.msg('Connection %s: Data plane activated' %
                    (conn.connection_id),
                    system=self.log_system)

            # we might have passed end time during activation...
            end_time = conn.end_time
            now = datetime.datetime.utcnow()
            if end_time is not None and end_time < now:
                log.msg(
                    'Connection %s: passed end time during activation, scheduling immediate teardown.'
                    % conn.connection_id,
                    system=self.log_system)
                end_time = now

            if end_time is not None:
                self.scheduler.scheduleCall(conn.connection_id, end_time,
                                            self._doEndtime, conn)
                td = end_time - datetime.datetime.utcnow()
                log.msg(
                    'Connection %s: End and teardown scheduled for %s UTC (%i seconds)'
                    % (conn.connection_id, end_time.replace(microsecond=0),
                       td.total_seconds()),
                    system=self.log_system)

            data_plane_status = (True, conn.revision, True
                                 )  # active, version, consistent
            now = datetime.datetime.utcnow()
            header = nsa.NSIHeader(
                conn.requester_nsa, conn.requester_nsa
            )  # The NSA is both requester and provider in the backend, but this might be problematic without aggregator
            self.parent_requester.dataPlaneStateChange(
                header, conn.connection_id, self.getNotificationId(), now,
                data_plane_status)
        except Exception as e:
            log.msg('Error in post-activation: %s: %s' % (type(e), e),
                    system=self.log_system)
            log.err(e)