def _doReserve(self, conn, correlation_id):
    """Advance a checked reservation into HELD and confirm it upstream.

    The caller has already verified resource availability, so the state
    machine is moved straight through CHECKING into HELD.  A two-phase
    commit timeout is then scheduled so an uncommitted hold is aborted,
    and a reserveConfirmed is sent to the parent requester with criteria
    rebuilt from the stored connection record.
    """
    # we have already checked resource availability, so can progress directly through checking
    state.reserveMultiSwitch(conn, state.RESERVE_CHECKING, state.RESERVE_HELD)
    yield conn.save()
    self.logStateUpdate(conn, 'RESERVE CHECKING/HELD')

    # schedule 2PC timeout
    if self.scheduler.hasScheduledCall(conn.connection_id):
        # this means that the build scheduler made a call while we yielded
        self.scheduler.cancelCall(conn.connection_id)

    now = datetime.datetime.utcnow()
    abort_at = min(now + datetime.timedelta(seconds=self.TPC_TIMEOUT), conn.end_time)
    self.scheduler.scheduleCall(conn.connection_id, abort_at, self._doReserveTimeout, conn)

    remaining = abort_at - datetime.datetime.utcnow()
    log.msg('Connection %s: reserve abort scheduled for %s UTC (%i seconds)' %
            (conn.connection_id, abort_at.replace(microsecond=0), remaining.total_seconds()),
            system=self.log_system)

    # rebuild the service criteria from the persisted connection record
    reservation_schedule = nsa.Schedule(conn.start_time, conn.end_time)
    source_stp = nsa.STP(conn.source_network, conn.source_port, conn.source_label)
    dest_stp = nsa.STP(conn.dest_network, conn.dest_port, conn.dest_label)
    # we fake some things due to db limitations
    service_def = nsa.Point2PointService(source_stp, dest_stp, conn.bandwidth,
                                         cnt.BIDIRECTIONAL, False, None)
    criteria = nsa.Criteria(conn.revision, reservation_schedule, service_def)

    # The NSA is both requester and provider in the backend, but this might be problematic without aggregator
    header = nsa.NSIHeader(conn.requester_nsa, conn.requester_nsa, correlation_id=correlation_id)
    yield self.parent_requester.reserveConfirmed(header, conn.connection_id,
                                                 conn.global_reservation_id, conn.description, criteria)
def reserveCommit(self, header, connection_id):
    """Commit a held reservation, scheduling its end-time teardown.

    Moves the connection into RESERVE_START, marks it allocated (both in
    the same save/transaction), cancels the pending 2PC abort call,
    schedules the end-time call, and confirms the commit to the parent
    requester.

    Raises error.ConnectionGoneError if the connection has already been
    terminated (or is terminating).
    """
    log.msg('ReserveCommit request from %s. Connection ID: %s' % (header.requester_nsa, connection_id), system=self.log_system)

    conn = yield self._getConnection(connection_id, header.requester_nsa)
    if conn.lifecycle_state in (state.TERMINATING, state.TERMINATED):
        # Bug fix: the '%s' placeholder was never filled in, so the raised
        # error carried the literal '%s' instead of the connection id.
        raise error.ConnectionGoneError('Connection %s has been terminated' % connection_id)

    # the switch to reserve start and allocated must be in same transaction
    state.reserveMultiSwitch(conn, state.RESERVE_COMMITTING, state.RESERVE_START)
    conn.allocated = True
    yield conn.save()
    self.logStateUpdate(conn, 'COMMIT/RESERVED')

    # cancel abort and schedule end time call
    self.scheduler.cancelCall(connection_id)
    self.scheduler.scheduleCall(conn.connection_id, conn.end_time, self._doEndtime, conn)
    td = conn.end_time - datetime.datetime.utcnow()
    log.msg('Connection %s: End and teardown scheduled for %s UTC (%i seconds)' % (conn.connection_id, conn.end_time.replace(microsecond=0), td.total_seconds()), system=self.log_system)

    yield self.parent_requester.reserveCommitConfirmed(header, connection_id)
    defer.returnValue(connection_id)
def reserveCommit(self, header, connection_id, request_info=None):
    """Commit a held reservation after authorization.

    Authorizes the requester against the connection's ports, moves the
    connection into RESERVE_START with the allocated flag set (persisted
    together by state.reserveMultiSwitch), cancels the pending 2PC abort
    call, schedules the end-time teardown (only for reservations with a
    finite end time), and confirms the commit to the parent requester.

    Raises error.ConnectionGoneError if the connection has already been
    terminated (or is terminating).
    """
    log.msg('ReserveCommit request from %s. Connection ID: %s' % (header.requester_nsa, connection_id), system=self.log_system)

    conn = yield self._getConnection(connection_id, header.requester_nsa)
    self._authorize(conn.source_port, conn.dest_port, header, request_info)

    if conn.lifecycle_state in (state.TERMINATING, state.TERMINATED):
        # Bug fix: the '%s' placeholder was never filled in, so the raised
        # error carried the literal '%s' instead of the connection id.
        raise error.ConnectionGoneError('Connection %s has been terminated' % connection_id)

    # the switch to reserve start and allocated must be in same transaction
    # state.reserveMultiSwitch will save the state, including the allocated flag
    conn.allocated = True
    yield state.reserveMultiSwitch(conn, state.RESERVE_COMMITTING, state.RESERVE_START)
    self.logStateUpdate(conn, 'COMMIT/RESERVED')

    # cancel abort and schedule end time call
    self.scheduler.cancelCall(connection_id)
    if conn.end_time is not None:
        self.scheduler.scheduleCall(conn.connection_id, conn.end_time, self._doEndtime, conn)
        td = conn.end_time - datetime.datetime.utcnow()
        log.msg('Connection %s: End and teardown scheduled for %s UTC (%i seconds)' % (conn.connection_id, conn.end_time.replace(microsecond=0), td.total_seconds()), system=self.log_system)

    yield self.parent_requester.reserveCommitConfirmed(header, connection_id)
    defer.returnValue(connection_id)
def _doReserve(self, conn, correlation_id):
    """Advance a checked reservation into HELD and confirm it upstream.

    The caller has already verified resource availability, so the state
    machine is moved straight through CHECKING into HELD.  A two-phase
    commit timeout is then scheduled so an uncommitted hold is aborted,
    and a reserveConfirmed is sent to the parent requester with criteria
    rebuilt from the stored connection record.
    """
    # we have already checked resource availability, so can progress directly through checking
    state.reserveMultiSwitch(conn, state.RESERVE_CHECKING, state.RESERVE_HELD)
    yield conn.save()
    self.logStateUpdate(conn, 'RESERVE CHECKING/HELD')

    # schedule 2PC timeout
    if self.scheduler.hasScheduledCall(conn.connection_id):
        # this means that the build scheduler made a call while we yielded
        self.scheduler.cancelCall(conn.connection_id)

    now = datetime.datetime.utcnow()
    timeout_time = now + datetime.timedelta(seconds=self.TPC_TIMEOUT)
    # Robustness fix: reservations without a fixed end time have
    # conn.end_time == None, and min() with None raises TypeError.
    # Only clamp the abort time when an end time actually exists
    # (matching the end_time-None handling used by reserveCommit).
    if conn.end_time is not None:
        timeout_time = min(timeout_time, conn.end_time)
    self.scheduler.scheduleCall(conn.connection_id, timeout_time, self._doReserveTimeout, conn)

    td = timeout_time - datetime.datetime.utcnow()
    log.msg(
        'Connection %s: reserve abort scheduled for %s UTC (%i seconds)' %
        (conn.connection_id, timeout_time.replace(microsecond=0), td.total_seconds()),
        system=self.log_system)

    # rebuild the service criteria from the persisted connection record
    schedule = nsa.Schedule(conn.start_time, conn.end_time)
    sc_source_stp = nsa.STP(conn.source_network, conn.source_port, conn.source_label)
    sc_dest_stp = nsa.STP(conn.dest_network, conn.dest_port, conn.dest_label)
    # we fake some things due to db limitations
    sd = nsa.Point2PointService(
        sc_source_stp, sc_dest_stp, conn.bandwidth, cnt.BIDIRECTIONAL, False, None)
    crit = nsa.Criteria(conn.revision, schedule, sd)

    # The NSA is both requester and provider in the backend, but this might be problematic without aggregator
    header = nsa.NSIHeader(
        conn.requester_nsa, conn.requester_nsa, correlation_id=correlation_id
    )
    yield self.parent_requester.reserveConfirmed(
        header, conn.connection_id, conn.global_reservation_id, conn.description, crit)