def renew_slivers(self, slice_object, sliver_objects, creds,
                  expiration_time, options):
    """
    AM API V3 method.

    Set the expiration time of the specified slivers to the specified
    value. If the slice credentials expire before the specified
    expiration time, set the sliver expiration times to the slice
    credential expiration time instead.
    """
    expiration = utils.min_expire(creds, self._max_lease_time,
                                  expiration_time)

    # Lock this slice so nobody else can mess with it while we renew
    with slice_object.getLock():
        for sliver in sliver_objects:
            sliver.setExpiration(expiration)

        # Create a sliver status list for the slivers that were renewed
        sliver_status_list = \
            utils.SliverList().getStatusOfSlivers(sliver_objects)

    requested = utils._naiveUTC(dateutil.parser.parse(expiration_time))

    # If the geni_extend_alap option was provided, use the earlier of
    # the requested time and the maximum expiration as the expiration
    if 'geni_extend_alap' in options and options['geni_extend_alap']:
        if expiration < requested:
            slice_urn = slice_object.getSliceURN()
            config.logger.info(
                "Got geni_extend_alap: revising slice %s renew request "
                "from %s to %s" % (slice_urn, requested, expiration))
            requested = expiration

    if requested > expiration:
        config.logger.info('Requested expiration time exceeds the maximum')
        code = {'geni_code': constants.REQUEST_PARSE_FAILED}
        output = ('ERROR: Requested sliver expiration is greater than '
                  'either the slice expiration or the maximum lease time: ' +
                  str(config.lease_expiration_minutes) + ' minutes')
        return {'code': code, 'value': sliver_status_list,
                'output': output}

    code = {'geni_code': constants.SUCCESS}
    return {'code': code, 'value': sliver_status_list, 'output': ''}
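# Illustrative sketch only (not used by the manager): one plausible shape
# for the utils.min_expire helper that renew_slivers and the methods below
# rely on. Assumptions: each credential exposes an expiration datetime
# attribute, max_duration is a datetime.timedelta, and requested is an
# RFC 3339 string or None. The real helper may differ.
def _example_min_expire(creds, max_duration, requested=None):
    import datetime
    import dateutil.parser
    # Start with the hard cap imposed by the aggregate's max lease time
    candidates = [datetime.datetime.utcnow() + max_duration]
    for cred in creds:
        candidates.append(cred.expiration)  # assumed credential attribute
    if requested:
        # Normalize the requested time to a naive UTC datetime
        candidates.append(
            dateutil.parser.parse(requested).replace(tzinfo=None))
    # The effective expiration is the earliest of all the caps
    return min(candidates)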
def describe(self, slice_object, slivers, options):
    """
    AM API V3 method.

    Describe the status of the resources allocated to this slice.
    """
    # Lock this slice so nobody else can mess with it while we get status
    with slice_object.getLock():
        open_stack_interface.updateOperationalStatus(slice_object)

        # Get the status of the slivers
        sliver_status_list = \
            utils.SliverList().getStatusOfSlivers(slivers)

        # Generate the manifest to be returned
        manifest, error_string, error_code = \
            rspec_handler.generateManifestForSlivers(slice_object,
                                                     slivers, False,
                                                     False,
                                                     self._aggregate_urn,
                                                     self._stitching)
        if error_code != constants.SUCCESS:
            return {'code': {'geni_code': error_code},
                    'value': '', 'output': error_string}

        # Generate the return struct
        code = {'geni_code': constants.SUCCESS}
        result_struct = {'geni_rspec': manifest,
                         'geni_urn': slice_object.getSliceURN(),
                         'geni_slivers': sliver_status_list}
        return {'code': code, 'value': result_struct, 'output': ''}
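# For reference, a hedged sketch of the struct describe() returns,
# following the AM API V3 Describe conventions this code targets. All
# field values below are invented placeholders, and geni_code 0 denotes
# success per the AM API.
def _example_describe_result():
    return {
        'code': {'geni_code': 0},
        'value': {
            'geni_rspec': '<rspec type="manifest">...</rspec>',
            'geni_urn': 'urn:publicid:IDN+example+slice+myslice',
            'geni_slivers': [{
                'geni_sliver_urn': 'urn:publicid:IDN+example+sliver+vm-1',
                'geni_allocation_status': 'geni_provisioned',
                'geni_operational_status': 'geni_ready',
                'geni_expires': '2014-07-21T16:00:00',
                'geni_error': ''
            }]
        },
        'output': ''
    }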
def status(self, slice_object, slivers, options):
    """
    AM API V3 method.

    Return the status of the specified slivers.
    """
    # Lock this slice so nobody else can mess with it while we get status
    with slice_object.getLock():
        open_stack_interface.updateOperationalStatus(slice_object)

        # Create a list with the status of the specified slivers
        sliver_status_list = \
            utils.SliverList().getStatusOfSlivers(slivers)

        # Generate the return struct
        code = {'geni_code': constants.SUCCESS}
        result_struct = {'geni_urn': slice_object.getSliceURN(),
                         'geni_slivers': sliver_status_list}
        return {'code': code, 'value': result_struct, 'output': ''}
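# Hedged sketch of what utils.SliverList().getStatusOfSlivers likely
# produces for status() and the other methods here: one status dict per
# sliver. getExpiration() and the empty geni_error are assumptions; the
# real helper may differ.
def _example_status_of_slivers(slivers):
    status_list = []
    for sliver in slivers:
        status_list.append({
            'geni_sliver_urn': sliver.getSliverURN(),
            'geni_allocation_status': sliver.getAllocationState(),
            'geni_operational_status': sliver.getOperationalState(),
            'geni_expires': str(sliver.getExpiration()),  # assumed accessor
            'geni_error': ''
        })
    return status_list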
def delete(self, slice_object, sliver_objects, options):
    """
    AM API V3 method.

    Delete the specified sliver_objects. All sliver_objects are
    associated with the same slice_object.
    """
    config.logger.info('Delete called for slice %r' %
                       slice_object.getSliceURN())

    # Lock this slice so nobody else can mess with it during the deletes
    with slice_object.getLock():
        # Delete any slivers that have been provisioned.
        # First find the sliver_objects that have been provisioned:
        # provisioned slivers need their OpenStack resources deleted;
        # other slivers just need their allocation and operational
        # states changed.
        provisioned_slivers = []
        for sliver in sliver_objects:
            if sliver.getAllocationState() == constants.provisioned:
                provisioned_slivers.append(sliver)
            else:
                # Sliver has not been provisioned. Just change its
                # allocation and operational states.
                sliver.setAllocationState(constants.unallocated)
                sliver.setOperationalState(constants.stopping)

        # Delete provisioned slivers
        success = open_stack_interface.deleteSlivers(slice_object,
                                                     provisioned_slivers)

        sliver_status_list = \
            utils.SliverList().getStatusOfSlivers(sliver_objects)

        # Remove deleted slivers from the slice
        for sliver in sliver_objects:
            slice_object.removeSliver(sliver)

        ### THIS CODE SHOULD BE MOVED TO EXPIRE WHEN WE ACTUALLY EXPIRE
        ### SLIVERS AND SLICES. SLICES SHOULD BE DELETED ONLY WHEN THEY
        ### EXPIRE. FOR NOW WE DELETE THEM WHEN ALL THEIR SLIVERS ARE
        ### DELETED.
        if len(slice_object.getSlivers()) == 0:
            open_stack_interface.expireSlice(slice_object)
            # Update VMOC
            self.registerSliceToVMOC(slice_object, False)
            # Remove slice from GRAM
            SliceURNtoSliceObject.remove_slice_object(
                slice_object.getSliceURN())

        # Free all stitching VLAN allocations
        for sliver in sliver_objects:
            self._stitching.deleteAllocation(sliver.getSliverURN())

        # Free all internal VLANs back to the pool
        for sliver in sliver_objects:
            if isinstance(sliver, NetworkLink):
                tag = sliver.getVLANTag()
                if self._internal_vlans.isAllocated(tag):
                    self._internal_vlans.free(tag)

        # Persist new GramManager state
        self.persist_state()

        # Generate the return struct
        code = {'geni_code': constants.SUCCESS}
        if success:
            return {'code': code, 'value': sliver_status_list,
                    'output': ''}
        else:
            return {'code': code, 'value': sliver_status_list,
                    'output': 'Failed to delete one or more slivers'}
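# Hedged sketch of the VLAN pool interface delete() relies on
# (self._internal_vlans): a set-based pool with allocate/free/isAllocated.
# The class name, tag range, and allocate() method are assumptions showing
# the apparent contract; the real implementation may differ.
class _ExampleVLANPool(object):
    def __init__(self, low=1000, high=1999):
        self._available = set(range(low, high + 1))  # tags free for links
        self._allocated = set()

    def allocate(self):
        # Hand out an arbitrary free tag, or None if the pool is exhausted
        if not self._available:
            return None
        tag = self._available.pop()
        self._allocated.add(tag)
        return tag

    def isAllocated(self, tag):
        return tag in self._allocated

    def free(self, tag):
        # Return a tag to the pool, as delete() does for NetworkLink slivers
        self._allocated.discard(tag)
        self._available.add(tag)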
def performOperationalAction(self, slice_object, slivers, action, options):
    """
    AM API V3 method.

    Support these actions:
        geni_start   (boot if not_ready)
        geni_restart (reboot if ready)
        geni_stop    (shutdown if ready)
    as well as the GRAM-specific create_snapshot and delete_snapshot.
    """
    ret_str = ""
    if action == 'delete_snapshot':
        ret_code, ret_str = open_stack_interface._deleteImage(options)
        ret_val = {'code': {'geni_code': ret_code}, 'value': '',
                   'output': ret_str}
        GramImageInfo.refresh()
        return ret_val

    elif action == 'create_snapshot':
        if not options.get('snapshot_name') or not options.get('vm_name'):
            ret_code = constants.REQUEST_PARSE_FAILED
            ret_str = 'Must specify vm_name and snapshot_name in options'
        else:
            ret_code, ret_str = open_stack_interface._createImage(slivers,
                                                                  options)
        ret_val = {'code': {'geni_code': ret_code}, 'value': '',
                   'output': ret_str}
        GramImageInfo.refresh()
        return ret_val

    elif action in ['geni_start', 'geni_stop', 'geni_restart']:
        ret_str = ""
        for sliver_object in slivers:
            # Only perform operational actions on VMs
            if not isinstance(sliver_object, VirtualMachine):
                continue
            # Perform the operational action on the VM within OpenStack
            ret = open_stack_interface._performOperationalAction(
                sliver_object, action, options)
            if not ret:
                ret_str += ('Failed to perform ' + action + ' on ' +
                            sliver_object.getName() + '\n')
    else:
        ret_str = 'Operation not supported'

    if not len(ret_str):
        code = {'geni_code': constants.SUCCESS}
    else:
        code = {'geni_code': constants.REQUEST_PARSE_FAILED}

    sliver_status_list = \
        utils.SliverList().getStatusOfSlivers(slivers)
    ret_val = {'code': code, 'value': sliver_status_list,
               'output': ret_str}
    return ret_val
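# Hedged examples of the options dicts the snapshot actions above expect.
# The create_snapshot keys come from the checks in performOperationalAction;
# the key consumed by _deleteImage is not visible here, so the delete
# example is only a guess. All values are invented placeholders.
def _example_snapshot_options():
    create_options = {'vm_name': 'my-vm',
                      'snapshot_name': 'my-vm-snapshot-1'}
    delete_options = {'snapshot_name': 'my-vm-snapshot-1'}  # assumed key
    return create_options, delete_options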
def provision(self, slice_object, sliver_objects, creds, options):
    """
    AM API V3 method.

    Provision the slivers listed in sliver_objects, if they have not
    already been provisioned.
    """
    if len(sliver_objects) == 0:
        # No slivers specified: return an error message
        code = {'geni_code': constants.REQUEST_PARSE_FAILED}
        err_str = 'No slivers to be provisioned.'
        return {'code': code, 'value': '', 'output': err_str}

    # Make sure the slivers have been allocated before we provision
    # them. Return an error if even one of the slivers has not been
    # allocated.
    for sliver in sliver_objects:
        if sliver.getAllocationState() != constants.allocated:
            # Found a sliver that has not been allocated; return an error
            code = {'geni_code': constants.REQUEST_PARSE_FAILED}
            err_str = ('Slivers to be provisioned must have allocation '
                       'state geni_allocated')
            return {'code': code, 'value': '', 'output': err_str}

    # See if the geni_users option has been set. This option is used to
    # specify user accounts to be created on virtual machines that are
    # provisioned by this call.
    if 'geni_users' in options:
        users = options['geni_users']
    else:
        users = list()

    # Lock this slice so nobody else can mess with it during provisioning
    with slice_object.getLock():
        err_str = open_stack_interface.provisionResources(slice_object,
                                                          sliver_objects,
                                                          users, self)
        if err_str is not None:
            # We failed to provision this slice for some reason
            # (described in err_str)
            code = {'geni_code': constants.OPENSTACK_ERROR}
            self.delete(slice_object, sliver_objects, options)
            return {'code': code, 'value': '', 'output': err_str}

        # Set expiration times on the provisioned resources
        expiration = utils.min_expire(
            creds, self._max_lease_time,
            'geni_end_time' in options and options['geni_end_time'])
        for sliver in sliver_objects:
            sliver.setExpiration(expiration)

        # Generate a manifest rspec
        manifest, error_string, error_code = \
            rspec_handler.generateManifestForSlivers(slice_object,
                                                     sliver_objects,
                                                     True, False,
                                                     self._aggregate_urn,
                                                     self._stitching)
        if error_code != constants.SUCCESS:
            return {'code': {'geni_code': error_code},
                    'value': '', 'output': error_string}

        # Create a sliver status list for the slivers that were provisioned
        sliver_status_list = \
            utils.SliverList().getStatusOfSlivers(sliver_objects)

        # Persist new GramManager state
        self.persist_state()

        # Report the new slice to VMOC
        self.registerSliceToVMOC(slice_object)

        # Generate the return struct
        code = {'geni_code': constants.SUCCESS}
        result_struct = {'geni_rspec': manifest,
                         'geni_slivers': sliver_status_list}
        return {'code': code, 'value': result_struct, 'output': ''}
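# Hedged example of the geni_users option consumed by provision(). The
# shape follows the common AM API V3 convention (a list of users, each
# with a URN and SSH public keys); the URN and key values are placeholders.
def _example_geni_users_option():
    return {
        'geni_users': [{
            'urn': 'urn:publicid:IDN+example+user+alice',
            'keys': ['ssh-rsa AAAAB3... alice@example.net']
        }]
    }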
def allocate(self, slice_urn, creds, rspec, options):
    """
    AM API V3 method.

    Request reservation of GRAM resources. We assume that by the time
    we get here the caller's credentials have been verified by the gcf
    framework (see am3.py).

    Returns a return struct with the manifest and sliver statuses on
    success, or an error struct on failure.
    """
    config.logger.info('Allocate called for slice %r' % slice_urn)

    # Grab the user urn out of the slice credential
    user_urn = None
    if len(creds) == 1:
        user_urn = creds[0].gidCaller.urn

    # Check if we already have slivers for this slice
    slice_object = SliceURNtoSliceObject.get_slice_object(slice_urn)
    if slice_object is None:
        # This is a new slice at this aggregate. Create a Slice object
        # and add it to the list of slices at this AM.
        slice_object = Slice(slice_urn)
        SliceURNtoSliceObject.set_slice_object(slice_urn, slice_object)

    # Lock this slice so nobody else can mess with it during allocation
    with slice_object.getLock():
        # Parse the request rspec. Get back any error message from
        # parsing the rspec, a list of slivers created while parsing,
        # and the OF controller per link, if any.
        err_output, err_code, slivers, controller_link_info = \
            rspec_handler.parseRequestRspec(self._aggregate_urn,
                                            slice_object, rspec,
                                            self._stitching)
        if err_output is not None:
            # Something went wrong. First remove from the slice any
            # sliver objects created while parsing the bad rspec
            self.cleanup_slivers(slivers, slice_object)
            # Return an error struct
            code = {'geni_code': err_code}
            return {'code': code, 'value': '', 'output': err_output}

        # If we're associating an OpenFlow controller with any link of
        # this slice, each VM must go on its own host. If there are
        # more VMs than compute hosts, we fail.
        if len(controller_link_info) > 0:
            hosts = open_stack_interface._listHosts('compute')
            num_vms = 0
            for sliver in slivers:
                if isinstance(sliver, VirtualMachine):
                    num_vms = num_vms + 1
            if len(hosts) < num_vms:
                # Fail: more VMs requested than compute hosts on the
                # rack. Remove from this slice the sliver objects
                # created during this call to allocate before returning
                # an error struct.
                self.cleanup_slivers(slivers, slice_object)
                code = {'geni_code': constants.REQUEST_PARSE_FAILED}
                error_output = \
                    'For an OpenFlow controlled slice, limit of ' + \
                    str(len(hosts)) + ' VMs'
                return {'code': code, 'value': '',
                        'output': error_output}

        # Set the experimenter-provided controller URL (if any)
        for link_object in slice_object.getNetworkLinks():
            link_name = link_object.getName()
            if link_name in controller_link_info:
                controller_url_for_link = controller_link_info[link_name]
                link_object.setControllerURL(controller_url_for_link)

        # Set expiration times on the allocated resources
        expiration = utils.min_expire(
            creds, self._max_alloc_time,
            'geni_end_time' in options and options['geni_end_time'])
        for sliver in slivers:
            sliver.setExpiration(expiration)

        # Set the expiration time on the slice itself
        slice_object.setExpiration(expiration)

        # Allocate external VLAN tags for every stitching link and set
        # them on the slivers
        is_v2_allocation = 'AM_API_V2' in options
        for link_sliver_object in slice_object.getNetworkLinks():
            success, error_string, error_code = \
                self._stitching.allocate_external_vlan_tags(
                    link_sliver_object, rspec, is_v2_allocation)
            if not success:
                self.cleanup_slivers(slivers, slice_object)
                return {'code': {'geni_code': error_code},
                        'value': '', 'output': error_string}

        # Associate an internal VLAN tag with every link that isn't
        # already set by stitching
        if not self.allocate_internal_vlan_tags(slice_object):
            self.cleanup_slivers(slivers, slice_object)
            error_string = 'No more internal VLAN tags available'
            error_code = constants.VLAN_UNAVAILABLE
            return {'code': {'geni_code': error_code},
                    'value': '', 'output': error_string}

        # Generate a manifest rspec
        slice_object.setRequestRspec(rspec)
        for sliver in slivers:
            sliver.setRequestRspec(rspec)
        agg_urn = self._aggregate_urn
        # At this point we don't allocate VLANs: they should already
        # be allocated
        manifest, error_string, error_code = \
            rspec_handler.generateManifestForSlivers(slice_object,
                                                     slivers, True,
                                                     False, agg_urn,
                                                     self._stitching)
        if error_code != constants.SUCCESS:
            self.cleanup_slivers(slivers, slice_object)
            return {'code': {'geni_code': error_code},
                    'value': '', 'output': error_string}
        slice_object.setManifestRspec(manifest)

        # Set the user urn for all new slivers
        all_slice_slivers = slice_object.getAllSlivers()
        for sliver_urn in all_slice_slivers:
            sliver = all_slice_slivers[sliver_urn]
            if not sliver.getUserURN():
                sliver.setUserURN(user_urn)

        # Persist aggregate state
        self.persist_state()

        # Create a sliver status list for the slivers allocated by this call
        sliver_status_list = \
            utils.SliverList().getStatusOfSlivers(slivers)

        # Generate the return struct
        code = {'geni_code': constants.SUCCESS}
        result_struct = {'geni_rspec': manifest,
                         'geni_slivers': sliver_status_list}
        return {'code': code, 'value': result_struct, 'output': ''}
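# Hedged sketch of the cleanup_slivers helper that allocate() calls on
# every error path: detach the partially created sliver objects from the
# slice so a failed allocation leaves no residue. The real method may do
# more (e.g. free VLAN tags); only removeSliver, which delete() also
# uses, is grounded in this file.
def _example_cleanup_slivers(slivers, slice_object):
    for sliver in slivers:
        slice_object.removeSliver(sliver)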