def allocate(self, urn, rspec_string, expiration, options={}):
    xrn = Xrn(urn)
    slice_hrn = xrn.get_hrn()
    aggregate = OSAggregate(self)

    # collect public keys
    users = options.get('geni_users', [])
    pubkeys = []
    for user in users:
        pubkeys.extend(user['keys'])

    # assume the first user is the caller and use their context
    # for the ec2/euca api connection. Also, use the first user's
    # key as the project key.
    key_name = None
    if len(users) > 1:
        key_name = aggregate.create_instance_key(slice_hrn, users[0])

    rspec = RSpec(rspec_string)
    instance_name = hrn_to_os_slicename(slice_hrn)
    tenant_name = OSXrn(xrn=slice_hrn, type='slice').get_tenant_name()
    slivers = aggregate.run_instances(instance_name, tenant_name,
                                      rspec_string, key_name, pubkeys)

    # update all sliver allocation states, setting them to geni_allocated
    sliver_ids = [sliver.id for sliver in slivers]
    dbsession = self.api.dbsession()
    SliverAllocation.set_allocations(sliver_ids, 'geni_allocated', dbsession)

    return aggregate.describe(urns=[urn], version=rspec.version)
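# Illustrative sketch only (not part of the driver): the shape of the options
# dict that the key-collection loop in allocate() above assumes. All values
# here are hypothetical.
example_options = {
    'geni_users': [
        {'urn': 'urn:publicid:IDN+example+user+alice',        # hypothetical user URN
         'keys': ['ssh-rsa AAAAB3... alice@example.org']},    # list of public keys
    ],
    'geni_rspec_version': {'type': 'GENI', 'version': '3'},   # assumed version request
}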
def perform_operational_action(self, urns, action, options={}):
    aggregate = OSAggregate(self)
    action = action.lower()
    if action == 'geni_start':
        action_method = aggregate.start_instances
    elif action == 'geni_stop':
        action_method = aggregate.stop_instances
    elif action == 'geni_restart':
        action_method = aggregate.restart_instances
    else:
        raise UnsupportedOperation(action)

    # fault if a sliver is not fully allocated (operational status is geni_pending_allocation)
    description = self.describe(urns, None, options)
    for sliver in description['geni_slivers']:
        if sliver['geni_operational_status'] == 'geni_pending_allocation':
            raise UnsupportedOperation(
                action,
                "Sliver must be fully allocated (operational status must not be geni_pending_allocation)")

    #
    # Perform the operational action
    #
    instances = aggregate.get_instances(urns)
    for instance in instances:
        tenant_name = self.driver.shell.auth_manager.client.tenant_name
        action_method(tenant_name, instance.name, instance.id)

    geni_slivers = self.describe(urns, None, options)['geni_slivers']
    return geni_slivers
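# A minimal sketch (not part of the driver) of the same geni_* action dispatch
# used in perform_operational_action() above, written as a dict lookup. It
# assumes an OSAggregate instance named `aggregate` is already available.
def lookup_action_method(aggregate, action):
    action_methods = {
        'geni_start': aggregate.start_instances,
        'geni_stop': aggregate.stop_instances,
        'geni_restart': aggregate.restart_instances,
    }
    try:
        return action_methods[action.lower()]
    except KeyError:
        raise UnsupportedOperation(action)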
def register_authority(self, sfa_record, hrn):
    aggregate = OSAggregate(self)
    auth_name = OSXrn(xrn=hrn, type='authority').get_tenant_name()
    description = sfa_record.get('description', None)

    # Create an authority tenant
    auth_tenant = aggregate.create_tenant(tenant_name=auth_name, description=description)
    return auth_tenant
def register_user(self, sfa_record, hrn, pub_key):
    aggregate = OSAggregate(self)

    # Get the authority tenant info for initialization of a user
    auth_hrn = Xrn(hrn).get_authority_hrn()
    auth_tenant_name = OSXrn(xrn=auth_hrn, type='authority').get_tenant_name()
    auth_tenant = self.shell.auth_manager.tenants.find(name=auth_tenant_name)

    # Create a user based on the auth tenant
    user_name = sfa_record.get('hrn')
    email = sfa_record.get('email', None)
    user = aggregate.create_user(user_name=user_name, password=user_name,
                                 tenant_id=auth_tenant.id, email=email, enabled=True)

    keys = sfa_record.get('keys', [])
    for key in keys:
        keyname = OSXrn(xrn=hrn, type='user').get_slicename()
        # Update connection for the current user
        self.shell.compute_manager.connect(username=user.name, tenant=auth_tenant.name,
                                           password=user.name)
        self.shell.compute_manager.keypairs.create(name=keyname, public_key=key)

    # Update initial connection info
    self.init_compute_manager_conn()
    return user
def provision(self, urns, options=None):
    if options is None:
        options = {}
    # update sliver allocation states and set them to geni_provisioned
    aggregate = OSAggregate(self)

    # Update connection for the current client
    xrn = Xrn(urns[0], type='slice')
    user_name = xrn.get_authority_hrn() + '.' + xrn.leaf.split('-')[0]
    tenant_name = OSXrn(xrn=urns[0], type='slice').get_hrn()
    self.shell.compute_manager.connect(username=user_name, tenant=tenant_name,
                                       password=user_name)

    instances = aggregate.get_instances(xrn)
    # Allocate new floating IP per the instance
    servers = aggregate.check_floatingip(instances, True)
    aggregate.create_floatingip(tenant_name, servers)

    sliver_ids = []
    for instance in instances:
        sliver_id = OSXrn(name=('koren' + '.' + instance.name), id=instance.id,
                          type='node+openstack').get_urn()
        sliver_ids.append(sliver_id)

    dbsession = self.api.dbsession()
    SliverAllocation.set_allocations(sliver_ids, 'geni_provisioned', dbsession)

    version_manager = VersionManager()
    rspec_version = version_manager.get_version(options['geni_rspec_version'])
    return self.describe(urns, rspec_version, options=options)
def list_resources(self, slice_urn, slice_hrn, creds, options):
    cached_requested = options.get('cached', True)

    version_manager = VersionManager()
    # get the rspec's return format from options
    rspec_version = version_manager.get_version(options.get('geni_rspec_version'))
    version_string = "rspec_%s" % (rspec_version)

    # panos: adding the info option to the caching key (can be improved)
    if options.get('info'):
        version_string = version_string + "_" + options.get('info', 'default')

    # look in cache first
    if cached_requested and self.cache and not slice_hrn:
        rspec = self.cache.get(version_string)
        if rspec:
            logger.debug("OpenStackDriver.ListResources: returning cached advertisement")
            return rspec

    # panos: passing user-defined options
    aggregate = OSAggregate(self)
    rspec = aggregate.get_rspec(slice_xrn=slice_urn, version=rspec_version, options=options)

    # cache the result
    if self.cache and not slice_hrn:
        logger.debug("OpenStackDriver.ListResources: stores advertisement in cache")
        self.cache.add(version_string, rspec)

    return rspec
def create_sliver(self, slice_urn, slice_hrn, creds, rspec_string, users, options):
    aggregate = OSAggregate(self)

    # assume first user is the caller and use their context
    # for the ec2/euca api connection. Also, use the first user's
    # key as the project key.
    key_name = None
    if len(users) > 1:
        key_name = aggregate.create_instance_key(slice_hrn, users[0])

    # collect public keys
    pubkeys = []
    for user in users:
        pubkeys.extend(user['keys'])

    rspec = RSpec(rspec_string)
    instance_name = hrn_to_os_slicename(slice_hrn)
    tenant_name = OSXrn(xrn=slice_hrn, type='slice').get_tenant_name()
    instances = aggregate.run_instances(instance_name, tenant_name, rspec_string,
                                        key_name, pubkeys)

    rspec_nodes = []
    for instance in instances:
        rspec_nodes.append(aggregate.instance_to_rspec_node(slice_urn, instance))

    version_manager = VersionManager()
    manifest_version = version_manager._get_version(rspec.version.type,
                                                    rspec.version.version, 'manifest')
    manifest_rspec = RSpec(version=manifest_version, user_options=options)
    manifest_rspec.version.add_nodes(rspec_nodes)
    return manifest_rspec.toxml()
def delete(self, urns, options=None):
    if options is None:
        options = {}
    # collect sliver ids so we can update sliver allocation states after
    # we remove the slivers.
    aggregate = OSAggregate(self)
    instances = aggregate.get_instances(urns)
    sliver_ids = []
    for instance in instances:
        sliver_hrn = "%s.%s" % (self.driver.hrn, instance.id)
        sliver_ids.append(Xrn(sliver_hrn, type='sliver').urn)

        # delete the instance
        aggregate.delete_instance(instance)

    # delete sliver allocation states
    dbsession = self.api.dbsession()
    SliverAllocation.delete_allocations(sliver_ids, dbsession)

    # return geni_slivers
    geni_slivers = []
    for sliver_id in sliver_ids:
        geni_slivers.append(
            {'geni_sliver_urn': sliver_id,
             'geni_allocation_status': 'geni_unallocated',
             'geni_expires': None})
    return geni_slivers
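# Illustrative sketch only: how delete() above derives a sliver URN from the
# driver hrn and an instance id. The hrn and id values are hypothetical.
example_driver_hrn = 'topauth.openstack'          # hypothetical driver hrn
example_instance_id = 'a1b2c3d4'                  # hypothetical instance id
example_sliver_hrn = "%s.%s" % (example_driver_hrn, example_instance_id)
example_sliver_urn = Xrn(example_sliver_hrn, type='sliver').urn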
def allocate(self, urn, rspec_string, expiration, options=None):
    if options is None:
        options = {}
    xrn = Xrn(urn)
    slice_hrn = xrn.get_hrn()
    aggregate = OSAggregate(self)

    # collect public keys
    users = options.get('geni_users', [])
    pubkeys = []
    for user in users:
        pubkeys.extend(user['keys'])

    # assume the first user is the caller and use their context
    # for the ec2/euca api connection. Also, use the first user's
    # key as the project key.
    key_name = None
    if len(users) > 1:
        key_name = aggregate.create_instance_key(slice_hrn, users[0])

    rspec = RSpec(rspec_string)
    instance_name = hrn_to_os_slicename(slice_hrn)
    tenant_name = OSXrn(xrn=slice_hrn, type='slice').get_tenant_name()
    slivers = aggregate.run_instances(instance_name, tenant_name,
                                      rspec_string, key_name, pubkeys)

    # update all sliver allocation states, setting them to geni_allocated
    sliver_ids = [sliver.id for sliver in slivers]
    dbsession = self.api.dbsession()
    SliverAllocation.set_allocations(sliver_ids, 'geni_allocated', dbsession)

    return aggregate.describe(urns=[urn], version=rspec.version)
def status(self, urns, options=None):
    if options is None:
        options = {}
    aggregate = OSAggregate(self)
    desc = aggregate.describe(urns)
    status = {'geni_urn': desc['geni_urn'],
              'geni_slivers': desc['geni_slivers']}
    return status
def delete(self, urns, options={}):
    # collect sliver ids so we can update sliver allocation states after
    # we remove the slivers.
    aggregate = OSAggregate(self)
    instances = aggregate.get_instances(urns)
    sliver_ids = []
    for instance in instances:
        sliver_hrn = "%s.%s" % (self.driver.hrn, instance.id)
        sliver_ids.append(Xrn(sliver_hrn, type='sliver').urn)

        # delete the instance
        aggregate.delete_instance(instance)

    # delete sliver allocation states
    dbsession = self.api.dbsession()
    SliverAllocation.delete_allocations(sliver_ids, dbsession)

    # return geni_slivers
    geni_slivers = []
    for sliver_id in sliver_ids:
        geni_slivers.append({
            'geni_sliver_urn': sliver_id,
            'geni_allocation_status': 'geni_unallocated',
            'geni_expires': None
        })
    return geni_slivers
def perform_operational_action(self, urns, action, options=None):
    if options is None:
        options = {}
    aggregate = OSAggregate(self)
    action = action.lower()
    if action == 'geni_start':
        action_method = aggregate.start_instances
    elif action == 'geni_stop':
        action_method = aggregate.stop_instances
    elif action == 'geni_restart':
        action_method = aggregate.restart_instances
    else:
        raise UnsupportedOperation(action)

    # fault if a sliver is not fully allocated (operational status is geni_pending_allocation)
    description = self.describe(urns, None, options)
    for sliver in description['geni_slivers']:
        if sliver['geni_operational_status'] == 'geni_pending_allocation':
            raise UnsupportedOperation(
                action,
                "Sliver must be fully allocated (operational status must not be geni_pending_allocation)")

    #
    # Perform the operational action
    #
    xrn = Xrn(urns[0], type='slice')
    instances = aggregate.get_instances(xrn)
    for instance in instances:
        tenant_name = self.shell.auth_manager.client.tenant_name
        action_method(tenant_name, instance.name, instance.id)

    geni_slivers = self.describe(urns, None, options)['geni_slivers']
    return geni_slivers
def status(self, urns, options={}):
    aggregate = OSAggregate(self)
    desc = aggregate.describe(urns)
    status = {
        'geni_urn': desc['geni_urn'],
        'geni_slivers': desc['geni_slivers']
    }
    return status
def status(self, urns, options=None):
    if options is None:
        options = {}
    aggregate = OSAggregate(self)

    # TODO: Change to more dynamic
    version_manager = VersionManager()
    version_dict = {'type': 'KOREN', 'version': '1', 'content_type': 'manifest'}
    version = version_manager.get_version(version_dict)

    desc = aggregate.describe(urns, version=version, options=options)
    status = {'geni_urn': desc['geni_urn'],
              'geni_slivers': desc['geni_slivers']}
    return status
def provision(self, urns, options={}):
    # update sliver allocation states and set them to geni_provisioned
    aggregate = OSAggregate(self)
    instances = aggregate.get_instances(urns)
    sliver_ids = []
    for instance in instances:
        sliver_hrn = "%s.%s" % (self.driver.hrn, instance.id)
        sliver_ids.append(Xrn(sliver_hrn, type='sliver').urn)

    dbsession = self.api.dbsession()
    SliverAllocation.set_allocations(sliver_ids, 'geni_provisioned', dbsession)

    version_manager = VersionManager()
    rspec_version = version_manager.get_version(options['geni_rspec_version'])
    return self.describe(urns, rspec_version, options=options)
def register_slice(self, sfa_record, hrn):
    aggregate = OSAggregate(self)

    # Get the user names (SFA client names)
    users = []
    researchers = sfa_record.get('reg-researchers', [])
    pi = sfa_record.get('pi')
    if len(researchers):
        for researcher in researchers:
            name = OSXrn(xrn=researcher, type='user').get_hrn()
            users.append(name)
    elif pi:
        name = OSXrn(xrn=pi, type='user').get_hrn()
        users.append(name)
    else:
        logger.warning("You should input options with researcher(s) or pi.")
    users = list(set(users))

    # Check whether the slice hrn already has the username-slicename form or not
    # TODO: for now, we just support 1 user to make a tenant
    if sfa_record.get('hrn').find('-') == -1:
        tenant_name = (OSXrn(xrn=sfa_record.get('hrn'), type='slice').get_authority_hrn() + '.' +
                       OSXrn(xrn=users[0], type='user').get_leaf() + '-' +
                       OSXrn(xrn=sfa_record.get('hrn'), type='slice').get_leaf())
    else:
        tenant_name = (OSXrn(xrn=sfa_record.get('hrn'), type='slice').get_authority_hrn() + '.' +
                       OSXrn(xrn=sfa_record.get('hrn'), type='slice').get_leaf())

    description = sfa_record.get('description', None)
    tenant = aggregate.create_tenant(tenant_name, description)

    # Add suitable roles to the user
    admin_role = self.shell.auth_manager.roles.find(name='admin')
    member_role = self.shell.auth_manager.roles.find(name='_member_')
    if len(researchers):
        for researcher in researchers:
            researcher_name = OSXrn(xrn=researcher, type='user').get_hrn()
            user = self.shell.auth_manager.users.find(name=researcher_name)
            if self.shell.auth_manager.roles.roles_for_user(user, tenant).count(member_role) == 0:
                self.shell.auth_manager.roles.add_user_role(user, member_role, tenant)
    elif pi:
        pi_name = OSXrn(xrn=pi, type='user').get_hrn()
        user = self.shell.auth_manager.users.find(name=pi_name)
        if self.shell.auth_manager.roles.roles_for_user(user, tenant).count(admin_role) == 0:
            self.shell.auth_manager.roles.add_user_role(user, admin_role, tenant)
    else:
        logger.warning("You should input options with researcher(s) or pi.")

    return tenant
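# Illustrative sketch only: the two tenant_name forms produced by
# register_slice() above, shown with hypothetical hrn parts (plain string
# concatenation here, standing in for the OSXrn calls).
auth_hrn, user_leaf, slice_leaf = 'koren', 'alice', 'slice1'          # hypothetical values
tenant_name_plain = auth_hrn + '.' + user_leaf + '-' + slice_leaf     # 'koren.alice-slice1'
tenant_name_with_user = auth_hrn + '.' + 'alice-slice1'               # already 'user-slice' form, used as-is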
def register_federation(self, user_hrn, slice_hrn, keys, email=None):
    aggregate = OSAggregate(self)

    # Create a slice (tenant) for the federation user
    tenant = aggregate.create_tenant(tenant_name=slice_hrn, description=user_hrn)

    # Create the federation user
    user = aggregate.create_user(user_name=user_hrn, password=user_hrn,
                                 tenant_id=tenant.id, email=email, enabled=True)

    # Check if the user has the member role or not
    member_role = self.shell.auth_manager.roles.find(name='Member')
    if self.shell.auth_manager.roles.roles_for_user(user, tenant).count(member_role) == 0:
        self.shell.auth_manager.roles.add_user_role(user, member_role, tenant)

    # Check if keys exist or not
    if keys is not None:
        # Check if the user has keys or not
        if len(keys) < 1:
            key = None
        else:
            key = keys[0]
        keyname = OSXrn(xrn=user_hrn, type='user').get_slicename()

        # Update connection for the current client
        self.shell.compute_manager.connect(username=user.name, tenant=tenant.name,
                                           password=user_hrn)
        keypair_list = self.shell.compute_manager.keypairs.list()
        for keypair in keypair_list:
            if keyname == keypair.name:
                break
        else:
            self.shell.compute_manager.keypairs.create(name=keyname, public_key=key)

        # Update initial connection info
        self.init_compute_manager_conn()

    logger.info("The federation user[%s] has the slice[%s] as member role." %
                (user.name, tenant.name))
    return user
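# Hypothetical usage sketch of register_federation(); the driver instance,
# hrn values, and key are made up for illustration.
#
#   user = driver.register_federation(user_hrn='koren.alice',
#                                     slice_hrn='koren.alice-slice1',
#                                     keys=['ssh-rsa AAAAB3... alice@example.org'],
#                                     email=None)
#
# Passing keys=None skips the keypair-registration branch entirely, which is
# what the allocate() path below does when no matching tenant is found.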
def describe(self, urns, version=None, options={}):
    aggregate = OSAggregate(self)
    return aggregate.describe(urns, version=version, options=options)
def delete(self, urns, options=None):
    if options is None:
        options = {}
    aggregate = OSAggregate(self)

    # Update connection for the current client
    xrn = Xrn(urns[0], type='slice')
    user_name = xrn.get_authority_hrn() + '.' + xrn.leaf.split('-')[0]
    tenant_name = OSXrn(xrn=urns[0], type='slice').get_hrn()
    self.shell.compute_manager.connect(username=user_name, tenant=tenant_name,
                                       password=user_name)

    # collect sliver ids so we can update sliver allocation states after
    # we remove the slivers.
    instances = aggregate.get_instances(xrn)

    # Release the floating IPs of instances
    servers = aggregate.check_floatingip(instances, False)
    aggregate.delete_floatingip(servers)

    sliver_ids = []
    id_set = set()
    for instance in instances:
        sliver_id = OSXrn(name=('koren' + '.' + instance.name), id=instance.id,
                          type='node+openstack').get_urn()
        sliver_ids.append(sliver_id)
        # delete the instance related with requested tenant
        aggregate.delete_instance(instance)
        id_set.add(instance.tenant_id)

    tenant_ids = list(id_set)
    for tenant_id in tenant_ids:
        # Delete both the router(s) and interfaces related with requested tenant
        aggregate.delete_router(tenant_id=tenant_id)
        # Delete both the network and subnet related with requested tenant
        aggregate.delete_network(tenant_id=tenant_id)

    # Delete sliver allocation states
    dbsession = self.api.dbsession()
    SliverAllocation.delete_allocations(sliver_ids, dbsession)

    # Return geni_slivers
    geni_slivers = []
    for sliver_id in sliver_ids:
        geni_slivers.append(
            {'geni_sliver_urn': sliver_id,
             'geni_allocation_status': 'geni_unallocated',
             # 'geni_expires': datetime_to_string(utcparse(time.time())),
             'geni_expires': None})
    return geni_slivers
def delete_sliver(self, slice_urn, slice_hrn, creds, options):
    aggregate = OSAggregate(self)
    tenant_name = OSXrn(xrn=slice_hrn, type='slice').get_tenant_name()
    project_name = hrn_to_os_slicename(slice_hrn)
    return aggregate.delete_instances(project_name, tenant_name)
def allocate(self, urn, rspec_string, expiration, options=None):
    if options is None:
        options = {}
    aggregate = OSAggregate(self)
    rspec = RSpec(rspec_string)
    xrn = Xrn(urn)
    slice_hrn = xrn.get_hrn()
    tenant_name = OSXrn(xrn=slice_hrn, type='slice').get_hrn()
    instance_name = hrn_to_os_slicename(slice_hrn)
    tenants = self.shell.auth_manager.tenants.findall()

    # collect public keys & get the user name
    users = options.get('geni_users', [])
    pubkeys = []
    key_name = None

    if len(users) >= 1:
        for user in users:
            # TODO: We currently support one user name.
            user_name = Xrn(user.get('urn')).get_hrn()
            pubkeys.extend(user['keys'])

        for tenant in tenants:
            # Check if the tenant of the user exists in local OS or not
            if tenant_name == tenant.name:
                try:
                    self.shell.auth_manager.users.find(name=user_name)
                except:
                    user = self.register_federation(user_hrn=user_name,
                                                    slice_hrn=tenant_name,
                                                    keys=pubkeys, email=None)
                break
        else:
            user = self.register_federation(user_hrn=user_name,
                                            slice_hrn=tenant_name,
                                            keys=None, email=None)

        # Update connection for the current client
        self.shell.compute_manager.connect(username=user_name, tenant=tenant_name,
                                           password=user_name)
        keypair_list = self.shell.compute_manager.keypairs.list()
        keyname = OSXrn(xrn=user_name, type='user').get_slicename()
        for keypair in keypair_list:
            if keyname == keypair.name:
                key_name = keypair.name
                break
        else:
            raise SfaNotImplemented("No handle!")

        # Update initial connection info
        self.init_compute_manager_conn()
        # key_name = aggregate.create_instance_key(slice_hrn, users[0])

    # In case of federation or non-options
    elif len(users) < 1:
        if options.get('actual_caller_hrn') is None:
            user_name = xrn.get_authority_hrn() + '.' + xrn.leaf.split('-')[0]
        else:
            user_name = options.get('actual_caller_hrn')

        for tenant in tenants:
            # Check if the tenant of the user exists in local OS or not
            if tenant_name == tenant.name:
                try:
                    self.shell.auth_manager.users.find(name=user_name)
                except:
                    user = self.register_federation(user_hrn=user_name,
                                                    slice_hrn=tenant_name,
                                                    keys=pubkeys, email=None)
                break
        else:
            user = self.register_federation(user_hrn=user_name,
                                            slice_hrn=tenant_name,
                                            keys=None, email=None)
        # TODO: Wrapper for federation needs at least one pubkey of the user extracted by 'options'!!
        # name = OSXrn(xrn=user_name, type='user').get_slicename()
        # key_name = self.shell.compute_manager.keypairs.get(name).name

    else:
        raise SfaNotImplemented("No handle!")

    slivers = aggregate.run_instances(tenant_name, user_name, rspec_string, key_name, pubkeys)

    # Update sliver allocations
    for sliver in slivers:
        component_id = sliver.metadata.get('component_id')
        sliver_id = OSXrn(name=('koren' + '.' + sliver.name), id=sliver.id,
                          type='node+openstack').get_urn()
        record = SliverAllocation(sliver_id=sliver_id,
                                  component_id=component_id,
                                  allocation_state='geni_allocated')
        record.sync(self.api.dbsession())

    return aggregate.describe(urns=[urn], version=rspec.version)
def update_sliver(self, slice_urn, slice_hrn, rspec, creds, options):
    name = hrn_to_os_slicename(slice_hrn)
    tenant_name = OSXrn(xrn=slice_hrn, type='slice').get_tenant_name()
    aggregate = OSAggregate(self)
    return aggregate.update_instances(name)
def describe(self, urns, version=None, options=None):
    if options is None:
        options = {}
    aggregate = OSAggregate(self)
    return aggregate.describe(urns, version=version, options=options)
def list_resources(self, version=None, options=None):
    if options is None:
        options = {}
    aggregate = OSAggregate(self)
    rspec = aggregate.list_resources(version=version, options=options)
    return rspec
def stop_slice(self, slice_urn, slice_hrn, creds):
    tenant_name = OSXrn(xrn=slice_hrn, type='slice').get_tenant_name()
    name = OSXrn(xrn=slice_urn).name
    aggregate = OSAggregate(self)
    return aggregate.stop_instances(name, tenant_name)
def list_resources(self, version=None, options={}):
    aggregate = OSAggregate(self)
    rspec = aggregate.list_resources(version=version, options=options)
    return rspec