def test_import_public_key(self):
    """Verify import_public_key stores key data and derives fingerprints."""
    # Case 1: the user provides name, public key and fingerprint explicitly.
    result1 = self.cloud.import_public_key(self.context,
                                           'testimportkey1',
                                           'mytestpubkey',
                                           'mytestfprint')
    self.assertTrue(result1)
    keydata = db.key_pair_get(self.context,
                              self.context.user.id,
                              'testimportkey1')
    self.assertEqual('mytestpubkey', keydata['public_key'])
    self.assertEqual('mytestfprint', keydata['fingerprint'])

    # Case 2: the user omits the fingerprint, so it must be computed from
    # the public key.  The fixture directory holds a dummy key alongside
    # its known-good fingerprint.
    pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
    # BUG FIX: the original called "f.close" without parentheses, which
    # never actually closed the files; context managers close reliably.
    with open(pubkey_path + '/dummy.pub', 'r') as f:
        dummypub = f.readline().rstrip()
    with open(pubkey_path + '/dummy.fingerprint', 'r') as f:
        dummyfprint = f.readline().rstrip()

    result2 = self.cloud.import_public_key(self.context,
                                           'testimportkey2',
                                           dummypub)
    self.assertTrue(result2)
    keydata = db.key_pair_get(self.context,
                              self.context.user.id,
                              'testimportkey2')
    self.assertEqual(dummypub, keydata['public_key'])
    self.assertEqual(dummyfprint, keydata['fingerprint'])
def test_get_by_name(self):
    """KeyPair.get_by_name should return the row the DB layer hands back."""
    self.mox.StubOutWithMock(db, 'key_pair_get')
    expected = fake_keypair
    db.key_pair_get(self.context, 'fake-user',
                    'foo-keypair').AndReturn(expected)
    self.mox.ReplayAll()
    fetched = keypair.KeyPair.get_by_name(self.context, 'fake-user',
                                          'foo-keypair')
    self.compare_obj(fetched, expected)
def test_get_by_name(self):
    """Fetching by name must delegate to db.key_pair_get and wrap the row."""
    self.mox.StubOutWithMock(db, 'key_pair_get')
    db.key_pair_get(
        self.context, 'fake-user', 'foo-keypair').AndReturn(fake_keypair)
    self.mox.ReplayAll()

    result = keypair.KeyPair.get_by_name(
        self.context, 'fake-user', 'foo-keypair')

    self.compare_obj(result, fake_keypair)
def create(self, req, body):
    """Create or import a keypair for the calling user.

    When only a name is sent, a new key pair is generated and the
    response carries the private_key and fingerprint.  When a
    public_key is sent as well, that existing ssh key is imported.

    params: keypair object with:
        name (required) - string
        public_key (optional) - string
    """
    context = req.environ['nova.context']
    authorize(context)

    params = body['keypair']
    name = params['name']
    self._validate_keypair_name(name)

    if not 0 < len(name) < 256:
        raise webob.exc.HTTPBadRequest(explanation=_(
            'Keypair name must be between 1 and 255 characters long'))

    # NOTE(ja): generation is slow, so shortcut invalid name exception
    try:
        db.key_pair_get(context, context.user_id, name)
    except exception.NotFound:
        pass
    else:
        raise webob.exc.HTTPConflict(
            explanation=_("Key pair '%s' already exists.") % name)

    keypair = {'user_id': context.user_id, 'name': name}

    # Enforce the per-user keypair quota before doing any slow work.
    if quota.allowed_key_pairs(context, 1) < 1:
        raise webob.exc.HTTPRequestEntityTooLarge(
            explanation=_("Quota exceeded, too many key pairs."),
            headers={'Retry-After': 0})

    if 'public_key' in params:
        # Import path: fingerprint the caller-supplied key.
        try:
            fingerprint = crypto.generate_fingerprint(params['public_key'])
        except exception.InvalidKeypair:
            raise webob.exc.HTTPBadRequest(
                explanation=_("Keypair data is invalid"))
        keypair['public_key'] = params['public_key']
        keypair['fingerprint'] = fingerprint
    else:
        # Generation path: build a brand new key pair.
        generated = self._gen_key()
        keypair['private_key'] = generated['private_key']
        keypair['public_key'] = generated['public_key']
        keypair['fingerprint'] = generated['fingerprint']

    db.key_pair_create(context, keypair)
    return {'keypair': keypair}
def create(self, req, body):
    """Create a new keypair or import an existing public key.

    A request containing only a name generates a fresh key pair and the
    response includes the private_key and fingerprint.  A request that
    also carries a public_key imports that ssh key instead.

    params: keypair object with:
        name (required) - string
        public_key (optional) - string
    """
    context = req.environ['nova.context']
    authorize(context)
    params = body['keypair']
    name = params['name']
    self._validate_keypair_name(name)

    if not 0 < len(name) < 256:
        msg = _('Keypair name must be between 1 and 255 characters long')
        raise webob.exc.HTTPBadRequest(explanation=msg)

    # NOTE(ja): generation is slow, so shortcut invalid name exception
    try:
        db.key_pair_get(context, context.user_id, name)
    except exception.NotFound:
        pass
    else:
        msg = _("Key pair '%s' already exists.") % name
        raise webob.exc.HTTPConflict(explanation=msg)

    keypair = {'user_id': context.user_id, 'name': name}

    if 'public_key' in params:
        # Import path: fingerprint the caller-supplied key.
        try:
            fingerprint = crypto.generate_fingerprint(params['public_key'])
        except exception.InvalidKeypair:
            msg = _("Keypair data is invalid")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        keypair['public_key'] = params['public_key']
        keypair['fingerprint'] = fingerprint
    else:
        # Generation path: create a brand new key pair.
        generated = self._gen_key()
        keypair['private_key'] = generated['private_key']
        keypair['public_key'] = generated['public_key']
        keypair['fingerprint'] = generated['fingerprint']

    db.key_pair_create(context, keypair)
    return {'keypair': keypair}
def create(self, req, body):
    """Generate a keypair, or import one when a public_key is supplied.

    Sending name alone generates a key and returns private_key and
    fingerprint; sending public_key adds an existing ssh key.

    params: keypair object with:
        name (required) - string
        public_key (optional) - string
    """
    context = req.environ["nova.context"]
    authorize(context)

    params = body["keypair"]
    name = params["name"]
    self._validate_keypair_name(name)

    if not 0 < len(name) < 256:
        msg = _("Keypair name must be between 1 and 255 characters long")
        raise webob.exc.HTTPBadRequest(explanation=msg)

    # NOTE(ja): generation is slow, so shortcut invalid name exception
    try:
        db.key_pair_get(context, context.user_id, name)
    except exception.NotFound:
        pass
    else:
        msg = _("Key pair '%s' already exists.") % name
        raise webob.exc.HTTPConflict(explanation=msg)

    keypair = {"user_id": context.user_id, "name": name}

    if "public_key" in params:
        # Import: derive the fingerprint from the supplied public key.
        try:
            fingerprint = crypto.generate_fingerprint(params["public_key"])
        except exception.InvalidKeypair:
            raise webob.exc.HTTPBadRequest(
                explanation=_("Keypair data is invalid"))
        keypair.update(public_key=params["public_key"],
                       fingerprint=fingerprint)
    else:
        # Generate: build a new private/public pair server-side.
        generated = self._gen_key()
        keypair.update(private_key=generated["private_key"],
                       public_key=generated["public_key"],
                       fingerprint=generated["fingerprint"])

    db.key_pair_create(context, keypair)
    return {"keypair": keypair}
def create(self, req, body):
    """Create or import a keypair.

    Sending only a name generates a key and returns the private_key and
    fingerprint; sending a public_key imports an existing ssh key.

    params: keypair object with:
        name (required) - string
        public_key (optional) - string
    """
    context = req.environ['nova.context']
    params = body['keypair']
    name = params['name']

    if not 0 < len(name) < 256:
        msg = _('Keypair name must be between 1 and 255 characters long')
        raise webob.exc.HTTPBadRequest(explanation=msg)

    # NOTE(ja): generation is slow, so shortcut invalid name exception
    try:
        db.key_pair_get(context, context.user_id, name)
    except exception.NotFound:
        pass
    else:
        raise exception.KeyPairExists(key_name=name)

    keypair = {'user_id': context.user_id, 'name': name}

    if 'public_key' in params:
        # Import: fingerprint the supplied ssh public key.
        try:
            fingerprint = crypto.generate_fingerprint(params['public_key'])
        except exception.InvalidKeypair:
            msg = _("Keypair data is invalid")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        keypair['public_key'] = params['public_key']
        keypair['fingerprint'] = fingerprint
    else:
        # Generate: produce a fresh pair and copy its fields over.
        generated_key = self._gen_key()
        for field in ('private_key', 'public_key', 'fingerprint'):
            keypair[field] = generated_key[field]

    db.key_pair_create(context, keypair)
    return {'keypair': keypair}
def create(self):
    """Persist this keypair, refusing to overwrite an existing record."""
    if self.obj_attr_is_set('id'):
        raise exception.ObjectActionError(action='create',
                                          reason='already created')
    # NOTE(danms): Check to see if it exists in the old DB before
    # letting them create in the API DB, since we won't get protection
    # from the UC.
    try:
        db.key_pair_get(self._context, self.user_id, self.name)
    except exception.KeypairNotFound:
        pass
    else:
        raise exception.KeyPairExists(key_name=self.name)
    self._create()
def create(self, req, body):
    """Create or import a keypair.

    Sending only a name generates a key and returns the private_key and
    fingerprint; sending a public_key imports that existing ssh key.

    params: keypair object with:
        name (required) - string
        public_key (optional) - string
    """
    context = req.environ['nova.context']
    params = body['keypair']
    name = params['name']

    # NOTE(ja): generation is slow, so shortcut invalid name exception
    try:
        db.key_pair_get(context, context.user_id, name)
        raise exception.KeyPairExists(key_name=name)
    except exception.NotFound:
        pass

    keypair = {'user_id': context.user_id, 'name': name}

    # import if public_key is sent
    if 'public_key' in params:
        tmpdir = tempfile.mkdtemp()
        try:
            fn = os.path.join(tmpdir, 'import.pub')
            # 'with' guarantees the file is flushed and closed before
            # the fingerprint is computed from it.
            with open(fn, 'w') as pub:
                pub.write(params['public_key'])
            fingerprint = crypto.generate_fingerprint(fn)
        finally:
            # BUG FIX: the temporary directory used to leak whenever
            # generate_fingerprint raised; always clean it up.
            shutil.rmtree(tmpdir)
        keypair['public_key'] = params['public_key']
        keypair['fingerprint'] = fingerprint
    else:
        generated_key = self._gen_key()
        keypair['private_key'] = generated_key['private_key']
        keypair['public_key'] = generated_key['public_key']
        keypair['fingerprint'] = generated_key['fingerprint']

    db.key_pair_create(context, keypair)
    return {'keypair': keypair}
def get_by_name(cls, context, user_id, name, localonly=False):
    """Load a keypair by name for a user.

    The API database is consulted first unless ``localonly`` is set;
    the main database is the fallback and raises if the key is missing
    there as well.
    """
    record = None
    if not localonly:
        try:
            record = cls._get_from_db(context, user_id, name)
        except exception.KeypairNotFound:
            # Not in the API DB; fall through to the main DB lookup.
            record = None
    if record is None:
        record = db.key_pair_get(context, user_id, name)
    return cls._from_db_object(context, cls(), record)
def _gen_key(self, context, user_id, key_name):
    """Generate a key pair for a user and persist the public half.

    This is a module level method because it is slow and we need to
    defer it into a process pool.
    """
    # NOTE(vish): generating key pair is slow so check for legal
    #             creation before creating key_pair
    try:
        db.key_pair_get(context, user_id, key_name)
    except exception.NotFound:
        pass
    else:
        raise exception.KeyPairExists(key_name=key_name)

    private_key, public_key, fingerprint = crypto.generate_key_pair()
    db.key_pair_create(context, {'user_id': user_id,
                                 'name': key_name,
                                 'public_key': public_key,
                                 'fingerprint': fingerprint})
    # The private half is returned to the caller and never stored.
    return {'private_key': private_key, 'fingerprint': fingerprint}
def test_key_generation(self):
    """Generated private key must pair with the stored public key."""
    created = self._create_key('test')
    rsa_key = RSA.load_key_string(created['private_key'],
                                  callback=lambda: None)
    buf = BIO.MemoryBuffer()
    rsa_key.save_pub_key_bio(buf)
    converted = crypto.ssl_pub_to_ssh_pub(buf.read())
    stored = db.key_pair_get(self.context, self.context.user.id,
                             'test')['public_key']
    # assert key fields are equal
    self.assertEqual(stored.split(" ")[1].strip(),
                     converted.split(" ")[1].strip())
def test_key_generation(self):
    """The ssh form of the private key must match the stored public key."""
    result = self._create_key('test')

    key = RSA.load_key_string(result['private_key'], callback=lambda: None)
    bio = BIO.MemoryBuffer()
    key.save_pub_key_bio(bio)
    ssh_form = crypto.ssl_pub_to_ssh_pub(bio.read())

    db_row = db.key_pair_get(self.context, self.context.user.id, 'test')
    public_key = db_row['public_key']

    # assert key fields are equal (field 1 is the base64 key material)
    self.assertEqual(public_key.split(" ")[1].strip(),
                     ssh_form.split(" ")[1].strip())
def show(self, req, resp_obj, id):
    """Decorate a keypair "show" response.

    Adds 'created_at' (converted to local timezone) and
    'private_key_url' (a NOS download URL, or the string 'expired'
    when no URL is available) to the keypair dict in resp_obj.
    """
    context = req.environ['nova.context']
    authorize(context)
    self._init_nos_api(context)
    resp_keypair = resp_obj.obj['keypair']
    key_name = resp_keypair['name']
    # Fetch the stored row to read created_at and fingerprint.
    keypair = db.key_pair_get(context, context.user_id, key_name)
    created_at_local_tz = self._tz_utc_to_local(keypair['created_at'])
    # NOTE(hzyangtk): when fingerprint is end with '.create', remove it.
    # The '.create' suffix marks keypairs that were generated rather
    # than imported; strip the 7-character suffix before display.
    is_create = keypair['fingerprint'].endswith('.create')
    if is_create:
        keypair['fingerprint'] = keypair['fingerprint'][:-7]
    # Only generated keypairs (not imported ones) have a private key
    # stored in NOS, and only while the download window is open.
    if FLAGS.keypairs_connect_nos and is_create:
        create_timestamp = self._datetime_to_timestamp(created_at_local_tz)
        if not self._expire_time_check(create_timestamp):
            # URL validity runs self.expires_time past creation.
            # NOTE(review): assumed to be in seconds — confirm.
            expires = long(create_timestamp + self.expires_time)
            # Object name mirrors the one used at upload time: prefer
            # "<user>_<name>.private", fall back to the bare fingerprint.
            if context.user_name is not None:
                private_key_name = context.user_name + '_' \
                    + keypair['name'] \
                    + '.private'
            else:
                private_key_name = \
                    keypair['fingerprint'].replace(':', '')
            keypair['private_key_url'] = self.call_nos.get_object_url(
                self.bucket_name, private_key_name, expires,
                self.use_domain)
    # 'expired' is the sentinel shown once no URL could be produced.
    resp_keypair['private_key_url'] = keypair.get('private_key_url',
                                                  'expired')
    resp_keypair['created_at'] = self._datetime_to_string(
        created_at_local_tz)
def create_instance(self, req, body, create_method):
    """Validate the shared parameters of a server-create request.

    Resolves the requested (or default) keypair and verifies the
    requested image, kernel and ramdisk are accessible before the
    actual create is performed.
    """
    if not body:
        raise faults.Fault(exc.HTTPUnprocessableEntity())

    context = req.environ['nova.context']
    password = self.controller._get_server_admin_password(body['server'])

    key_name = body['server'].get('key_name')
    key_data = None
    if key_name:
        try:
            key_pair = db.key_pair_get(context, context.user_id, key_name)
            key_name = key_pair['name']
            key_data = key_pair['public_key']
        # BUG FIX: was a bare "except:", which also swallowed
        # SystemExit/KeyboardInterrupt; narrow to Exception.
        except Exception:
            # BUG FIX: interpolate AFTER the translation lookup so the
            # catalog key matches the literal format string.
            msg = _("Can not load the requested key %s") % key_name
            return faults.Fault(exc.HTTPBadRequest(msg))
    else:
        # No key requested: fall back to the user's first keypair, if any.
        key_name = None
        key_data = None
        key_pairs = db.key_pair_get_all_by_user(context, context.user_id)
        if key_pairs:
            key_pair = key_pairs[0]
            key_name = key_pair['name']
            key_data = key_pair['public_key']

    image_href = self.controller._image_ref_from_req_data(body)
    try:
        image_service, image_id = nova.image.get_image_service(image_href)
        kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
            req, image_service, image_id)
        images = set([str(x['id']) for x in image_service.index(context)])
        # NOTE: assert is stripped under -O; acceptable here only because
        # any failure is converted to HTTPBadRequest below.
        assert str(image_id) in images
    except Exception as e:
        # BUG FIX: translate first, then interpolate (see above).
        msg = _("Cannot find requested image %(image_href)s: %(e)s") \
            % locals()
        raise faults.Fault(exc.HTTPBadRequest(explanation=msg))
def get_by_name(cls, context, user_id, name):
    """Look up a user's keypair by name and wrap it in an object."""
    record = db.key_pair_get(context, user_id, name)
    keypair_obj = cls()
    return cls._from_db_object(context, keypair_obj, record)
def create(self, context, instance_type, image_id, kernel_id=None,
           ramdisk_id=None, min_count=1, max_count=1, display_name='',
           display_description='', key_name=None, key_data=None,
           security_group='default', availability_zone=None,
           user_data=None, metadata={}, injected_files=None):
    """Create the number and type of instances requested.

    Verifies that quota and other arguments are valid, resolves image,
    kernel/ramdisk, security groups and keypair data, writes one DB
    record per instance and casts each one to the scheduler.

    Returns a list of plain dicts, one per created instance.
    """
    # NOTE(review): ``metadata={}`` is a mutable default argument shared
    # across calls — callers should never mutate it; consider None.
    if not instance_type:
        instance_type = instance_types.get_default_instance_type()
    num_instances = quota.allowed_instances(context, max_count,
                                            instance_type)
    if num_instances < min_count:
        pid = context.project_id
        LOG.warn(_("Quota exceeeded for %(pid)s,"
                   " tried to run %(min_count)s instances") % locals())
        raise quota.QuotaError(_("Instance quota exceeded. You can only "
                                 "run %s more instances of this type.") %
                               num_instances, "InstanceLimitExceeded")
    self._check_metadata_properties_quota(context, metadata)
    self._check_injected_file_quota(context, injected_files)

    image = self.image_service.show(context, image_id)

    os_type = None
    if 'properties' in image and 'os_type' in image['properties']:
        os_type = image['properties']['os_type']

    # Fall back to the kernel/ramdisk recorded on the image itself.
    if kernel_id is None:
        kernel_id = image['properties'].get('kernel_id', None)
    if ramdisk_id is None:
        ramdisk_id = image['properties'].get('ramdisk_id', None)
    # FIXME(sirp): is there a way we can remove null_kernel?
    # No kernel and ramdisk for raw images
    if kernel_id == str(FLAGS.null_kernel):
        kernel_id = None
        ramdisk_id = None
        LOG.debug(_("Creating a raw instance"))
    # Make sure we have access to kernel and ramdisk (if not raw)
    logging.debug("Using Kernel=%s, Ramdisk=%s" % (kernel_id, ramdisk_id))
    if kernel_id:
        self.image_service.show(context, kernel_id)
    if ramdisk_id:
        self.image_service.show(context, ramdisk_id)

    # Normalize security_group to a list and resolve names to ids.
    if security_group is None:
        security_group = ['default']
    if not type(security_group) is list:
        security_group = [security_group]

    security_groups = []
    self.ensure_default_security_group(context)
    for security_group_name in security_group:
        group = db.security_group_get_by_name(context,
                                              context.project_id,
                                              security_group_name)
        security_groups.append(group['id'])

    # Look up the public key only when the caller gave a name but no data.
    if key_data is None and key_name:
        key_pair = db.key_pair_get(context, context.user_id, key_name)
        key_data = key_pair['public_key']

    # Fields shared by every instance in this request.
    base_options = {
        'reservation_id': utils.generate_uid('r'),
        'image_id': image_id,
        'kernel_id': kernel_id or '',
        'ramdisk_id': ramdisk_id or '',
        'state': 0,
        'state_description': 'scheduling',
        'user_id': context.user_id,
        'project_id': context.project_id,
        'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
        'instance_type_id': instance_type['id'],
        'memory_mb': instance_type['memory_mb'],
        'vcpus': instance_type['vcpus'],
        'local_gb': instance_type['local_gb'],
        'display_name': display_name,
        'display_description': display_description,
        'user_data': user_data or '',
        'key_name': key_name,
        'key_data': key_data,
        'locked': False,
        'metadata': metadata,
        'availability_zone': availability_zone,
        'os_type': os_type}
    elevated = context.elevated()
    instances = []
    LOG.debug(_("Going to run %s instances..."), num_instances)
    for num in range(num_instances):
        instance = dict(mac_address=utils.generate_mac(),
                        launch_index=num,
                        **base_options)
        instance = self.db.instance_create(context, instance)
        instance_id = instance['id']

        elevated = context.elevated()
        if not security_groups:
            security_groups = []
        for security_group_id in security_groups:
            self.db.instance_add_security_group(elevated,
                                                instance_id,
                                                security_group_id)

        # Set sane defaults if not specified
        updates = dict(hostname=self.hostname_factory(instance_id))
        if (not hasattr(instance, 'display_name') or
                instance.display_name is None):
            updates['display_name'] = "Server %s" % instance_id

        instance = self.update(context, instance_id, **updates)
        instances.append(instance)

        pid = context.project_id
        uid = context.user_id
        LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
                    " instance %(instance_id)s") % locals())
        # One cast per instance; the scheduler picks a host for each.
        rpc.cast(context,
                 FLAGS.scheduler_topic,
                 {"method": "run_instance",
                  "args": {"topic": FLAGS.compute_topic,
                           "instance_id": instance_id,
                           "availability_zone": availability_zone,
                           "injected_files": injected_files}})

        for group_id in security_groups:
            self.trigger_security_group_members_refresh(elevated, group_id)

    return [dict(x.iteritems()) for x in instances]
def create(self, req):
    """Creates a new server for a given user.

    Resolves the requested (or default) keypair, image, flavor and
    personality files, then asks the compute API to build one instance
    and returns its detail view with the admin password attached.
    """
    env = self._deserialize_create(req)
    if not env:
        return faults.Fault(exc.HTTPUnprocessableEntity())

    context = req.environ['nova.context']
    password = self._get_server_admin_password(env['server'])

    key_name = env['server'].get('key_name')
    key_data = None
    if key_name:
        try:
            key_pair = db.key_pair_get(context, context.user_id, key_name)
            key_name = key_pair['name']
            key_data = key_pair['public_key']
        # BUG FIX: was a bare "except:" which also swallowed
        # SystemExit/KeyboardInterrupt.
        except Exception:
            # BUG FIX: interpolate after the translation lookup so the
            # message catalog key matches the literal format string.
            msg = _("Can not load the requested key %s") % key_name
            return faults.Fault(exc.HTTPBadRequest(msg))
    else:
        # backwards compatibility: default to the user's first keypair
        key_pairs = auth_manager.AuthManager.get_key_pairs(context)
        if key_pairs:
            key_pair = key_pairs[0]
            key_name = key_pair['name']
            key_data = key_pair['public_key']

    image_id = self._image_id_from_req_data(env)
    kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
        req, image_id)

    personality = env['server'].get('personality')
    injected_files = []
    if personality:
        injected_files = self._get_injected_files(personality)

    flavor_id = self._flavor_id_from_req_data(env)

    if not 'name' in env['server']:
        msg = _("Server name is not defined")
        return exc.HTTPBadRequest(msg)

    # BUG FIX: removed leftover debug statement (print "4444").
    name = env['server']['name']
    self._validate_server_name(name)
    name = name.strip()

    try:
        inst_type = \
            instance_types.get_instance_type_by_flavor_id(flavor_id)
        (inst,) = self.compute_api.create(
            context,
            inst_type,
            image_id,
            kernel_id=kernel_id,
            ramdisk_id=ramdisk_id,
            display_name=name,
            display_description=name,
            key_name=key_name,
            key_data=key_data,
            user_data=env['server'].get('user_data'),
            metadata=env['server'].get('metadata', {}),
            injected_files=injected_files,
            admin_password=password)
    except quota.QuotaError as error:
        self._handle_quota_error(error)

    inst['instance_type'] = inst_type
    inst['image_id'] = image_id

    builder = self._get_view_builder(req)
    server = builder.build(inst, is_detail=True)
    server['server']['adminPass'] = password
    return server
def create(self, context, instance_type, image_id, kernel_id=None,
           ramdisk_id=None, min_count=1, max_count=1, display_name='',
           display_description='', key_name=None, key_data=None,
           security_group='default', availability_zone=None,
           user_data=None, metadata={}, injected_files=None):
    """Create the number and type of instances requested.

    Verifies that quota and other arguments are valid, resolves image,
    kernel/ramdisk, security groups and keypair data, writes one DB
    record per instance and casts each one to the scheduler.

    Returns a list of plain dicts, one per created instance.
    """
    # NOTE(review): ``metadata={}`` is a mutable default argument shared
    # across calls — callers should never mutate it; consider None.
    if not instance_type:
        instance_type = instance_types.get_default_instance_type()
    num_instances = quota.allowed_instances(context, max_count,
                                            instance_type)
    if num_instances < min_count:
        pid = context.project_id
        LOG.warn(_("Quota exceeeded for %(pid)s,"
                   " tried to run %(min_count)s instances") % locals())
        raise quota.QuotaError(_("Instance quota exceeded. You can only "
                                 "run %s more instances of this type.") %
                               num_instances, "InstanceLimitExceeded")
    self._check_metadata_properties_quota(context, metadata)
    self._check_injected_file_quota(context, injected_files)

    image = self.image_service.show(context, image_id)

    os_type = None
    if 'properties' in image and 'os_type' in image['properties']:
        os_type = image['properties']['os_type']

    # Fall back to the kernel/ramdisk recorded on the image itself.
    if kernel_id is None:
        kernel_id = image['properties'].get('kernel_id', None)
    if ramdisk_id is None:
        ramdisk_id = image['properties'].get('ramdisk_id', None)
    # FIXME(sirp): is there a way we can remove null_kernel?
    # No kernel and ramdisk for raw images
    if kernel_id == str(FLAGS.null_kernel):
        kernel_id = None
        ramdisk_id = None
        LOG.debug(_("Creating a raw instance"))
    # Make sure we have access to kernel and ramdisk (if not raw)
    logging.debug("Using Kernel=%s, Ramdisk=%s" % (kernel_id, ramdisk_id))
    if kernel_id:
        self.image_service.show(context, kernel_id)
    if ramdisk_id:
        self.image_service.show(context, ramdisk_id)

    # Normalize security_group to a list and resolve names to ids.
    if security_group is None:
        security_group = ['default']
    if not type(security_group) is list:
        security_group = [security_group]

    security_groups = []
    self.ensure_default_security_group(context)
    for security_group_name in security_group:
        group = db.security_group_get_by_name(context,
                                              context.project_id,
                                              security_group_name)
        security_groups.append(group['id'])

    # Look up the public key only when the caller gave a name but no data.
    if key_data is None and key_name:
        key_pair = db.key_pair_get(context, context.user_id, key_name)
        key_data = key_pair['public_key']

    # Fields shared by every instance in this request.
    base_options = {
        'reservation_id': utils.generate_uid('r'),
        'image_id': image_id,
        'kernel_id': kernel_id or '',
        'ramdisk_id': ramdisk_id or '',
        'state': 0,
        'state_description': 'scheduling',
        'user_id': context.user_id,
        'project_id': context.project_id,
        'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
        'instance_type_id': instance_type['id'],
        'memory_mb': instance_type['memory_mb'],
        'vcpus': instance_type['vcpus'],
        'local_gb': instance_type['local_gb'],
        'display_name': display_name,
        'display_description': display_description,
        'user_data': user_data or '',
        'key_name': key_name,
        'key_data': key_data,
        'locked': False,
        'metadata': metadata,
        'availability_zone': availability_zone,
        'os_type': os_type}
    elevated = context.elevated()
    instances = []
    LOG.debug(_("Going to run %s instances..."), num_instances)
    for num in range(num_instances):
        instance = dict(mac_address=utils.generate_mac(),
                        launch_index=num,
                        **base_options)
        instance = self.db.instance_create(context, instance)
        instance_id = instance['id']

        elevated = context.elevated()
        if not security_groups:
            security_groups = []
        for security_group_id in security_groups:
            self.db.instance_add_security_group(elevated,
                                                instance_id,
                                                security_group_id)

        # Set sane defaults if not specified
        updates = dict(hostname=self.hostname_factory(instance_id))
        if (not hasattr(instance, 'display_name') or
                instance.display_name is None):
            updates['display_name'] = "Server %s" % instance_id

        instance = self.update(context, instance_id, **updates)
        instances.append(instance)

        pid = context.project_id
        uid = context.user_id
        LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
                    " instance %(instance_id)s") % locals())
        # One cast per instance; the scheduler picks a host for each.
        rpc.cast(context,
                 FLAGS.scheduler_topic,
                 {"method": "run_instance",
                  "args": {"topic": FLAGS.compute_topic,
                           "instance_id": instance_id,
                           "availability_zone": availability_zone,
                           "injected_files": injected_files}})

        for group_id in security_groups:
            self.trigger_security_group_members_refresh(elevated, group_id)

    return [dict(x.iteritems()) for x in instances]
def _check_create_parameters(self, context, instance_type,
                             image_href, kernel_id=None, ramdisk_id=None,
                             min_count=None, max_count=None,
                             display_name='', display_description='',
                             key_name=None, key_data=None,
                             security_group='default',
                             availability_zone=None, user_data=None,
                             metadata={}, injected_files=None,
                             admin_password=None, zone_blob=None,
                             reservation_id=None):
    """Verify all the input parameters regardless of the provisioning
    strategy being performed.

    Checks quotas, resolves the image (and kernel/ramdisk), keypair and
    reservation id, and returns the tuple
    (num_instances, base_options, image) for the caller to provision with.
    """
    # NOTE(review): ``metadata={}`` is a mutable default argument shared
    # across calls — callers should never mutate it; consider None.
    if not instance_type:
        instance_type = instance_types.get_default_instance_type()
    if not min_count:
        min_count = 1
    if not max_count:
        max_count = min_count

    num_instances = quota.allowed_instances(context, max_count,
                                            instance_type)
    if num_instances < min_count:
        pid = context.project_id
        LOG.warn(_("Quota exceeeded for %(pid)s,"
                   " tried to run %(min_count)s instances") % locals())
        if num_instances <= 0:
            message = _("Instance quota exceeded. You cannot run any "
                        "more instances of this type.")
        else:
            message = _("Instance quota exceeded. You can only run %s "
                        "more instances of this type.") % num_instances
        raise quota.QuotaError(message, "InstanceLimitExceeded")

    self._check_metadata_properties_quota(context, metadata)
    self._check_injected_file_quota(context, injected_files)

    (image_service, image_id) = nova.image.get_image_service(image_href)
    image = image_service.show(context, image_id)

    # Optional image properties; absent keys simply leave None.
    os_type = None
    if 'properties' in image and 'os_type' in image['properties']:
        os_type = image['properties']['os_type']

    architecture = None
    if 'properties' in image and 'arch' in image['properties']:
        architecture = image['properties']['arch']

    vm_mode = None
    if 'properties' in image and 'vm_mode' in image['properties']:
        vm_mode = image['properties']['vm_mode']

    # Fall back to the kernel/ramdisk recorded on the image itself.
    if kernel_id is None:
        kernel_id = image['properties'].get('kernel_id', None)
    if ramdisk_id is None:
        ramdisk_id = image['properties'].get('ramdisk_id', None)
    # FIXME(sirp): is there a way we can remove null_kernel?
    # No kernel and ramdisk for raw images
    if kernel_id == str(FLAGS.null_kernel):
        kernel_id = None
        ramdisk_id = None
        LOG.debug(_("Creating a raw instance"))
    # Make sure we have access to kernel and ramdisk (if not raw)
    logging.debug("Using Kernel=%s, Ramdisk=%s" % (kernel_id, ramdisk_id))
    if kernel_id:
        image_service.show(context, kernel_id)
    if ramdisk_id:
        image_service.show(context, ramdisk_id)

    self.ensure_default_security_group(context)

    # Look up the public key only when the caller gave a name but no data.
    if key_data is None and key_name:
        key_pair = db.key_pair_get(context, context.user_id, key_name)
        key_data = key_pair['public_key']

    if reservation_id is None:
        reservation_id = utils.generate_uid('r')

    root_device_name = ec2utils.properties_root_device_name(
        image['properties'])

    # Fields shared by every instance of this request.
    base_options = {
        'reservation_id': reservation_id,
        'image_ref': image_href,
        'kernel_id': kernel_id or '',
        'ramdisk_id': ramdisk_id or '',
        'state': 0,
        'state_description': 'scheduling',
        'user_id': context.user_id,
        'project_id': context.project_id,
        'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
        'instance_type_id': instance_type['id'],
        'memory_mb': instance_type['memory_mb'],
        'vcpus': instance_type['vcpus'],
        'local_gb': instance_type['local_gb'],
        'display_name': display_name,
        'display_description': display_description,
        'user_data': user_data or '',
        'key_name': key_name,
        'key_data': key_data,
        'locked': False,
        'metadata': metadata,
        'availability_zone': availability_zone,
        'os_type': os_type,
        'architecture': architecture,
        'vm_mode': vm_mode,
        'root_device_name': root_device_name}

    return (num_instances, base_options, image)
def create(self, req, resp_obj, body): """ When create action, not import, upload private key to NOS and update private_key_url in DB. """ # NOTE(hzyangtk): Catch the create response from keypairs API. # If is create(not import), upload the generate # private key to NOS. And store private key url # of NOS into nova.keypairs table. context = req.environ['nova.context'] authorize(context) self._init_nos_api(context) keypair = resp_obj.obj['keypair'] params = body['keypair'] # NOTE(hzyangtk): when do creating keypairs, fingerprint will # be add an extra string '.create' to end. This # action is target to idetify create and import is_create = False if 'public_key' not in params: is_create = True create_fingerprint = keypair['fingerprint'] + '.create' db.key_pair_update_fingerprint(context, context.user_id, keypair['name'], create_fingerprint) if is_create and FLAGS.keypairs_connect_nos: # NOTE(hzyangtk): This means this is create but not import. # Then, determine use nos to store # keypairs or not by FLAGS.keypairs_connect_nos. 
# if use nos to store keypairs, it will # upload private key to nos when create # return a private key url of nos to NOVA # and store it into db with private_key_url try: tmp_data = db.key_pair_get(context, context.user_id, keypair['name']) created_at_local_tz = self._tz_utc_to_local( tmp_data['created_at']) create_timestamp = self._datetime_to_timestamp( created_at_local_tz) expires = long(create_timestamp + self.expires_time) if context.user_name is not None: private_key_name = context.user_name + '_' \ + keypair['name'] \ + '.private' else: private_key_name = keypair['fingerprint'].replace(':', '') private_key_content = keypair['private_key'] check_bucket = self.call_nos.check_bucket_exist( self.bucket_name) if not check_bucket: self.call_nos.create_bucket(self.bucket_name) else: check_object = self.call_nos.check_object_exist( self.bucket_name, private_key_name) if check_object: self.call_nos.delete_private_key(self.bucket_name, private_key_name) private_key_url = self.call_nos.upload_private_key( self.bucket_name, private_key_name, private_key_content, expires, self.use_domain) keypair['private_key_url'] = private_key_url except (webob.exc.HTTPClientError, webob.exc.HTTPRequestTimeout): # NOTE(hzyangtk): when NOS connect error occurs, delete the # generated keypair. self._notify_NOS_connection_failure(context, tmp_data) try: db.key_pair_destroy(context, context.user_id, keypair['name']) except exception.KeypairNotFound: # NOTE(hzyangtk): when keypair not found, to do nothing pass nos_url = FLAGS.nos_url nos_host = FLAGS.nos_host nos_accessKey = FLAGS.nos_accessKey nos_accessSecret = FLAGS.nos_accessSecret LOG.exception(_("Connect to NOS error, " "nos_url: %(nos_url)s, " "nos_host: %(nos_host)s, " "nos_accessKey: %(nos_accessKey)s," "nos_accessSecret: %(nos_accessSecret)s."), locals()) err_msg = _("Private key URL generate failed") raise webob.exc.HTTPServerError(explanation=err_msg)
def create(self, req):
    """Creates a new server for a given user.

    Resolves the requested (or default) keypair, image, flavor and
    personality files, then asks the compute API to build one instance
    and returns its detail view with the admin password attached.
    """
    env = self._deserialize_create(req)
    if not env:
        return faults.Fault(exc.HTTPUnprocessableEntity())

    context = req.environ['nova.context']
    password = self._get_server_admin_password(env['server'])

    key_name = env['server'].get('key_name')
    key_data = None
    if key_name:
        try:
            key_pair = db.key_pair_get(context, context.user_id, key_name)
            key_name = key_pair['name']
            key_data = key_pair['public_key']
        # BUG FIX: was a bare "except:" which also swallowed
        # SystemExit/KeyboardInterrupt.
        except Exception:
            # BUG FIX: interpolate after the translation lookup so the
            # message catalog key matches the literal format string.
            msg = _("Can not load the requested key %s") % key_name
            return faults.Fault(exc.HTTPBadRequest(msg))
    else:
        # backwards compatibility: default to the user's first keypair
        key_pairs = auth_manager.AuthManager.get_key_pairs(context)
        if key_pairs:
            key_pair = key_pairs[0]
            key_name = key_pair['name']
            key_data = key_pair['public_key']

    image_id = self._image_id_from_req_data(env)
    kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
        req, image_id)

    personality = env['server'].get('personality')
    injected_files = []
    if personality:
        injected_files = self._get_injected_files(personality)

    flavor_id = self._flavor_id_from_req_data(env)

    if not 'name' in env['server']:
        msg = _("Server name is not defined")
        return exc.HTTPBadRequest(msg)

    # BUG FIX: removed leftover debug statement (print "4444").
    name = env['server']['name']
    self._validate_server_name(name)
    name = name.strip()

    try:
        inst_type = \
            instance_types.get_instance_type_by_flavor_id(flavor_id)
        (inst,) = self.compute_api.create(
            context,
            inst_type,
            image_id,
            kernel_id=kernel_id,
            ramdisk_id=ramdisk_id,
            display_name=name,
            display_description=name,
            key_name=key_name,
            key_data=key_data,
            user_data=env['server'].get('user_data'),
            metadata=env['server'].get('metadata', {}),
            injected_files=injected_files,
            admin_password=password)
    except quota.QuotaError as error:
        self._handle_quota_error(error)

    inst['instance_type'] = inst_type
    inst['image_id'] = image_id

    builder = self._get_view_builder(req)
    server = builder.build(inst, is_detail=True)
    server['server']['adminPass'] = password
    return server
def create(self, context, instance_type, image_id, kernel_id=None,
           ramdisk_id=None, min_count=1, max_count=1, display_name="",
           display_description="", key_name=None, key_data=None,
           security_group="default", availability_zone=None,
           user_data=None, metadata=None, injected_files=None,
           admin_password=None):
    """Create the number and type of instances requested.

    Verifies that quota and other arguments are valid, creates the
    instance DB records, and casts each instance to the scheduler.
    Returns a list of plain dicts for the created instances.
    """
    # NOTE: the default used to be a mutable dict ({}), which Python
    # shares across calls; normalize None to a fresh dict instead.
    if metadata is None:
        metadata = {}
    if not instance_type:
        instance_type = instance_types.get_default_instance_type()

    num_instances = quota.allowed_instances(context, max_count,
                                            instance_type)
    if num_instances < min_count:
        pid = context.project_id
        LOG.warn(_("Quota exceeeded for %(pid)s,"
                   " tried to run %(min_count)s instances") % locals())
        if num_instances <= 0:
            message = _("Instance quota exceeded. You cannot run any "
                        "more instances of this type.")
        else:
            message = _("Instance quota exceeded. You can only run %s "
                        "more instances of this type.") % num_instances
        raise quota.QuotaError(message, "InstanceLimitExceeded")

    self._check_metadata_properties_quota(context, metadata)
    self._check_injected_file_quota(context, injected_files)

    image = self.image_service.show(context, image_id)

    os_type = None
    if "properties" in image and "os_type" in image["properties"]:
        os_type = image["properties"]["os_type"]

    if kernel_id is None:
        kernel_id = image["properties"].get("kernel_id", None)
    if ramdisk_id is None:
        ramdisk_id = image["properties"].get("ramdisk_id", None)
    # FIXME(sirp): is there a way we can remove null_kernel?
    # No kernel and ramdisk for raw images
    if kernel_id == str(FLAGS.null_kernel):
        kernel_id = None
        ramdisk_id = None
        LOG.debug(_("Creating a raw instance"))
    # Make sure we have access to kernel and ramdisk (if not raw)
    logging.debug("Using Kernel=%s, Ramdisk=%s" % (kernel_id, ramdisk_id))
    if kernel_id:
        self.image_service.show(context, kernel_id)
    if ramdisk_id:
        self.image_service.show(context, ramdisk_id)

    if security_group is None:
        security_group = ["default"]
    # Accept a single group name as well as a list of names.
    if not isinstance(security_group, list):
        security_group = [security_group]

    security_groups = []
    self.ensure_default_security_group(context)
    for security_group_name in security_group:
        group = db.security_group_get_by_name(context,
                                              context.project_id,
                                              security_group_name)
        security_groups.append(group["id"])

    if key_data is None and key_name:
        key_pair = db.key_pair_get(context, context.user_id, key_name)
        key_data = key_pair["public_key"]

    base_options = {
        "reservation_id": utils.generate_uid("r"),
        "image_id": image_id,
        "kernel_id": kernel_id or "",
        "ramdisk_id": ramdisk_id or "",
        "state": 0,
        "state_description": "scheduling",
        "user_id": context.user_id,
        "project_id": context.project_id,
        "launch_time": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
        "instance_type_id": instance_type["id"],
        "memory_mb": instance_type["memory_mb"],
        "vcpus": instance_type["vcpus"],
        "local_gb": instance_type["local_gb"],
        "display_name": display_name,
        "display_description": display_description,
        "user_data": user_data or "",
        "key_name": key_name,
        "key_data": key_data,
        "locked": False,
        "metadata": metadata,
        "availability_zone": availability_zone,
        "os_type": os_type,
    }
    # NOTE: elevated context computed once; the per-iteration
    # recomputation and the dead "if not security_groups" reset
    # (security_groups is always a list here) were removed.
    elevated = context.elevated()
    instances = []
    LOG.debug(_("Going to run %s instances..."), num_instances)
    for num in range(num_instances):
        instance = dict(mac_address=utils.generate_mac(),
                        launch_index=num,
                        **base_options)
        instance = self.db.instance_create(context, instance)
        instance_id = instance["id"]

        for security_group_id in security_groups:
            self.db.instance_add_security_group(elevated,
                                                instance_id,
                                                security_group_id)

        # Set sane defaults if not specified
        updates = dict(hostname=self.hostname_factory(instance_id))
        if (not hasattr(instance, "display_name") or
                instance.display_name is None):
            updates["display_name"] = "Server %s" % instance_id

        instance = self.update(context, instance_id, **updates)
        instances.append(instance)

        pid = context.project_id
        uid = context.user_id
        LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
                    " instance %(instance_id)s") % locals())
        # NOTE(sandy): For now we're just going to pass in the
        # instance_type record to the scheduler. In a later phase
        # we'll be ripping this whole for-loop out and deferring the
        # creation of the Instance record. At that point all this will
        # change.
        rpc.cast(context,
                 FLAGS.scheduler_topic,
                 {"method": "run_instance",
                  "args": {"topic": FLAGS.compute_topic,
                           "instance_id": instance_id,
                           "instance_type": instance_type,
                           "availability_zone": availability_zone,
                           "injected_files": injected_files,
                           "admin_password": admin_password}})

    for group_id in security_groups:
        self.trigger_security_group_members_refresh(elevated, group_id)

    return [dict(x.iteritems()) for x in instances]
def create(self, context, instance_type, image_id, kernel_id=None,
           ramdisk_id=None, min_count=1, max_count=1, display_name='',
           display_description='', key_name=None, key_data=None,
           security_group='default', availability_zone=None,
           user_data=None, metadata=None, onset_files=None):
    """Create the number of instances requested if quota and other
    arguments check out ok.

    Validates instance/metadata quotas, creates the instance DB
    records, and casts each instance to the scheduler. Returns a list
    of plain dicts for the created instances.
    """
    # NOTE: the default used to be a mutable list ([]), which Python
    # shares across calls; normalize None to a fresh list instead.
    if metadata is None:
        metadata = []
    type_data = instance_types.get_instance_type(instance_type)
    num_instances = quota.allowed_instances(context, max_count, type_data)
    if num_instances < min_count:
        pid = context.project_id
        LOG.warn(_("Quota exceeeded for %(pid)s,"
                   " tried to run %(min_count)s instances") % locals())
        raise quota.QuotaError(_("Instance quota exceeded. You can only "
                                 "run %s more instances of this type.") %
                               num_instances, "InstanceLimitExceeded")

    num_metadata = len(metadata)
    quota_metadata = quota.allowed_metadata_items(context, num_metadata)
    if quota_metadata < num_metadata:
        pid = context.project_id
        msg = (_("Quota exceeeded for %(pid)s,"
                 " tried to set %(num_metadata)s metadata properties")
               % locals())
        LOG.warn(msg)
        raise quota.QuotaError(msg, "MetadataLimitExceeded")

    # Because metadata is stored in the DB, we hard-code the size limits
    # In future, we may support more variable length strings, so we act
    # as if this is quota-controlled for forwards compatibility
    for metadata_item in metadata:
        k = metadata_item['key']
        v = metadata_item['value']
        if len(k) > 255 or len(v) > 255:
            pid = context.project_id
            msg = (_("Quota exceeeded for %(pid)s,"
                     " metadata property key or value too long")
                   % locals())
            LOG.warn(msg)
            raise quota.QuotaError(msg, "MetadataLimitExceeded")

    image = self.image_service.show(context, image_id)

    if kernel_id is None:
        kernel_id = image.get('kernel_id', None)
    if ramdisk_id is None:
        ramdisk_id = image.get('ramdisk_id', None)
    # FIXME(sirp): is there a way we can remove null_kernel?
    # No kernel and ramdisk for raw images
    if kernel_id == str(FLAGS.null_kernel):
        kernel_id = None
        ramdisk_id = None
        LOG.debug(_("Creating a raw instance"))
    # Make sure we have access to kernel and ramdisk (if not raw)
    logging.debug("Using Kernel=%s, Ramdisk=%s" % (kernel_id, ramdisk_id))
    if kernel_id:
        self.image_service.show(context, kernel_id)
    if ramdisk_id:
        self.image_service.show(context, ramdisk_id)

    if security_group is None:
        security_group = ['default']
    # Accept a single group name as well as a list of names.
    if not isinstance(security_group, list):
        security_group = [security_group]

    security_groups = []
    self.ensure_default_security_group(context)
    for security_group_name in security_group:
        group = db.security_group_get_by_name(context,
                                              context.project_id,
                                              security_group_name)
        security_groups.append(group['id'])

    if key_data is None and key_name:
        key_pair = db.key_pair_get(context, context.user_id, key_name)
        key_data = key_pair['public_key']

    base_options = {
        'reservation_id': utils.generate_uid('r'),
        'image_id': image_id,
        'kernel_id': kernel_id or '',
        'ramdisk_id': ramdisk_id or '',
        'state_description': 'scheduling',
        'user_id': context.user_id,
        'project_id': context.project_id,
        'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
        'instance_type': instance_type,
        'memory_mb': type_data['memory_mb'],
        'vcpus': type_data['vcpus'],
        'local_gb': type_data['local_gb'],
        'display_name': display_name,
        'display_description': display_description,
        'user_data': user_data or '',
        'key_name': key_name,
        'key_data': key_data,
        'locked': False,
        'metadata': metadata,
        'availability_zone': availability_zone}
    # NOTE: elevated context computed once; the per-iteration
    # recomputation and the dead "if not security_groups" reset
    # (security_groups is always a list here) were removed.
    elevated = context.elevated()
    instances = []
    LOG.debug(_("Going to run %s instances..."), num_instances)
    for num in range(num_instances):
        instance = dict(mac_address=utils.generate_mac(),
                        launch_index=num,
                        **base_options)
        instance = self.db.instance_create(context, instance)
        instance_id = instance['id']

        for security_group_id in security_groups:
            self.db.instance_add_security_group(elevated,
                                                instance_id,
                                                security_group_id)

        # Set sane defaults if not specified
        updates = dict(hostname=self.hostname_factory(instance_id))
        if (not hasattr(instance, 'display_name') or
                instance.display_name is None):
            updates['display_name'] = "Server %s" % instance_id

        instance = self.update(context, instance_id, **updates)
        instances.append(instance)

        pid = context.project_id
        uid = context.user_id
        LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
                    " instance %(instance_id)s") % locals())
        rpc.cast(context,
                 FLAGS.scheduler_topic,
                 {"method": "run_instance",
                  "args": {"topic": FLAGS.compute_topic,
                           "instance_id": instance_id,
                           "availability_zone": availability_zone,
                           "onset_files": onset_files}})

    for group_id in security_groups:
        self.trigger_security_group_members_refresh(elevated, group_id)

    return [dict(x.iteritems()) for x in instances]
def create(self, context, instance_type, image_id, kernel_id=None,
           ramdisk_id=None, min_count=1, max_count=1, display_name="",
           display_description="", key_name=None, key_data=None,
           security_group="default", availability_zone=None,
           user_data=None, onset_files=None):
    """Create the number of instances requested if quota and other
    arguments check out ok.

    Creates the instance DB records and casts each instance to the
    scheduler. Returns a list of plain dicts for the created instances.
    """
    type_data = instance_types.INSTANCE_TYPES[instance_type]
    num_instances = quota.allowed_instances(context, max_count, type_data)
    if num_instances < min_count:
        pid = context.project_id
        LOG.warn(_("Quota exceeeded for %(pid)s,"
                   " tried to run %(min_count)s instances") % locals())
        raise quota.QuotaError(_("Instance quota exceeded. You can only "
                                 "run %s more instances of this type.") %
                               num_instances, "InstanceLimitExceeded")

    image = self.image_service.show(context, image_id)
    if kernel_id is None:
        kernel_id = image.get("kernel_id", None)
    if ramdisk_id is None:
        ramdisk_id = image.get("ramdisk_id", None)
    # No kernel and ramdisk for raw images
    if kernel_id == str(FLAGS.null_kernel):
        kernel_id = None
        ramdisk_id = None
        LOG.debug(_("Creating a raw instance"))
    # Make sure we have access to kernel and ramdisk (if not raw)
    logging.debug("Using Kernel=%s, Ramdisk=%s" % (kernel_id, ramdisk_id))
    if kernel_id:
        self.image_service.show(context, kernel_id)
    if ramdisk_id:
        self.image_service.show(context, ramdisk_id)

    if security_group is None:
        security_group = ["default"]
    # Accept a single group name as well as a list of names.
    if not isinstance(security_group, list):
        security_group = [security_group]

    security_groups = []
    self.ensure_default_security_group(context)
    for security_group_name in security_group:
        group = db.security_group_get_by_name(context,
                                              context.project_id,
                                              security_group_name)
        security_groups.append(group["id"])

    if key_data is None and key_name:
        key_pair = db.key_pair_get(context, context.user_id, key_name)
        key_data = key_pair["public_key"]

    base_options = {
        "reservation_id": utils.generate_uid("r"),
        "image_id": image_id,
        "kernel_id": kernel_id or "",
        "ramdisk_id": ramdisk_id or "",
        "state_description": "scheduling",
        "user_id": context.user_id,
        "project_id": context.project_id,
        "launch_time": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
        "instance_type": instance_type,
        "memory_mb": type_data["memory_mb"],
        "vcpus": type_data["vcpus"],
        "local_gb": type_data["local_gb"],
        "display_name": display_name,
        "display_description": display_description,
        "user_data": user_data or "",
        "key_name": key_name,
        "key_data": key_data,
        "locked": False,
        "availability_zone": availability_zone,
    }
    # NOTE: elevated context computed once; the per-iteration
    # recomputation and the dead "if not security_groups" reset
    # (security_groups is always a list here) were removed.
    elevated = context.elevated()
    instances = []
    LOG.debug(_("Going to run %s instances..."), num_instances)
    for num in range(num_instances):
        instance = dict(mac_address=utils.generate_mac(),
                        launch_index=num,
                        **base_options)
        instance = self.db.instance_create(context, instance)
        instance_id = instance["id"]

        for security_group_id in security_groups:
            self.db.instance_add_security_group(elevated,
                                                instance_id,
                                                security_group_id)

        # Set sane defaults if not specified
        updates = dict(hostname=self.hostname_factory(instance_id))
        if (not hasattr(instance, "display_name") or
                instance.display_name is None):
            updates["display_name"] = "Server %s" % instance_id

        instance = self.update(context, instance_id, **updates)
        instances.append(instance)

        pid = context.project_id
        uid = context.user_id
        LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
                    " instance %(instance_id)s") % locals())
        rpc.cast(context,
                 FLAGS.scheduler_topic,
                 {"method": "run_instance",
                  "args": {"topic": FLAGS.compute_topic,
                           "instance_id": instance_id,
                           "availability_zone": availability_zone,
                           "onset_files": onset_files}})

    for group_id in security_groups:
        self.trigger_security_group_members_refresh(elevated, group_id)

    return [dict(x.iteritems()) for x in instances]