def test_task_update_with_states(self):
    """A task-state-only transition emits one notification whose payload
    carries the old/new states and selected instance fields.
    """
    self.flags(notify_on_state_change="vm_and_task_state")
    notifications.send_update_with_states(
        self.context, self.instance,
        vm_states.BUILDING, vm_states.BUILDING,
        task_states.SPAWNING, None, verify_states=True)

    self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
    payload = fake_notifier.NOTIFICATIONS[0].payload

    expected = {
        "old_state": vm_states.BUILDING,
        "state": vm_states.BUILDING,
        "old_task_state": task_states.SPAWNING,
        "access_ip_v4": str(self.instance.access_ip_v4),
        "access_ip_v6": str(self.instance.access_ip_v6),
        "display_name": self.instance.display_name,
        "hostname": self.instance.hostname,
    }
    for key, value in expected.items():
        self.assertEqual(value, payload[key])
    self.assertIsNone(payload["new_task_state"])
def test_vm_update_with_states(self):
    """A vm_state change emits one notification; the payload includes the
    state transition plus instance identity fields and the node.
    """
    notifications.send_update_with_states(
        self.context, self.instance,
        vm_states.BUILDING, vm_states.ACTIVE,
        task_states.SPAWNING, task_states.SPAWNING, verify_states=True)

    self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
    payload = fake_notifier.NOTIFICATIONS[0].payload

    expected = {
        "old_state": vm_states.BUILDING,
        "state": vm_states.ACTIVE,
        "old_task_state": task_states.SPAWNING,
        "new_task_state": task_states.SPAWNING,
        "access_ip_v4": str(self.instance.access_ip_v4),
        "access_ip_v6": str(self.instance.access_ip_v6),
        "display_name": self.instance.display_name,
        "hostname": self.instance.hostname,
        "node": self.instance.node,
    }
    for key, value in expected.items():
        self.assertEqual(value, payload[key])
def test_task_update_with_states(self):
    """A task-state-only change sends one notification whose payload has
    the old/new state fields plus selected instance attributes.
    """
    self.flags(notify_on_state_change="vm_and_task_state")
    notifications.send_update_with_states(self.context, self.instance,
            vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
            None, verify_states=True)

    self.assertEqual(1, len(test_notifier.NOTIFICATIONS))
    notif = test_notifier.NOTIFICATIONS[0]
    payload = notif["payload"]
    access_ip_v4 = self.instance["access_ip_v4"]
    access_ip_v6 = self.instance["access_ip_v6"]
    display_name = self.instance["display_name"]
    hostname = self.instance["hostname"]

    # assertEquals is a deprecated alias of assertEqual; likewise
    # assertEqual(None, x) reads better as assertIsNone(x).
    self.assertEqual(vm_states.BUILDING, payload["old_state"])
    self.assertEqual(vm_states.BUILDING, payload["state"])
    self.assertEqual(task_states.SPAWNING, payload["old_task_state"])
    self.assertIsNone(payload["new_task_state"])
    self.assertEqual(payload["access_ip_v4"], access_ip_v4)
    self.assertEqual(payload["access_ip_v6"], access_ip_v6)
    self.assertEqual(payload["display_name"], display_name)
    self.assertEqual(payload["hostname"], hostname)
def test_task_notif(self):
    """Task-state changes are only notified when task-state notifications
    are enabled via notify_on_state_change.
    """
    # With only vm_state notifications enabled, a task state change
    # alone must not produce a notification.
    self.flags(notify_on_state_change="vm_state")

    old = copy.copy(self.instance)
    self.instance.task_state = task_states.SPAWNING

    notifications.send_update_with_states(
        self.context, self.instance,
        old['vm_state'], self.instance.vm_state,
        old['task_state'], self.instance.task_state,
        verify_states=True)
    self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))

    # Now enable task state notifications and retry: one notification.
    self.flags(notify_on_state_change="vm_and_task_state")
    notifications.send_update(self.context, old, self.instance)
    self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
def test_task_update_with_states(self):
    """One notification is sent for a task-state-only transition."""
    self.flags(notify_on_state_change="vm_and_task_state")
    notifications.send_update_with_states(
        self.context,
        self.instance,
        vm_states.BUILDING,
        vm_states.BUILDING,
        task_states.SPAWNING,
        None,
        verify_states=True,
    )

    self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
    body = fake_notifier.NOTIFICATIONS[0].payload

    self.assertEqual(vm_states.BUILDING, body["old_state"])
    self.assertEqual(vm_states.BUILDING, body["state"])
    self.assertEqual(task_states.SPAWNING, body["old_task_state"])
    self.assertIsNone(body["new_task_state"])
    self.assertEqual(str(self.instance.access_ip_v4),
                     body["access_ip_v4"])
    self.assertEqual(str(self.instance.access_ip_v6),
                     body["access_ip_v6"])
    self.assertEqual(self.instance.display_name, body["display_name"])
    self.assertEqual(self.instance.hostname, body["hostname"])
def test_vm_update_with_states(self):
    """One notification is sent for a vm_state transition; the payload
    also reports the instance's identity fields and node.
    """
    notifications.send_update_with_states(
        self.context,
        self.instance,
        vm_states.BUILDING,
        vm_states.ACTIVE,
        task_states.SPAWNING,
        task_states.SPAWNING,
        verify_states=True,
    )

    self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
    body = fake_notifier.NOTIFICATIONS[0].payload

    self.assertEqual(vm_states.BUILDING, body["old_state"])
    self.assertEqual(vm_states.ACTIVE, body["state"])
    self.assertEqual(task_states.SPAWNING, body["old_task_state"])
    self.assertEqual(task_states.SPAWNING, body["new_task_state"])
    self.assertEqual(str(self.instance.access_ip_v4),
                     body["access_ip_v4"])
    self.assertEqual(str(self.instance.access_ip_v6),
                     body["access_ip_v6"])
    self.assertEqual(self.instance.display_name, body["display_name"])
    self.assertEqual(self.instance.hostname, body["hostname"])
    self.assertEqual(self.instance.node, body["node"])
def test_task_update_with_states(self):
    """A task-state-only change sends one notification whose payload has
    the old/new state fields plus selected instance attributes.
    """
    self.flags(notify_on_state_change="vm_and_task_state")
    notifications.send_update_with_states(
        self.context,
        self.instance,
        vm_states.BUILDING,
        vm_states.BUILDING,
        task_states.SPAWNING,
        None,
        verify_states=True,
    )

    self.assertEqual(1, len(test_notifier.NOTIFICATIONS))
    notif = test_notifier.NOTIFICATIONS[0]
    payload = notif["payload"]
    access_ip_v4 = self.instance["access_ip_v4"]
    access_ip_v6 = self.instance["access_ip_v6"]
    display_name = self.instance["display_name"]
    hostname = self.instance["hostname"]

    # assertEquals is a deprecated alias of assertEqual; likewise
    # assertEqual(None, x) reads better as assertIsNone(x).
    self.assertEqual(vm_states.BUILDING, payload["old_state"])
    self.assertEqual(vm_states.BUILDING, payload["state"])
    self.assertEqual(task_states.SPAWNING, payload["old_task_state"])
    self.assertIsNone(payload["new_task_state"])
    self.assertEqual(payload["access_ip_v4"], access_ip_v4)
    self.assertEqual(payload["access_ip_v6"], access_ip_v6)
    self.assertEqual(payload["display_name"], display_name)
    self.assertEqual(payload["hostname"], hostname)
def test_no_update_with_states(self):
    """An unchanged vm/task state pair produces no notification."""
    notifications.send_update_with_states(self.context, self.instance,
            vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
            task_states.SPAWNING)
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(0, len(test_notifier.NOTIFICATIONS))
def test_update_with_service_name(self):
    """An explicit service name replaces the default 'compute' component
    in the notification publisher_id.
    """
    notifications.send_update_with_states(self.context, self.instance,
            vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
            None, service="testservice")
    self.assertEqual(1, len(test_notifier.NOTIFICATIONS))

    # The explicitly passed service overrides the 'compute' default.
    # (The old comment here wrongly claimed the default was expected.)
    notif = test_notifier.NOTIFICATIONS[0]
    self.assertEqual('testservice.testhost', notif['publisher_id'])
def test_update_with_host_name(self):
    """An explicit host overrides the publisher host while the service
    component still defaults to 'compute'.
    """
    notifications.send_update_with_states(
        self.context, self.instance,
        vm_states.BUILDING, vm_states.BUILDING,
        task_states.SPAWNING, None, host="someotherhost")

    self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
    # service name should default to 'compute'
    self.assertEqual('compute.someotherhost',
                     fake_notifier.NOTIFICATIONS[0].publisher_id)
def test_no_update_with_states(self):
    """With verify_states on, an unchanged vm/task state pair produces
    no notification.
    """
    notifications.send_update_with_states(
        self.context,
        self.instance,
        vm_states.BUILDING,
        vm_states.BUILDING,
        task_states.SPAWNING,
        task_states.SPAWNING,
        verify_states=True,
    )
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(0, len(test_notifier.NOTIFICATIONS))
def test_task_update_with_states(self):
    """A task-state-only change sends one notification carrying the
    old/new vm and task state fields.
    """
    notifications.send_update_with_states(self.context, self.instance,
            vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
            None)
    self.assertEqual(1, len(test_notifier.NOTIFICATIONS))
    notif = test_notifier.NOTIFICATIONS[0]
    payload = notif["payload"]

    # assertEquals is a deprecated alias of assertEqual; likewise
    # assertEqual(None, x) reads better as assertIsNone(x).
    self.assertEqual(vm_states.BUILDING, payload["old_state"])
    self.assertEqual(vm_states.BUILDING, payload["state"])
    self.assertEqual(task_states.SPAWNING, payload["old_task_state"])
    self.assertIsNone(payload["new_task_state"])
def test_send_no_notif(self):
    """No notification is sent when neither vm_state nor task_state
    changes, even with explicit service/host arguments.
    """
    vm_state = self.instance.vm_state
    task_state = self.instance.task_state

    # Old and new states are identical, so verify_states suppresses
    # the notification.
    notifications.send_update_with_states(
        self.context, self.instance,
        vm_state, vm_state,
        task_state, task_state,
        service="compute", host=None, verify_states=True)

    self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
def test_fail_send_update_with_states_inst_not_found(
        self, mock_log_exception):
    """InstanceNotFound from the notification helper is handled as an
    expected exception: nothing is sent and no error is logged.
    """
    notfound = exception.InstanceNotFound(instance_id=self.instance.uuid)
    patcher = mock.patch.object(
        notifications, '_send_instance_update_notification',
        side_effect=notfound)
    with patcher:
        notifications.send_update_with_states(
            self.context, self.instance,
            vm_states.BUILDING, vm_states.ERROR,
            task_states.NETWORKING, new_task_state=None)

    self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
    self.assertEqual(0, mock_log_exception.call_count)
def test_notif_disabled(self):
    """With notify_on_state_change unset, state changes send nothing."""
    self.flags(notify_on_state_change=None)

    old = copy.copy(self.instance)
    self.instance.vm_state = vm_states.ACTIVE

    notifications.send_update_with_states(
        self.context, self.instance,
        old['vm_state'], self.instance.vm_state,
        old['task_state'], self.instance.task_state,
        verify_states=True)
    notifications.send_update(self.context, old, self.instance)

    self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
def test_vm_update_with_states(self):
    """A vm_state change sends one notification with the state fields
    and selected instance attributes in the payload.
    """
    notifications.send_update_with_states(self.context, self.instance,
            vm_states.BUILDING, vm_states.ACTIVE, task_states.SPAWNING,
            task_states.SPAWNING, verify_states=True)
    self.assertEqual(1, len(test_notifier.NOTIFICATIONS))
    notif = test_notifier.NOTIFICATIONS[0]
    payload = notif["payload"]
    access_ip_v4 = self.instance["access_ip_v4"]
    access_ip_v6 = self.instance["access_ip_v6"]
    display_name = self.instance["display_name"]

    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(vm_states.BUILDING, payload["old_state"])
    self.assertEqual(vm_states.ACTIVE, payload["state"])
    self.assertEqual(task_states.SPAWNING, payload["old_task_state"])
    self.assertEqual(task_states.SPAWNING, payload["new_task_state"])
    self.assertEqual(payload["access_ip_v4"], access_ip_v4)
    self.assertEqual(payload["access_ip_v6"], access_ip_v6)
    self.assertEqual(payload["display_name"], display_name)
def test_notif_disabled(self):
    """Disabling both state-change and any-change notification config
    suppresses all update notifications.
    """
    # test config disable of the notifications
    self.flags(notify_on_state_change=None)
    self.flags(notify_on_any_change=False)

    old = copy.copy(self.instance)
    self.instance["vm_state"] = vm_states.ACTIVE

    old_vm_state = old["vm_state"]
    new_vm_state = self.instance["vm_state"]
    old_task_state = old["task_state"]
    new_task_state = self.instance["task_state"]

    notifications.send_update_with_states(self.context, self.instance,
            old_vm_state, new_vm_state, old_task_state, new_task_state,
            verify_states=True)
    notifications.send_update(self.context, old, self.instance)

    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(0, len(test_notifier.NOTIFICATIONS))
def schedule_and_build_instances(self, context, build_requests,
                                 request_specs, image, admin_password,
                                 injected_files, requested_networks,
                                 block_device_mapping):
    """Schedule the requested instances onto hosts and kick off builds.

    For each (build_request, request_spec, host) triple: resolve the
    host to a cell, create the instance record in that cell, record the
    create action, create BDMs, update the instance mapping, destroy the
    build request, and finally RPC the compute host to build and run the
    instance. Scheduling failure buries all requests in cell0; a missing
    host-to-cell mapping buries just that one request.
    """
    legacy_spec = request_specs[0].to_legacy_request_spec_dict()
    try:
        # NOTE(review): only the first request_spec is used for
        # scheduling all instances — presumably they share one spec;
        # confirm against the caller.
        hosts = self._schedule_instances(
            context, legacy_spec,
            request_specs[0].to_legacy_filter_properties_dict())
    except Exception as exc:
        LOG.exception(_LE('Failed to schedule instances'))
        self._bury_in_cell0(context, request_specs[0], exc,
                            build_requests=build_requests)
        return

    # Cache host -> HostMapping lookups so repeated hosts hit the DB once.
    host_mapping_cache = {}

    for (build_request, request_spec, host) in six.moves.zip(
            build_requests, request_specs, hosts):
        filter_props = request_spec.to_legacy_filter_properties_dict()
        scheduler_utils.populate_filter_properties(filter_props, host)
        instance = build_request.get_new_instance(context)

        # Convert host from the scheduler into a cell record
        if host['host'] not in host_mapping_cache:
            try:
                host_mapping = objects.HostMapping.get_by_host(
                    context, host['host'])
                host_mapping_cache[host['host']] = host_mapping
            except exception.HostMappingNotFound as exc:
                LOG.error(
                    _LE('No host-to-cell mapping found for selected '
                        'host %(host)s. Setup is incomplete.'),
                    {'host': host['host']})
                self._bury_in_cell0(context, request_spec, exc,
                                    build_requests=[build_request],
                                    instances=[instance])
                continue
        else:
            host_mapping = host_mapping_cache[host['host']]

        cell = host_mapping.cell_mapping

        with obj_target_cell(instance, cell):
            instance.create()

        # send a state update notification for the initial create to
        # show it going from non-existent to BUILDING
        notifications.send_update_with_states(context, instance, None,
                vm_states.BUILDING, None, None, service="conductor")

        objects.InstanceAction.action_start(context, instance.uuid,
                instance_actions.CREATE, want_result=False)

        with obj_target_cell(instance, cell):
            instance_bdms = self._create_block_device_mapping(
                instance.flavor, instance.uuid, block_device_mapping)

        # Update mapping for instance. Normally this check is guarded by
        # a try/except but if we're here we know that a newer nova-api
        # handled the build process and would have created the mapping
        inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
            context, instance.uuid)
        inst_mapping.cell_mapping = cell
        inst_mapping.save()

        try:
            build_request.destroy()
        except exception.BuildRequestNotFound:
            # This indicates an instance deletion request has been
            # processed, and the build should halt here. Clean up the
            # bdm and instance record.
            with obj_target_cell(instance, cell):
                try:
                    instance.destroy()
                except exception.InstanceNotFound:
                    pass
                except exception.ObjectActionError:
                    # NOTE(melwitt): Instance became scheduled during
                    # the destroy, "host changed". Refresh and re-destroy.
                    try:
                        instance.refresh()
                        instance.destroy()
                    except exception.InstanceNotFound:
                        pass
            for bdm in instance_bdms:
                with obj_target_cell(bdm, cell):
                    try:
                        bdm.destroy()
                    except exception.ObjectActionError:
                        pass
            # Deletion was requested, so stop building any remaining
            # instances in this batch.
            return

        # NOTE(danms): Compute RPC expects security group names or ids
        # not objects, so convert this to a list of names until we can
        # pass the objects.
        legacy_secgroups = [
            s.identifier for s in request_spec.security_groups
        ]

        with obj_target_cell(instance, cell):
            self.compute_rpcapi.build_and_run_instance(
                context, instance=instance, image=image,
                request_spec=request_spec,
                filter_properties=filter_props,
                admin_password=admin_password,
                injected_files=injected_files,
                requested_networks=requested_networks,
                security_groups=legacy_secgroups,
                block_device_mapping=instance_bdms,
                host=host['host'], node=host['nodename'],
                limits=host['limits'])
def apply(self, context, resource):
    """Create DB entries for the requested instances and start the
    build workflow.

    Builds the base instance options from *resource*, merges in
    image-inherited properties, creates one DB record per requested
    instance (sending a create notification for each), commits the
    quota reservations, records the CREATE action, and returns a
    DictableObject describing the result.
    """
    self.security_group_api = \
        openstack_driver.get_openstack_security_group_driver()

    # Base DB options shared by every instance in this request; new
    # instances start in the BUILDING vm_state with no power state.
    base_options = {
        'reservation_id': resource.reservation_id,
        'image_ref': resource.image_href,
        'kernel_id': resource.kernel_id or '',
        'ramdisk_id': resource.ramdisk_id or '',
        'power_state': power_state.NOSTATE,
        'vm_state': vm_states.BUILDING,
        'config_drive_id': resource.config_drive_id or '',
        'config_drive': resource.config_drive or '',
        'user_id': context.user_id,
        'project_id': context.project_id,
        'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ',
                                     time.gmtime()),
        'instance_type_id': resource.instance_type['id'],
        'memory_mb': resource.instance_type['memory_mb'],
        'vcpus': resource.instance_type['vcpus'],
        'root_gb': resource.instance_type['root_gb'],
        'ephemeral_gb': resource.instance_type['ephemeral_gb'],
        'display_name': resource.display_name,
        'display_description': resource.display_description,
        'user_data': resource.user_data,
        'key_name': resource.key_name,
        'key_data': resource.key_data,
        'locked': False,
        'metadata': resource.metadata,
        'access_ip_v4': resource.access_ip_v4,
        'access_ip_v6': resource.access_ip_v6,
        'availability_zone': resource.availability_zone,
        'root_device_name': resource.root_device_name,
        'progress': 0,
        'system_metadata': resource.system_metadata}

    # Image properties (e.g. auto_disk_config) override/extend the base
    # options.
    options_from_image = self._inherit_properties_from_image(
        resource.image, resource.auto_disk_config)
    base_options.update(options_from_image)

    LOG.debug(_("Going to run %s instances..."), resource.num_instances)

    filter_properties = dict(scheduler_hints=resource.scheduler_hints)
    if resource.forced_host:
        # Forcing a specific host is policy-gated.
        _check_policy(context, 'create:forced_host', {})
        filter_properties['force_hosts'] = [resource.forced_host]
    resource.filter_properties = filter_properties

    # Create DB Entry for the instances and initiate a workflow request
    for i in xrange(resource.num_instances):
        options = base_options.copy()
        instance = self._create_db_entry_for_new_instance(context,
                resource.image, options, resource.security_group,
                resource.block_device_mapping, resource.num_instances,
                i)
        resource.instances.append(jsonutils.to_primitive(instance))

        # send a state update notification for the initial create to
        # show it going from non-existent to BUILDING
        notifications.send_update_with_states(context, instance, None,
                vm_states.BUILDING, None, None, service="api")

    # Commit the reservations
    QUOTAS.commit(context, resource.quota_reservations)

    # Record the starting of instances in the db
    for instance in resource.instances:
        self._record_action_start(context, instance,
                                  instance_actions.CREATE)

    return orc_utils.DictableObject(details='created_db_entry',
                                    resource=resource)
def schedule_and_build_instances(self, context, build_requests,
                                 request_specs, image, admin_password,
                                 injected_files, requested_networks,
                                 block_device_mapping):
    """Schedule the requested instances onto hosts and kick off builds.

    For each (build_request, request_spec, host) triple: resolve the
    host to a cell, create the instance record in that cell, record the
    create action, create BDMs, update the instance mapping, destroy the
    build request, and finally RPC the compute host to build and run the
    instance. Scheduling failure buries all requests in cell0; a missing
    host-to-cell mapping buries just that one request.
    """
    legacy_spec = request_specs[0].to_legacy_request_spec_dict()
    try:
        # NOTE(review): only the first request_spec is used for
        # scheduling all instances — presumably they share one spec;
        # confirm against the caller.
        hosts = self._schedule_instances(context, legacy_spec,
                request_specs[0].to_legacy_filter_properties_dict())
    except Exception as exc:
        LOG.exception(_LE('Failed to schedule instances'))
        self._bury_in_cell0(context, request_specs[0], exc,
                            build_requests=build_requests)
        return

    # Cache host -> HostMapping lookups so repeated hosts hit the DB once.
    host_mapping_cache = {}

    for (build_request, request_spec, host) in six.moves.zip(
            build_requests, request_specs, hosts):
        filter_props = request_spec.to_legacy_filter_properties_dict()
        scheduler_utils.populate_filter_properties(filter_props, host)
        instance = build_request.get_new_instance(context)

        # Convert host from the scheduler into a cell record
        if host['host'] not in host_mapping_cache:
            try:
                host_mapping = objects.HostMapping.get_by_host(
                    context, host['host'])
                host_mapping_cache[host['host']] = host_mapping
            except exception.HostMappingNotFound as exc:
                LOG.error(_LE('No host-to-cell mapping found for '
                              'selected host %(host)s. Setup is '
                              'incomplete.'),
                          {'host': host['host']})
                self._bury_in_cell0(context, request_spec, exc,
                                    build_requests=[build_request],
                                    instances=[instance])
                continue
        else:
            host_mapping = host_mapping_cache[host['host']]

        cell = host_mapping.cell_mapping

        with obj_target_cell(instance, cell):
            instance.create()

        # send a state update notification for the initial create to
        # show it going from non-existent to BUILDING
        notifications.send_update_with_states(context, instance, None,
                vm_states.BUILDING, None, None, service="conductor")

        objects.InstanceAction.action_start(
            context, instance.uuid, instance_actions.CREATE,
            want_result=False)

        with obj_target_cell(instance, cell):
            instance_bdms = self._create_block_device_mapping(
                instance.flavor, instance.uuid, block_device_mapping)

        # Update mapping for instance. Normally this check is guarded by
        # a try/except but if we're here we know that a newer nova-api
        # handled the build process and would have created the mapping
        inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
            context, instance.uuid)
        inst_mapping.cell_mapping = cell
        inst_mapping.save()

        try:
            build_request.destroy()
        except exception.BuildRequestNotFound:
            # This indicates an instance deletion request has been
            # processed, and the build should halt here. Clean up the
            # bdm and instance record.
            with obj_target_cell(instance, cell):
                try:
                    instance.destroy()
                except exception.InstanceNotFound:
                    pass
            for bdm in instance_bdms:
                with obj_target_cell(bdm, cell):
                    try:
                        bdm.destroy()
                    except exception.ObjectActionError:
                        pass
            # Deletion was requested, so stop building any remaining
            # instances in this batch.
            return

        # NOTE(danms): Compute RPC expects security group names or ids
        # not objects, so convert this to a list of names until we can
        # pass the objects.
        legacy_secgroups = [s.identifier
                            for s in request_spec.security_groups]
        with obj_target_cell(instance, cell):
            self.compute_rpcapi.build_and_run_instance(
                context, instance=instance, image=image,
                request_spec=request_spec,
                filter_properties=filter_props,
                admin_password=admin_password,
                injected_files=injected_files,
                requested_networks=requested_networks,
                security_groups=legacy_secgroups,
                block_device_mapping=instance_bdms,
                host=host['host'], node=host['nodename'],
                limits=host['limits'])
def schedule_and_build_instances(self, context, build_requests,
                                 request_specs, image, admin_password,
                                 injected_files, requested_networks,
                                 block_device_mapping):
    """Schedule the requested instances onto hosts and kick off builds.

    For each (build_request, request_spec, host) triple: resolve the
    host to a cell, re-check the build request still exists (the user
    may have deleted it), create the instance record in that cell,
    record the create action, create BDMs, update the instance mapping,
    delete the build request, and finally RPC the compute host to build
    and run the instance. Scheduling failure buries all requests in
    cell0; a missing host-to-cell mapping buries just that one request.
    """
    legacy_spec = request_specs[0].to_legacy_request_spec_dict()
    try:
        # NOTE(review): only the first request_spec is used for
        # scheduling all instances — presumably they share one spec;
        # confirm against the caller.
        hosts = self._schedule_instances(context, legacy_spec,
                request_specs[0].to_legacy_filter_properties_dict())
    except Exception as exc:
        LOG.exception(_LE('Failed to schedule instances'))
        self._bury_in_cell0(context, request_specs[0], exc,
                            build_requests=build_requests)
        return

    # Cache host -> HostMapping lookups so repeated hosts hit the DB once.
    host_mapping_cache = {}

    for (build_request, request_spec, host) in six.moves.zip(
            build_requests, request_specs, hosts):
        filter_props = request_spec.to_legacy_filter_properties_dict()
        instance = build_request.get_new_instance(context)
        # Seed retry info so a failed build can be rescheduled.
        scheduler_utils.populate_retry(filter_props, instance.uuid)
        scheduler_utils.populate_filter_properties(filter_props, host)

        # Convert host from the scheduler into a cell record
        if host['host'] not in host_mapping_cache:
            try:
                host_mapping = objects.HostMapping.get_by_host(
                    context, host['host'])
                host_mapping_cache[host['host']] = host_mapping
            except exception.HostMappingNotFound as exc:
                LOG.error(_LE('No host-to-cell mapping found for '
                              'selected host %(host)s. Setup is '
                              'incomplete.'),
                          {'host': host['host']})
                self._bury_in_cell0(context, request_spec, exc,
                                    build_requests=[build_request],
                                    instances=[instance])
                continue
        else:
            host_mapping = host_mapping_cache[host['host']]

        cell = host_mapping.cell_mapping

        # Before we create the instance, let's make one final check that
        # the build request is still around and wasn't deleted by the
        # user already.
        try:
            objects.BuildRequest.get_by_instance_uuid(
                context, instance.uuid)
        except exception.BuildRequestNotFound:
            # the build request is gone so we're done for this instance
            LOG.debug('While scheduling instance, the build request '
                      'was already deleted.', instance=instance)
            continue
        else:
            with obj_target_cell(instance, cell):
                instance.create()

        # send a state update notification for the initial create to
        # show it going from non-existent to BUILDING
        notifications.send_update_with_states(context, instance, None,
                vm_states.BUILDING, None, None, service="conductor")

        with obj_target_cell(instance, cell):
            objects.InstanceAction.action_start(
                context, instance.uuid, instance_actions.CREATE,
                want_result=False)
            instance_bdms = self._create_block_device_mapping(
                instance.flavor, instance.uuid, block_device_mapping)

        # Update mapping for instance. Normally this check is guarded by
        # a try/except but if we're here we know that a newer nova-api
        # handled the build process and would have created the mapping
        inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
            context, instance.uuid)
        inst_mapping.cell_mapping = cell
        inst_mapping.save()

        if not self._delete_build_request(
                context, build_request, instance, cell, instance_bdms):
            # The build request was deleted before/during scheduling so
            # the instance is gone and we don't have anything to build
            # for this one.
            continue

        # NOTE(danms): Compute RPC expects security group names or ids
        # not objects, so convert this to a list of names until we can
        # pass the objects.
        legacy_secgroups = [s.identifier
                            for s in request_spec.security_groups]
        with obj_target_cell(instance, cell):
            self.compute_rpcapi.build_and_run_instance(
                context, instance=instance, image=image,
                request_spec=request_spec,
                filter_properties=filter_props,
                admin_password=admin_password,
                injected_files=injected_files,
                requested_networks=requested_networks,
                security_groups=legacy_secgroups,
                block_device_mapping=instance_bdms,
                host=host['host'], node=host['nodename'],
                limits=host['limits'])