def destroy_guest_and_wait_for_failure(self):
    """Make sure the Trove Compute Manager FAILS a timed-out guest."""
    # Render the guest install unrecoverable by deleting its binaries.
    process("sudo rm -rf /vz/private/%s/bin" % str(self.local_id))
    # Before the install timeout elapses, the guest state in the
    # internal API and the REST API instance status must become FAIL.
    deadline = GUEST_INSTALL_TIMEOUT + 30
    self.wait_for_rest_api_to_show_status_as_failed(time_out=deadline)
    # The compute API may briefly spend time suspending the instance
    # first, and we want it to: while here we know the guest is dead,
    # in production a misbehaving or merely slow Nova-Guest daemon must
    # not move the status away from FAILED before the instance is
    # shut off. So wait for the instance to turn off; the manager sets
    # the guest state to FAILED afterwards.
    self.wait_for_compute_instance_to_suspend()
    # TODO(tim.simpson): It'd be really cool if we could somehow coax
    #                    the guest to repeatedly set its state in the db
    #                    to something besides failed, so we could assert
    #                    that no matter what, after suspension it was
    #                    set to such. Although maybe that's overkill.
    self._assert_status_failure(self._get_status_tuple())
    # A guest-abort error notification must have been raised since
    # create_instance captured self.abort_count.
    later_abort_count = count_notifications(notifier.ERROR,
                                            "trove.instance.abort.guest")
    assert_true(self.abort_count < later_abort_count)
def destroy_guest_and_wait_for_failure(self):
    """Make sure the Reddwarf Compute Manager FAILS a timed-out guest."""
    # Wreck the guest install beyond repair so the timeout must fire.
    process("sudo rm -rf /vz/private/%s/bin" % str(self.local_id))
    # The guest state (internal API) and instance status (REST API)
    # must both show FAIL before the install timeout runs out.
    self.wait_for_rest_api_to_show_status_as_failed(
        time_out=GUEST_INSTALL_TIMEOUT + 30)
    # There is a small window where the compute API tries to suspend
    # the instance, which is the behavior we want: even though this
    # guest is known-dead, in the real world a misbehaving or slow
    # Nova-Guest daemon must not change the status to anything but
    # FAILED before the instance is shut off. Wait for the shut-off;
    # the manager marks the guest FAILED afterwards.
    self.wait_for_compute_instance_to_suspend()
    # TODO(tim.simpson): It'd be really cool if we could somehow coax
    #                    the guest to repeatedly set its db state to
    #                    something besides failed, so we could assert
    #                    that after suspension it always ended up
    #                    FAILED. Although maybe that's overkill.
    self._assert_status_failure(self._get_status_tuple())
    # Confirm a new guest-abort error notification was emitted.
    aborts_after = count_notifications(notifier.ERROR,
                                       "reddwarf.instance.abort.guest")
    assert_true(self.abort_count < aborts_after)
def wait_for_failure(self):
    """Make sure the Reddwarf Compute Manager FAILS a timed-out volume.

    Waits for the REST API to report the instance status as FAILED,
    then verifies a volume-abort error notification was raised since
    create_instance captured ``self.abort_count``.
    """
    self.instance_exists = True
    # Pass the timeout by keyword for consistency with the sibling
    # guest-timeout tests, which call
    # wait_for_rest_api_to_show_status_as_failed(time_out=...).
    self.wait_for_rest_api_to_show_status_as_failed(
        time_out=VOLUME_TIME_OUT + 30)
    abort_count2 = count_notifications(notifier.ERROR,
                                       "reddwarf.instance.abort.volume")
    assert_true(self.abort_count < abort_count2)
def create_instance(self):
    """Create a new instance and record its volume metadata.

    Snapshots the current guest-abort error notification count (so a
    later check can assert new aborts were raised), creates the
    instance, and stores the volume id for follow-up assertions.
    """
    self.abort_count = count_notifications(notifier.ERROR,
                                           "reddwarf.instance.abort.guest")
    self._create_instance()
    # An admin context is used so the metadata lookup still succeeds
    # even if a delete request goes through between the create above
    # and this line.
    metadata = ReddwarfInstanceMetaData(self.db,
                                        context.get_admin_context(),
                                        self.local_id)
    self.volume_id = metadata.volume_id
    # The instance must have been provisioned with a volume.
    assert_is_not_none(metadata.volume)
def create_instance(self):
    """Create a new instance."""
    # Snapshot the volume-abort error notification count before the
    # create so later assertions can detect newly raised aborts.
    self.abort_count = count_notifications(notifier.ERROR,
                                           "trove.instance.abort.volume")
    self._create_instance()
    # Look the instance up under an admin context to avoid the
    # possibility that, between the create above and this lookup, the
    # request goes through and the instance is deleted.
    admin_ctxt = context.get_admin_context()
    metadata = TroveInstanceMetaData(self.db, admin_ctxt, self.local_id)
    self.volume_id = metadata.volume_id
def create_instance(self):
    """Create a new instance."""
    # Record how many volume-abort error notifications exist now; a
    # follow-up check compares against this baseline.
    self.abort_count = count_notifications(notifier.ERROR,
                                           "reddwarf.instance.abort.volume")
    self._create_instance()
    # Fetch metadata with an admin context: if a delete slips in
    # between the create above and this lookup, an admin context can
    # still see the record.
    metadata = ReddwarfInstanceMetaData(self.db,
                                        context.get_admin_context(),
                                        self.local_id)
    self.volume_id = metadata.volume_id
def out_of_instance_memory_nofication_count():
    """Return how many OutOfInstanceMemory notifications were raised.

    NOTE(review): the function name misspells "notification"; renaming
    it would break callers, so the name is kept as-is.
    """
    raised = count_notifications(notifier.ERROR, "out.of.instance.memory")
    return raised