def restore(self, data):
    """Restore the state of this service from the datafile."""
    # The state of a service is simpler than for jobs. There are just a
    # few things we want to guarantee:
    #  1. If service instances are up, they can continue to be up. We'll
    #     just start monitoring from where we left off.
    #  2. Failures are maintained and have to be cleared.

    # Start our machine from where it left off
    self.machine.state = state.named_event_by_name(
        Service.STATE_DOWN, data['state'])

    if self.machine.state in (Service.STATE_DOWN, Service.STATE_FAILED):
        self.event_recorder.emit_info("restored")
        return

    # Restore all the instances: just mark each one as up and start a
    # monitor for it.
    for instance in data['instances']:
        try:
            node = self.node_pool[instance['node']]
        except KeyError:
            log.error("Failed to find node %s in pool for %s",
                      instance['node'], self.name)
            continue

        service_instance = self._create_instance(
            node, instance['instance_number'])
        service_instance.machine.state = ServiceInstance.STATE_MONITORING
        service_instance._run_monitor()

    self.instances.sort(key=lambda i: i.instance_number)
    self.event_recorder.emit_info("restored")
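# Illustrative only: the shape below is inferred from the key lookups in
# restore() above ('state', 'instances', 'node', 'instance_number'); the
# concrete state and node names are assumptions, not real serialized data.
example_service_data = {
    'state': 'up',
    'instances': [
        {'node': 'batch-node-0', 'instance_number': 0},
        {'node': 'batch-node-1', 'instance_number': 1},
    ],
}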
def restore_state(self, state_data):
    self.id = state_data['id']
    self.machine.state = state.named_event_by_name(
        self.STATE_SCHEDULED, state_data['state'])
    self.run_time = state_data['run_time']
    self.start_time = state_data['start_time']
    self.end_time = state_data['end_time']
    self.rendered_command = state_data['command']

    # We were running when the state file was built, so we have no idea
    # what has happened since then.
    if self.is_running:
        self.machine.transition('fail_unknown')
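# Illustrative only: key names mirror the reads in restore_state() above;
# every value is an assumption, and the "<job_run_id>.<action_name>" id
# layout is only suggested by the rsplit('.', 1) in from_state() below.
example_state_data = {
    'id': 'sample_job.0.sample_action',
    'state': 'scheduled',
    'run_time': None,
    'start_time': None,
    'end_time': None,
    'command': 'echo hello',
}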
def from_state(
    cls,
    state_data,
    parent_context,
    output_path,
    job_run_node,
    cleanup=False,
):
    """Restore the state of this ActionRun from a serialized state."""
    pool_repo = node.NodePoolRepository.get_instance()

    # Support state from older version
    if 'id' in state_data:
        job_run_id, action_name = state_data['id'].rsplit('.', 1)
    else:
        job_run_id = state_data['job_run_id']
        action_name = state_data['action_name']

    job_run_node = pool_repo.get_node(
        state_data.get('node_name'),
        job_run_node,
    )
    rendered_command = state_data.get('rendered_command')

    run = cls(
        job_run_id,
        action_name,
        job_run_node,
        parent_context=parent_context,
        output_path=output_path,
        rendered_command=rendered_command,
        bare_command=state_data['command'],
        cleanup=cleanup,
        start_time=state_data['start_time'],
        end_time=state_data['end_time'],
        run_state=state.named_event_by_name(
            cls.STATE_SCHEDULED,
            state_data['state'],
        ),
        exit_status=state_data.get('exit_status'),
    )

    # Transition running to fail unknown because exit status was missed
    if run.is_running:
        run._done('fail_unknown')
    if run.is_starting:
        run.fail(None)

    return run
def from_state(cls, state_data, parent_context, output_path,
               job_run_node, cleanup=False):
    """Restore the state of this ActionRun from a serialized state."""
    pool_repo = node.NodePoolRepository.get_instance()

    # Support state from older version
    if 'id' in state_data:
        job_run_id, action_name = state_data['id'].rsplit('.', 1)
    else:
        job_run_id = state_data['job_run_id']
        action_name = state_data['action_name']

    job_run_node = pool_repo.get_node(
        state_data.get('node_name'), job_run_node)
    rendered_command = state_data.get('rendered_command')

    run = cls(
        job_run_id,
        action_name,
        job_run_node,
        parent_context=parent_context,
        output_path=output_path,
        rendered_command=rendered_command,
        bare_command=state_data['command'],
        cleanup=cleanup,
        start_time=state_data['start_time'],
        end_time=state_data['end_time'],
        run_state=state.named_event_by_name(
            cls.STATE_SCHEDULED, state_data['state']),
        exit_status=state_data.get('exit_status')
    )

    # Transition running to fail unknown because exit status was missed
    if run.is_running:
        run._done('fail_unknown')
    if run.is_starting:
        run.fail(None)

    return run
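# Illustrative only: this is the newer-format payload implied by the key
# lookups in from_state() above; every value here is an assumption. The
# older format stored a single 'id' of the form "<job_run_id>.<action_name>"
# instead of the separate 'job_run_id' / 'action_name' keys.
example_action_run_state = {
    'job_run_id': 'sample_job.42',
    'action_name': 'sample_action',
    'node_name': 'batch-node-0',
    'command': 'echo hello',            # bare (unrendered) command
    'rendered_command': 'echo hello',
    'state': 'succeeded',
    'start_time': None,
    'end_time': None,
    'exit_status': 0,
}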
def test(self):
    assert_equal(state.named_event_by_name(self.start, "c"), self.end)
def test_match(self):
    assert_equal(state.named_event_by_name(self.start, "c"), self.end)
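# Hypothetical sketch only: this shows the contract named_event_by_name is
# used for in the restore code and tests above (turn a stored state *name*
# back into a state object reachable from a starting state). The DictState
# class and the breadth-first search below are illustrative assumptions,
# not the library's actual implementation.
from collections import deque


class DictState(dict):
    """Hypothetical state: maps event names to successor states."""
    def __init__(self, name, **transitions):
        super(DictState, self).__init__(**transitions)
        self.name = name


def named_event_by_name(starting_state, name):
    seen, queue = set(), deque([starting_state])
    while queue:
        current = queue.popleft()
        if current.name == name:
            return current
        seen.add(id(current))
        queue.extend(s for s in current.values() if id(s) not in seen)
    raise ValueError("no state named %r" % name)


# Mirrors the tests above: the state named "c" is reachable from the start.
end = DictState('c')
start = DictState('a', advance=DictState('b', advance=end))
assert named_event_by_name(start, 'c') is end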