def test_multi_deploy(self):
    """Check that repeated deploy actions get a ResetContext between them
    and that each deployment therefore starts from clean context data."""
    self.assertIsNotNone(self.parsed_data)
    job = Job(4212, None, self.parsed_data)
    pipeline = Pipeline(job=job)
    device = TestMultiDeploy.FakeDevice()
    self.assertIsNotNone(device)
    job.device = device
    job.parameters['output_dir'] = mkdtemp()
    job.set_pipeline(pipeline)
    seen = {}
    for action_data in self.parsed_data['actions']:
        for name in action_data:
            seen.setdefault(name, 1)
            # from the second occurrence of a section onwards, clear the
            # context before adding the next deployment
            if seen[name] >= 2:
                resetter = ResetContext()
                resetter.section = 'deploy'
                pipeline.add_action(resetter)
            deploy = TestMultiDeploy.TestDeploy(pipeline, action_data[name], job)
            # a freshly-added deploy starts with only the empty common context
            self.assertEqual({'common': {}}, deploy.action.data)
            seen[name] += 1
    # check that only one action has the example set
    examples = [
        detail['deploy']['example']
        for detail in self.parsed_data['actions']
        if 'example' in detail['deploy']
    ]
    self.assertEqual(['nowhere'], examples)
    param_values = [
        detail['deploy']['parameters']
        for detail in self.parsed_data['actions']
        if 'parameters' in detail['deploy']
    ]
    self.assertEqual(['faked', 'valid'], param_values)
    # deploy / reset / deploy / reset / deploy ordering in the pipeline
    expected_types = [
        TestMultiDeploy.TestDeployAction,
        ResetContext,
        TestMultiDeploy.TestDeployAction,
        ResetContext,
        TestMultiDeploy.TestDeployAction,
    ]
    for position, klass in enumerate(expected_types):
        self.assertIsInstance(pipeline.actions[position], klass)
    job.validate()
    self.assertEqual([], job.pipeline.errors)
    job.run()
    self.assertNotEqual(
        pipeline.actions[0].data,
        {'common': {}, 'fake_deploy': pipeline.actions[0].parameters})
    self.assertNotEqual(
        pipeline.actions[1].data,
        {'common': {}, 'fake_deploy': pipeline.actions[1].parameters})
    self.assertEqual(
        pipeline.actions[2].data,
        {'common': {}, 'fake_deploy': pipeline.actions[4].parameters})
    # check that values from previous DeployAction run actions have been cleared
    self.assertEqual(
        pipeline.actions[4].data,
        {'common': {}, 'fake_deploy': pipeline.actions[4].parameters})
def parse(self, content, device, job_id, socket_addr, output_dir=None, env_dut=None):
    """Parse a YAML job definition into a Job with a populated Pipeline.

    Loads the YAML with a customised loader (compose_node/construct_mapping
    hooks, presumably to track yaml_line info -- see self.compose_node),
    instantiates matching protocols, then walks the 'actions' list turning
    each named section into pipeline actions.

    :param content: YAML job definition passed to yaml.Loader.
    :param device: device object; device.target is recorded in job parameters.
    :param job_id: identifier for the new Job.
    :param socket_addr: passed straight through to the Job constructor.
    :param output_dir: recorded in job parameters and in the parsed data.
    :param env_dut: DUT environment, recorded in job parameters.
    :return: the constructed Job with its pipeline set.
    """
    self.loader = yaml.Loader(content)
    self.loader.compose_node = self.compose_node
    self.loader.construct_mapping = self.construct_mapping
    data = self.loader.get_single_data()
    # defaults applied to every action/test unless overridden later
    self.context['default_action_duration'] = Timeout.default_duration()
    self.context['default_test_duration'] = Timeout.default_duration()
    job = Job(job_id, socket_addr, data)
    # per-section occurrence counter: deploy/boot/test may repeat
    counts = {}
    job.device = device
    job.parameters['output_dir'] = output_dir
    job.parameters['env_dut'] = env_dut
    job.parameters['target'] = device.target
    # instantiate every protocol whose class matches the job parameters
    # NOTE(review): no ordering applied here -- protocols run in whatever
    # order select_all yields them
    for instance in Protocol.select_all(job.parameters):
        job.protocols.append(instance(job.parameters))
    pipeline = Pipeline(job=job)
    self._timeouts(data, job)
    # FIXME: also read permissable overrides from device config and set from job data
    # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
    for action_data in data['actions']:
        # drop the loader's line-tracking key before processing
        action_data.pop('yaml_line', None)
        for name in action_data:
            if type(
                    action_data[name]
            ) is dict:  # FIXME: commands are not fully implemented & may produce a list
                # push the current defaults into each action's parameters
                action_data[name]['default_action_timeout'] = self.context[
                    'default_action_duration']
                action_data[name]['default_test_timeout'] = self.context[
                    'default_test_duration']
            counts.setdefault(name, 1)
            if name == 'deploy' or name == 'boot' or name == 'test':
                # reset the context before adding a second deployment and again before third etc.
                if name == 'deploy' and counts[name] >= 2:
                    reset_context = ResetContext()
                    reset_context.section = name
                    pipeline.add_action(reset_context)
                parse_action(action_data, name, device, pipeline)
            elif name == 'repeat':
                count = action_data[name][
                    'count']  # first list entry must be the count dict
                repeats = action_data[name]['actions']
                # unroll the repeat block: each iteration re-parses every
                # contained action with a repeat-count marker
                for c_iter in xrange(count):
                    for repeating in repeats:  # block of YAML to repeat
                        for repeat_action in repeating:  # name of the action for this block
                            if repeat_action == 'yaml_line':
                                continue
                            repeating[repeat_action][
                                'repeat-count'] = c_iter
                            parse_action(repeating, repeat_action, device, pipeline)
            else:
                # May only end up being used for submit as other actions all need strategy method objects
                # select the specific action of this class for this job
                action = Action.select(name)()
                action.job = job
                # put parameters (like rootfs_type, results_dir) into the actions.
                if type(action_data[name]) == dict:
                    action.parameters = action_data[name]
                elif name == "commands":
                    # FIXME
                    pass
                elif type(action_data[name]) == list:
                    # NOTE(review): each assignment replaces the previous one,
                    # so only the last list entry survives -- confirm intended
                    for param in action_data[name]:
                        action.parameters = param
                action.summary = name
                action.timeout = Timeout(
                    action.name, self.context['default_action_duration'])
                pipeline.add_action(action)
            counts[name] += 1
    # there's always going to need to be a finalize_process action
    pipeline.add_action(FinalizeAction())
    data['output_dir'] = output_dir
    job.set_pipeline(pipeline)
    return job
def parse(self, content, device, job_id, socket_addr, output_dir=None, env_dut=None):
    """Parse a YAML job definition into a Job with a populated Pipeline.

    Same flow as the other parse variant, but protocols are instantiated in
    order of their class level: select_all returns (class, level) tuples
    which are sorted before instantiation.

    :param content: YAML job definition passed to yaml.Loader.
    :param device: device object; device.target is recorded in job parameters.
    :param job_id: identifier for the new Job.
    :param socket_addr: passed straight through to the Job constructor.
    :param output_dir: recorded in job parameters and in the parsed data.
    :param env_dut: DUT environment, recorded in job parameters.
    :return: the constructed Job with its pipeline set.
    """
    self.loader = yaml.Loader(content)
    self.loader.compose_node = self.compose_node
    self.loader.construct_mapping = self.construct_mapping
    data = self.loader.get_single_data()
    # defaults applied to every action/test unless overridden later
    self.context['default_action_duration'] = Timeout.default_duration()
    self.context['default_test_duration'] = Timeout.default_duration()
    job = Job(job_id, socket_addr, data)
    # per-section occurrence counter: deploy/boot/test may repeat
    counts = {}
    job.device = device
    job.parameters['output_dir'] = output_dir
    job.parameters['env_dut'] = env_dut
    job.parameters['target'] = device.target
    level_tuple = Protocol.select_all(job.parameters)
    # sort the list of protocol objects by the protocol class level.
    job.protocols = [item[0](job.parameters) for item in sorted(level_tuple, key=lambda level_tuple: level_tuple[1])]
    pipeline = Pipeline(job=job)
    self._timeouts(data, job)
    # FIXME: also read permissable overrides from device config and set from job data
    # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
    for action_data in data['actions']:
        # drop the loader's line-tracking key before processing
        action_data.pop('yaml_line', None)
        for name in action_data:
            if type(action_data[name]) is dict:  # FIXME: commands are not fully implemented & may produce a list
                # push the current defaults into each action's parameters
                action_data[name]['default_action_timeout'] = self.context['default_action_duration']
                action_data[name]['default_test_timeout'] = self.context['default_test_duration']
            counts.setdefault(name, 1)
            if name == 'deploy' or name == 'boot' or name == 'test':
                # reset the context before adding a second deployment and again before third etc.
                if name == 'deploy' and counts[name] >= 2:
                    reset_context = ResetContext()
                    reset_context.section = name
                    pipeline.add_action(reset_context)
                parse_action(action_data, name, device, pipeline)
            elif name == 'repeat':
                count = action_data[name]['count']  # first list entry must be the count dict
                repeats = action_data[name]['actions']
                # unroll the repeat block: each iteration re-parses every
                # contained action with a repeat-count marker
                for c_iter in xrange(count):
                    for repeating in repeats:  # block of YAML to repeat
                        for repeat_action in repeating:  # name of the action for this block
                            if repeat_action == 'yaml_line':
                                continue
                            repeating[repeat_action]['repeat-count'] = c_iter
                            parse_action(repeating, repeat_action, device, pipeline)
            else:
                # May only end up being used for submit as other actions all need strategy method objects
                # select the specific action of this class for this job
                action = Action.select(name)()
                action.job = job
                # put parameters (like rootfs_type, results_dir) into the actions.
                if type(action_data[name]) == dict:
                    action.parameters = action_data[name]
                elif name == "commands":
                    # FIXME
                    pass
                elif type(action_data[name]) == list:
                    # NOTE(review): each assignment replaces the previous one,
                    # so only the last list entry survives -- confirm intended
                    for param in action_data[name]:
                        action.parameters = param
                action.summary = name
                action.timeout = Timeout(action.name, self.context['default_action_duration'])
                pipeline.add_action(action)
            counts[name] += 1
    # there's always going to need to be a finalize_process action
    pipeline.add_action(FinalizeAction())
    data['output_dir'] = output_dir
    job.set_pipeline(pipeline)
    return job
def test_multi_deploy(self):
    """Verify ResetContext actions are interleaved between repeated deploys
    so that each deploy action runs against cleared context data."""
    self.assertIsNotNone(self.parsed_data)
    job = Job(4212, None, self.parsed_data)
    pipeline = Pipeline(job=job)
    device = TestMultiDeploy.FakeDevice()
    self.assertIsNotNone(device)
    job.device = device
    job.parameters['output_dir'] = mkdtemp()
    job.set_pipeline(pipeline)
    counts = {}
    for action_data in self.parsed_data['actions']:
        for name in action_data:
            if name not in counts:
                counts[name] = 1
            # second and later occurrences get a context reset first
            if counts[name] >= 2:
                reset_context = ResetContext()
                reset_context.section = 'deploy'
                pipeline.add_action(reset_context)
            parameters = action_data[name]
            test_deploy = TestMultiDeploy.TestDeploy(pipeline, parameters, job)
            # newly-created deploy sees only the empty common context
            self.assertEqual({'common': {}}, test_deploy.action.data)
            counts[name] = counts[name] + 1
    # check that only one action has the example set
    self.assertEqual(
        ['nowhere'],
        [detail['deploy']['example']
         for detail in self.parsed_data['actions']
         if 'example' in detail['deploy']])
    self.assertEqual(
        ['faked', 'valid'],
        [detail['deploy']['parameters']
         for detail in self.parsed_data['actions']
         if 'parameters' in detail['deploy']])
    # alternating deploy / reset actions in pipeline order
    self.assertIsInstance(pipeline.actions[0], TestMultiDeploy.TestDeployAction)
    self.assertIsInstance(pipeline.actions[1], ResetContext)
    self.assertIsInstance(pipeline.actions[2], TestMultiDeploy.TestDeployAction)
    self.assertIsInstance(pipeline.actions[3], ResetContext)
    self.assertIsInstance(pipeline.actions[4], TestMultiDeploy.TestDeployAction)
    job.validate()
    self.assertEqual([], job.pipeline.errors)
    job.run()
    self.assertNotEqual(
        pipeline.actions[0].data,
        {'common': {}, 'fake_deploy': pipeline.actions[0].parameters})
    self.assertNotEqual(
        pipeline.actions[1].data,
        {'common': {}, 'fake_deploy': pipeline.actions[1].parameters})
    self.assertEqual(
        pipeline.actions[2].data,
        {'common': {}, 'fake_deploy': pipeline.actions[4].parameters})
    # check that values from previous DeployAction run actions have been cleared
    self.assertEqual(
        pipeline.actions[4].data,
        {'common': {}, 'fake_deploy': pipeline.actions[4].parameters})