def test_run_yaml_error(self, get_obj_client_mock):
    """A capabilities map that fails to parse yields an error Result."""
    fake_ctx = mock.MagicMock()

    # Swift client whose object fetch produces a ValueError on use.
    fake_swift = mock.MagicMock()
    fake_swift.get_object.return_value = mock.Mock(side_effect=ValueError)
    get_obj_client_mock.return_value = fake_swift

    action = heat_capabilities.GetCapabilitiesAction(self.container_name)

    self.assertEqual(
        actions.Result(
            data=None,
            error="Error parsing capabilities-map.yaml."),
        action.run(fake_ctx))
def test_retry_async_action(self):
    """A retried async task must reject duplicate completions of the same
    action execution and spawn a fresh action execution for the retry.
    """
    retry_wf = """---
version: '2.0'
repeated_retry:
  tasks:
    async_http:
      retry:
        delay: 0
        count: 100
      action: std.mistral_http url='https://google.com'
"""
    wf_service.create_workflows(retry_wf)

    wf_ex = self.engine.start_workflow('repeated_retry')

    self.await_workflow_running(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        task_ex = wf_ex.task_executions[0]
        self.await_task_running(task_ex.id)
        # The very first action execution of the task, before any retry.
        first_action_ex = task_ex.executions[0]
        self.await_action_state(first_action_ex.id, states.RUNNING)

    complete_action_params = (
        first_action_ex.id,
        ml_actions.Result(error="mock")
    )
    rpc.get_engine_client().on_action_complete(*complete_action_params)

    # Completing the same action execution a second (and third) time must
    # be rejected by the engine with a MistralException.
    for _ in range(2):
        self.assertRaises(
            exc.MistralException,
            rpc.get_engine_client().on_action_complete,
            *complete_action_params
        )

    # The retry policy should have put the task back into RUNNING.
    self.await_task_running(task_ex.id)

    with db_api.transaction():
        task_ex = db_api.get_task_execution(task_ex.id)
        action_exs = task_ex.executions
        # Exactly one retry: the failed original plus one new execution.
        self.assertEqual(2, len(action_exs))
        for action_ex in action_exs:
            if action_ex.id == first_action_ex.id:
                expected_state = states.ERROR
            else:
                expected_state = states.RUNNING
            self.assertEqual(expected_state, action_ex.state)
def test_run_invalid_name(self, get_obj_client_mock):
    """A plan name containing an underscore is rejected with an error."""
    get_obj_client_mock.return_value = mock.MagicMock()

    outcome = plan.CreateContainerAction("invalid_underscore").run(self.ctx)

    expected_error = ("Unable to create plan. The plan name must only contain "
                      "letters, numbers or dashes")
    self.assertEqual(outcome, actions.Result(None, expected_error))
def test_cancel_with_items_concurrency(self):
    """Cancelling both running with-items actions cancels task and workflow."""
    wb_def = """
version: '2.0'

name: wb1

workflows:
  wf1:
    tasks:
      t1:
        with-items: i in <% list(range(0, 4)) %>
        action: std.async_noop
        concurrency: 2
        on-success:
          - t2

      t2:
        action: std.echo output="Task 2"
"""
    wb_service.create_workbook_v2(wb_def)

    wf1_ex = self.engine.start_workflow('wb1.wf1')

    self.await_workflow_state(wf1_ex.id, states.RUNNING)

    with db_api.transaction():
        wf1_ex = self._assert_single_item(
            db_api.get_workflow_executions(), name='wb1.wf1')
        wf1_t1_ex = self._assert_single_item(
            wf1_ex.task_executions, name='t1')
        wf1_t1_action_exs = db_api.get_action_executions(
            task_execution_id=wf1_t1_ex.id)
        # Concurrency is 2, so only two of the four items are running.
        self.assertEqual(2, len(wf1_t1_action_exs))
        for running_ex in wf1_t1_action_exs:
            self.assertEqual(states.RUNNING, running_ex.state)

    # Cancel action execution for task.
    for running_ex in wf1_t1_action_exs:
        self.engine.on_action_complete(running_ex.id,
                                       ml_actions.Result(cancel=True))

    self.await_task_cancelled(wf1_t1_ex.id)
    self.await_workflow_cancelled(wf1_ex.id)

    wf1_t1_action_exs = db_api.get_action_executions(
        task_execution_id=wf1_t1_ex.id)
    self.assertEqual(2, len(wf1_t1_action_exs))
    for cancelled_ex in wf1_t1_action_exs:
        self.assertEqual(states.CANCELLED, cancelled_ex.state)
def test_scale(self):
    # active nodes with assigned profiles are fine
    self.nodes[:] = [
        self._get_fake_node(profile='compute', provision_state='active'),
        self._get_fake_node(profile='control'),
    ]
    self._test(actions.Result(data={'errors': [], 'warnings': []}))
def send_error_back(error_msg):
    # Wrap the message in an error Result.
    failure = mistral_lib.Result(error=error_msg)

    # Without a known action execution we can only hand the Result back.
    if not action_ex_id:
        return failure

    # Otherwise report the failure to the engine and return nothing.
    self._engine_client.on_action_complete(
        action_ex_id,
        failure
    )

    return None
def run(self, context):
    """Delete ``self.key`` from the plan environment.

    Returns the environment dict on success, or an error Result if
    either Swift operation fails.
    """
    object_client = self.get_object_client(context)

    try:
        plan_env = plan_utils.get_env(object_client, self.container)
    except swiftexceptions.ClientException as client_err:
        message = ("Error retrieving environment for plan %s: %s"
                   % (self.container, client_err))
        LOG.exception(message)
        return actions.Result(error=message)

    try:
        plan_utils.update_in_env(
            object_client, plan_env, self.key, delete_key=True)
    except swiftexceptions.ClientException as client_err:
        message = ("Error updating environment for plan %s: %s"
                   % (self.container, client_err))
        LOG.exception(message)
        return actions.Result(error=message)

    # The stored parameters are stale now; drop the cached copy.
    self.cache_delete(context, self.container, "tripleo.parameters.get")

    return plan_env
def test_no_matching_without_scale(self):
    """Nodes with an unmatched profile produce no issues when not scaling."""
    self.flavors = {name: (object(), 0) for name in self.flavors}
    self.nodes[:] = [self._get_fake_node(profile='fake')
                     for _ in range(2)]
    self._test(actions.Result(data={'errors': [], 'warnings': []}))
def test_put_error_with_output(self, f):
    """PUT of an errored action execution forwards its error output."""
    f.return_value = ERROR_ACTION_EX_WITH_OUTPUT

    response = self.app.put_json('/v2/action_executions/123',
                                 ERROR_ACTION_WITH_OUTPUT)

    self.assertEqual(200, response.status_int)
    self.assertDictEqual(ERROR_ACTION_WITH_OUTPUT, response.json)

    f.assert_called_once_with(
        ERROR_ACTION_WITH_OUTPUT['id'],
        ml_actions.Result(error=ERROR_ACTION_RES_WITH_OUTPUT))
def run(self, context):
    # Mutate one parameter under ``self.env_key`` in the plan environment:
    # delete it when ``self.delete`` is set, otherwise set it to
    # ``self.value``.
    try:
        swift = self.get_object_client(context)
        plan_env = plan_utils.get_env(swift, self.container)

        if self.env_key in plan_env.keys():
            if self.delete:
                # Best-effort removal; a missing parameter is not an error.
                try:
                    plan_env[self.env_key].pop(self.parameter)
                except KeyError:
                    pass
            else:
                plan_env[self.env_key].update({self.parameter: self.value})
        else:
            msg = "The environment key doesn't exist: %s" % self.env_key
            return actions.Result(error=msg)
    except swiftexceptions.ClientException as err:
        msg = "Error attempting an operation on container: %s" % err
        return actions.Result(error=msg)
    except Exception as err:
        msg = "Error while updating plan: %s" % err
        return actions.Result(error=msg)
    # NOTE(review): the modified plan_env is never written back to Swift
    # here and the method implicitly returns None on success -- confirm
    # that a caller or subclass persists the change, otherwise the
    # update is lost.
def run(self, context):
    """Replace the container contents with a tarball of ``self.path``.

    The path must look like a tempfile-created mistral action file under
    /tmp; anything else is rejected before touching Swift.
    """
    swift = self.get_object_client(context)
    swift_service = self.get_object_service(context)

    tarball_name = 'temporary_dir-%s.tar.gz' % timeutils.timestamp()

    # regex from tempfile's _RandomNameSequence characters
    _regex = '^/tmp/file-mistral-action[A-Za-z0-9_]{6}$'

    path_is_valid = (isinstance(self.path, six.string_types)
                     and re.match(_regex, self.path))
    if not path_is_valid:
        return actions.Result(
            error={"msg": "Path does not match %s" % _regex})

    try:
        # Empty the container first so only the new tarball remains.
        _headers, existing_objects = swift.get_container(self.container)
        for obj in existing_objects:
            swift.delete_object(self.container, obj['name'])

        swiftutils.create_and_upload_tarball(swift_service,
                                             self.path,
                                             self.container,
                                             tarball_name,
                                             delete_after=sys.maxsize)
    except swiftexceptions.ClientException as err:
        msg = "Error attempting an operation on container: %s" % err
        return actions.Result(error={"msg": six.text_type(msg)})
    except (OSError, IOError) as err:
        msg = "Error while writing file: %s" % err
        return actions.Result(error={"msg": six.text_type(msg)})
    except processutils.ProcessExecutionError as err:
        msg = "Error while creating a tarball: %s" % err
        return actions.Result(error={"msg": six.text_type(msg)})
    except Exception as err:
        msg = "Error exporting logs: %s" % err
        return actions.Result(error={"msg": six.text_type(msg)})

    success_msg = "Saved tarball of directory: %s in Swift container: %s" % (
        self.path, self.container)

    return actions.Result(data={"msg": success_msg})
def test_with_items_action_context(self):
    """Each with-items action result lands in the aggregated task result."""
    wb_service.create_workbook_v2(WB_ACTION_CONTEXT)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wb.wf', wf_input=WF_INPUT_URLS)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        task_ex = wf_ex.task_executions[0]
        act_exs = task_ex.executions

        # Complete all three item actions with distinct payloads.
        for position, payload in enumerate(("Ivan", "John", "Mistral")):
            self.engine.on_action_complete(
                act_exs[position].id,
                actions_base.Result(payload)
            )

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        task_ex = db_api.get_task_execution(task_ex.id)

        result = data_flow.get_task_execution_result(task_ex)

        self.assertIsInstance(result, list)
        for payload in ('John', 'Ivan', 'Mistral'):
            self.assertIn(payload, result)
        self.assertEqual(states.SUCCESS, task_ex.state)
def run(self, context):
    # Sync self.templates_dir into the plan container via a temporary
    # "<container>-tmp" container, preserving user-editable plan files.
    try:
        swift = self.get_object_client(context)

        # Upload template dir to tmp container
        container_tmp = '%s-tmp' % self.container
        with tempfile.NamedTemporaryFile() as tmp_tarball:
            tarball.create_tarball(self.templates_dir, tmp_tarball.name)
            tarball.tarball_extract_to_swift_container(
                swift, tmp_tarball.name, container_tmp)

        # Get all new templates:
        new_templates = swift.get_object(container_tmp, '')[1].splitlines()
        old_templates = swift.get_object(self.container, '')[1].splitlines()
        # Files the user may have edited; never overwrite these.
        exclude_user_data = [
            constants.PLAN_ENVIRONMENT,
            constants.OVERCLOUD_J2_ROLES_NAME,
            constants.OVERCLOUD_J2_NETWORKS_NAME,
            constants.OVERCLOUD_J2_EXCLUDES
        ]

        # Update the old container
        for new in new_templates:
            # if doesn't exist, push it:
            if new not in old_templates:
                swift.put_object(self.container, new,
                                 swift.get_object(container_tmp, new)[1])
            else:
                content_new = swift.get_object(container_tmp, new)
                content_old = swift.get_object(self.container, new)
                # NOTE(review): get_object returns (headers, body); this
                # comparison therefore includes response headers, so two
                # byte-identical objects can still compare unequal --
                # confirm whether comparing bodies only was intended.
                if (not content_new == content_old
                        and new not in exclude_user_data):
                    swift.put_object(
                        self.container, new,
                        swift.get_object(container_tmp, new)[1])
    except swiftexceptions.ClientException as err:
        msg = "Error attempting an operation on container: %s" % err
        LOG.exception(msg)
        return actions.Result(error=msg)
    except Exception as err:
        msg = "Error while updating plan: %s" % err
        LOG.exception(msg)
        return actions.Result(error=msg)
def test_run_repo_failure(self, mock_repo_clone, mock_mkdtemp):
    """A failing git clone is surfaced as an error Result."""
    mock_mkdtemp.return_value = self.temp_url
    mock_repo_clone.side_effect = git.exc.GitCommandError

    outcome = vcs.GitCloneAction(self.container, self.git_url).run(self.ctx)

    mock_mkdtemp.assert_called()
    mock_repo_clone.assert_called_with(self.git_url, self.temp_url)
    self.assertEqual(
        outcome,
        actions.Result(
            error="Error cloning remote repository: %s " % self.git_url))
def run(self, context):
    """Ensure every generated password exists in the plan environment.

    Previously stored password values are kept; only missing entries are
    filled in from the freshly generated set. Returns the password dict,
    or an error Result on Swift failure.
    """
    heat = self.get_orchestration_client(context)
    swift = self.get_object_client(context)
    mistral = self.get_workflow_client(context)

    try:
        env = plan_utils.get_env(swift, self.container)
    except swiftexceptions.ClientException as err:
        err_msg = ("Error retrieving environment for plan %s: %s"
                   % (self.container, err))
        LOG.exception(err_msg)
        return actions.Result(error=err_msg)

    try:
        stack_env = heat.stacks.environment(stack_id=self.container)
    except heat_exc.HTTPNotFound:
        stack_env = None

    generated = password_utils.generate_passwords(mistral, stack_env)

    # Create the passwords section on first use, then fill in only the
    # names that are not already stored.
    stored = env.setdefault('passwords', {})
    for name, value in generated.items():
        stored.setdefault(name, value)

    try:
        plan_utils.put_env(swift, env)
    except swiftexceptions.ClientException as err:
        err_msg = "Error uploading to container: %s" % err
        LOG.exception(err_msg)
        return actions.Result(error=err_msg)

    self.cache_delete(context, self.container, "tripleo.parameters.get")

    return env['passwords']
def run(self, context):
    """Return the container URL together with the caller's auth token.

    Any failure is folded into the Result's error field rather than
    raised.
    """
    payload = None
    failure = None

    try:
        oc = self.get_object_client(context)
        oc.head_container(self.container)
        payload = {
            'container_url': "{}/{}".format(oc.url, self.container),
            'auth_key': context.auth_token,
        }
    except Exception as err:
        failure = str(err)

    return actions.Result(data=payload, error=failure)
def run(self, context):
    """Build overcloudrc contents for the deployed stack.

    AdminPassword is taken from parameter_defaults when the user supplied
    one, falling back to the auto-generated passwords section.
    """
    orchestration_client = self.get_orchestration_client(context)
    swift = self.get_object_client(context)

    try:
        stack = orchestration_client.stacks.get(self.container)
    except heat_exc.HTTPNotFound:
        return actions.Result(error=(
            "The Heat stack {} could not be found. Make sure you have "
            "deployed before calling this action.").format(self.container))

    # We need to check parameter_defaults first for a user provided
    # password. If that doesn't exist, we then should look in the
    # automatically generated passwords.
    # TODO(d0ugal): Abstract this operation somewhere. We shouldn't need to
    # know about the structure of the environment to get a password.
    try:
        env = plan_utils.get_env(swift, self.container)
    except swiftexceptions.ClientException as err:
        err_msg = ("Error retrieving environment for plan %s: %s"
                   % (self.container, err))
        LOG.error(err_msg)
        return actions.Result(error=err_msg)

    try:
        parameter_defaults = env['parameter_defaults']
        passwords = env['passwords']
        admin_pass = parameter_defaults.get('AdminPassword')
        if admin_pass is None:
            admin_pass = passwords['AdminPassword']
    except KeyError:
        return actions.Result(error=(
            "Unable to find the AdminPassword in the plan "
            "environment."))

    region_name = parameter_defaults.get('KeystoneRegion')

    return overcloudrc.create_overcloudrc(stack, self.no_proxy, admin_pass,
                                          region_name)
def run(self, context):
    """Return the list of role names defined in the deployment plan."""
    try:
        swift = self.get_object_client(context)
        raw_roles = swift.get_object(
            self.container, constants.OVERCLOUD_J2_ROLES_NAME)[1]
        roles_data = yaml.safe_load(raw_roles)
    except Exception as err:
        err_msg = ("Error retrieving roles data from deployment plan: %s"
                   % err)
        LOG.exception(err_msg)
        return actions.Result(error=err_msg)

    return [role['name'] for role in roles_data]
def _send_result_to_parent_workflow(self):
    """Translate this subworkflow's terminal state into an action Result
    and queue it for delivery to the parent workflow.
    """
    state = self.wf_ex.state

    if state == states.SUCCESS:
        result = ml_actions.Result(data=self.wf_ex.output)
    elif state in (states.ERROR, states.CANCELLED):
        # ERROR and CANCELLED differ only in the default message and the
        # cancel flag on the Result.
        if state == states.ERROR:
            default_info = ('Failed subworkflow [execution_id=%s]'
                            % self.wf_ex.id)
        else:
            default_info = ('Cancelled subworkflow [execution_id=%s]'
                            % self.wf_ex.id)

        result = ml_actions.Result(
            error=self.wf_ex.state_info or default_info,
            cancel=(state == states.CANCELLED)
        )
    else:
        raise RuntimeError(
            "Method _send_result_to_parent_workflow() must never be called"
            " if a workflow is not in SUCCESS, ERROR or CANCELLED state."
        )

    action_queue.schedule_on_action_complete(self.wf_ex.id,
                                             result,
                                             wf_action=True)
def test_with_items_action_context(self):
    # TODO(rakhmerov): Seems like the name of the test is not valid
    # anymore since there's nothing related to action context in it.
    # We need to revisit and refactor the entire module.
    wb_service.create_workbook_v2(WB_ACTION_CONTEXT)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wb.wf',
                                       wf_input={'items': [1, 2, 3]})

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        task_ex = wf_ex.task_executions[0]
        act_exs = task_ex.executions

        # Complete the three item actions with distinct payloads.
        for position, payload in enumerate(("Ivan", "John", "Mistral")):
            self.engine.on_action_complete(act_exs[position].id,
                                           actions_base.Result(payload))

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        task_ex = db_api.get_task_execution(task_ex.id)

        result = data_flow.get_task_execution_result(task_ex)

        self.assertIsInstance(result, list)
        for payload in ('John', 'Ivan', 'Mistral'):
            self.assertIn(payload, result)
        self.assertEqual(states.SUCCESS, task_ex.state)
def run(self, context):
    """Derive the DPDK core list from introspected NUMA topology.

    For each NUMA node, threads of cores other than the core owning the
    node's lowest thread id are collected, capped at the configured
    per-node core count. Returns a comma-separated thread id string.
    """
    numa_cpus_info = self.inspect_data.get('numa_topology',
                                           {}).get('cpus', [])

    # Checks whether numa topology cpus information is not available
    # in introspection data.
    if not numa_cpus_info:
        msg = 'Introspection data does not have numa_topology.cpus'
        return actions.Result(error=msg)

    # Checks whether CPU physical cores count for each NUMA nodes is
    # not available
    if not self.numa_nodes_cores_count:
        msg = ('CPU physical cores count for each NUMA nodes '
               'is not available')
        return actions.Result(error=msg)

    # Group every available thread id by its NUMA node.
    threads_by_node = {}
    for cpu in numa_cpus_info:
        threads_by_node.setdefault(cpu['numa_node'], []).extend(
            cpu['thread_siblings'])

    dpdk_core_list = []

    for node, node_cores_count in enumerate(self.numa_nodes_cores_count):
        # Gets least thread in NUMA node
        numa_node_min = min(threads_by_node[node])
        remaining = node_cores_count

        for cpu in numa_cpus_info:
            if cpu['numa_node'] != node:
                continue
            # Adds threads from core which is not having least thread
            if numa_node_min not in cpu['thread_siblings']:
                dpdk_core_list.extend(cpu['thread_siblings'])
                remaining -= 1
                if remaining == 0:
                    break

    return ','.join(str(thread) for thread in dpdk_core_list)
def run(self, context):
    """Validate the given roles against the available role definitions.

    Returns Result(data=True) on success, or an error Result whose
    message accumulates every validation problem found.
    """
    err_msg = ""

    # validate roles in input exist in roles directory in t-h-t
    try:
        roles_utils.check_role_exists(
            [role['name'] for role in self.available_roles],
            [role['name'] for role in self.roles])
    except Exception as chk_err:
        err_msg = str(chk_err)

    # validate role yaml
    for role in self.roles:
        try:
            roles_utils.validate_role_yaml(yaml.safe_dump([role]))
        except exception.RoleMetadataError as rme:
            if 'name' in role:
                err_msg += "\n%s for %s" % (str(rme), role['name'])
            else:
                err_msg += "\n%s" % str(rme)

    return (actions.Result(error=err_msg) if err_msg
            else actions.Result(data=True))
def run(self, context):
    """Return the merged (or replacement) network_data payload.

    With replace_all set, the input networks replace the stored ones
    wholesale; otherwise input entries override stored entries sharing
    the same name.
    """
    if self.replace_all:
        saved_networks = self.networks or []
    else:
        # merge the networks_data and the network_input into networks
        # to be saved; later (input) entries win on name collisions
        merged_by_name = {
            net['name']: net
            for net in self.current_networks + self.networks
        }
        saved_networks = list(merged_by_name.values())

    return actions.Result(data={'network_data': saved_networks})
def _prepare_output(self, result):
    """Apply the ad-hoc action's output transformer to a success result.

    Error results pass through untouched; so do results when the action
    spec defines no output transformer.
    """
    # In case of error, we don't transform a result.
    if result.is_error():
        return result

    adhoc_spec = spec_parser.get_action_spec(self.adhoc_action_def.spec)
    transformer = adhoc_spec.get_output()

    if transformer is None:
        return result

    transformed_data = expr.evaluate_recursively(transformer, result.data)

    return ml_actions.Result(data=transformed_data, error=result.error)
def test_put_error_with_unknown_reason_output_none(self, f):
    """PUT with no output on an errored execution falls back to the
    default error output."""
    f.return_value = ERROR_ACTION_EX_FOR_EMPTY_OUTPUT

    response = self.app.put_json('/v2/action_executions/123',
                                 ERROR_ACTION_WITH_NONE_OUTPUT)

    self.assertEqual(200, response.status_int)
    self.assertDictEqual(ERROR_ACTION_FOR_EMPTY_OUTPUT, response.json)

    f.assert_called_once_with(ERROR_ACTION_FOR_EMPTY_OUTPUT['id'],
                              ml_actions.Result(error=DEFAULT_ERROR_OUTPUT))
def run(self, context):
    """Dump all MySQL databases into a gzipped backup file.

    A pid file acts as a crude lock so that only one backup runs at a
    time. Returns a Result with a human-readable message in either the
    data (success) or error (failure/busy) field.
    """
    pid_file = tempfile.gettempdir() + os.sep + "mysqldump.pid"

    if os.path.exists(pid_file):
        msg = 'Another Backup process is running'
        return actions.Result(error={"msg": six.text_type(msg)})

    # BUG FIX: the original called ``lockfile.close`` without parentheses,
    # so the pid file handle was never explicitly closed. A context
    # manager guarantees the close.
    with open(pid_file, 'w') as lockfile:
        lockfile.write("%s\n" % os.getpid())

    # Backup all databases with nice and ionice just not to create
    # a huge load on undercloud. Output will be redirected to mysqldump
    # variable and will be gzipped.
    strlocals = self.__dict__
    script = ("#!/bin/bash\n"
              "nice -n 19 ionice -c2 -n7 \\\n"
              " mysqldump -h'%(dbhost)s' \\\n"
              " -u'%(dbuser)s' -p'%(dbpassword)s' \\\n"
              " --opt --all-databases |\\\n"
              " gzip > %(backup_name)s\n") % strlocals

    proc_failed = False
    try:
        subprocess.check_call(script, shell=True)
    except subprocess.CalledProcessError:
        proc_failed = True
        msg = 'Database dump failed. Deleting temporary directory'
        # Remove the partial/corrupt dump file.
        os.remove(self.backup_name)
    else:
        msg = 'Database dump created succesfully'
    finally:
        # Always release the pid-file lock.
        os.remove(pid_file)

    if proc_failed:
        return actions.Result(error={'msg': six.text_type(msg)})
    else:
        return actions.Result(data={'msg': six.text_type(msg)})
def run(self, context): swift = self.get_object_client(context) try: env = plan_utils.get_env(swift, self.container) except swiftexceptions.ClientException as err: err_msg = ("Error retrieving environment for plan %s: %s" % (self.container, err)) LOG.exception(err_msg) return actions.Result(error=err_msg) for k, v in self.environments.items(): found = False if {'path': k} in env['environments']: found = True if v: if not found: env['environments'].append({'path': k}) else: if found: env['environments'].remove({'path': k}) if self.purge_missing: for e in env['environments']: if e.get('path') not in self.environments: env['environments'].remove(e) self.cache_delete(context, self.container, "tripleo.parameters.get") try: plan_utils.put_env(swift, env) except swiftexceptions.ClientException as err: err_msg = "Error uploading to container: %s" % err LOG.exception(err_msg) return actions.Result(error=err_msg) return env
def run(self, context):
    """Set deploy/rescue kernel+ramdisk and boot_option on an Ironic node."""
    baremetal_client = self.get_baremetal_client(context)
    image_client = self.get_image_client(context)

    try:
        image_ids = {'kernel': None, 'ramdisk': None}
        if self.kernel_name is not None and self.ramdisk_name is not None:
            image_ids = glance.create_or_find_kernel_and_ramdisk(
                image_client, self.kernel_name, self.ramdisk_name)

        node = baremetal_client.node.get(self.node_uuid)

        capabilities = nodes.capabilities_to_dict(
            node.properties.get('capabilities', {}))
        if self.instance_boot_option is not None:
            capabilities['boot_option'] = self.instance_boot_option
        capabilities = nodes.dict_to_capabilities(capabilities)

        # One 'add' patch per managed property, in a fixed order:
        # capabilities first, then deploy_* and rescue_* images.
        patches = [{
            'op': 'add',
            'path': '/properties/capabilities',
            'value': capabilities,
        }]
        for prefix in ('deploy', 'rescue'):
            patches.append({
                'op': 'add',
                'path': '/driver_info/%s_ramdisk' % prefix,
                'value': image_ids['ramdisk'],
            })
            patches.append({
                'op': 'add',
                'path': '/driver_info/%s_kernel' % prefix,
                'value': image_ids['kernel'],
            })

        baremetal_client.node.update(node.uuid, patches)

        LOG.debug("Configuring boot option for Node %s", self.node_uuid)
    except Exception as err:
        LOG.exception("Error configuring node boot options with Ironic.")
        return actions.Result(error=six.text_type(err))
def test_unexisting_join_task_does_not_stuck_wf_running(self):
    """Failing one branch of an unsatisfiable join must error the workflow
    rather than leaving it RUNNING forever."""
    wf_text = """---
version: '2.0'

wf:
  tasks:
    branch1:
      action: std.noop
      on-success: branch1-23_merge

    branch2:
      action: std.async_noop
      on-success: branch2-3_merge

    branch3:
      action: std.fail
      on-success: branch2-3_merge

    branch2-3_merge:
      action: std.noop
      on-success: branch1-23_merge
      join: all

    branch1-23_merge:
      action: std.noop
      join: all
"""
    wf_service.create_workflows(wf_text)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wf')

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        branch2_ex = self._assert_single_item(
            wf_ex.task_executions,
            name='branch2'
        )

        branch2_action_exs = db_api.get_action_executions(
            task_execution_id=branch2_ex.id
        )

    # Fail the async branch; the join can now never be satisfied.
    self.engine.on_action_complete(
        branch2_action_exs[0].id,
        ml_actions.Result(error="Error!")
    )

    self.await_workflow_error(wf_ex.id)
def run(self, context):
    """Fetch roles data from the plan; full data when detail is set,
    otherwise just the role names."""
    try:
        swift = self.get_object_client(context)
        raw_roles = swiftutils.get_object_string(
            swift, self.container, self.role_file_name)
        roles_data = yaml.safe_load(raw_roles)
    except Exception as err:
        err_msg = ("Error retrieving roles data from deployment plan: %s"
                   % err)
        LOG.exception(err_msg)
        return actions.Result(error=err_msg)

    if self.detail:
        return roles_data

    return [role['name'] for role in roles_data]