def check_for_subplaybook_usages(self, file_path: str, current_playbook_id: str,
                                 new_playbook_id: str) -> None:
    """Check if the current_playbook_id appears in the file's playbook-type tasks
    and change it if needed.

    Arguments:
        file_path (str): The file path to check.
        current_playbook_id (str): The current playbook ID.
        new_playbook_id (str): The new playbook ID.
    """
    updated_tasks = []
    # if the changed file is a playbook, get its data
    if find_type(file_path) in [FileType.PLAYBOOK, FileType.TEST_PLAYBOOK]:
        playbook_data = get_yaml(file_path)
        # go through all the tasks
        for task_id, task_data in playbook_data.get('tasks', {}).items():
            # if a task is of playbook type
            if task_data.get('type') == 'playbook':
                id_key = 'playbookId' if 'playbookId' in task_data.get('task', {}) \
                    else 'playbookName'
                # make sure the playbookId or playbookName uses the new ID and not the old one
                if task_data.get('task', {}).get(id_key) == current_playbook_id:
                    playbook_data['tasks'][task_id]['task'][id_key] = new_playbook_id
                    updated_tasks.append(task_id)

        # if any tasks were changed, re-write the playbook
        if updated_tasks:
            if self.verbose:
                click.echo(f'Found usage of playbook in {file_path} tasks: '
                           f'{" ".join(updated_tasks)} - Updating playbookId')
            write_yml(file_path, playbook_data)
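# Illustrative sketch (not part of the module): the task shape that
# check_for_subplaybook_usages rewrites. Task '2' calls a sub-playbook by ID,
# so renaming 'OldPlaybook' to 'NewPlaybook' updates its 'playbookId' field.
# The surrounding keys follow the XSOAR playbook YAML schema; the IDs here
# are hypothetical.
#
#   tasks:
#     '2':
#       type: playbook
#       task:
#         playbookId: OldPlaybook   # becomes: playbookId: NewPlaybook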
def save_dict_as_yaml_integration_file(self, output_file: str):
    """Save the dict to an output file."""
    if self.verbose:
        click.secho(f"Writing collected metadata to {output_file}.")

    write_yml(output_file, self.metadata_dict)
    click.secho("Finished successfully.", fg='green')
def write_desc(c_index, final_output, o_index, output_path, verbose, yml_data):
    """Write a single output description into yml_data and persist it to disk."""
    if verbose:
        logger.debug(f"Writing: {final_output}\n---")
    yml_data['script']['commands'][c_index]['outputs'][o_index][
        'description'] = final_output
    write_yml(output_path, yml_data)
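# Minimal sketch (illustrative) of the structure write_desc indexes into; the
# command name and context path are hypothetical:
#
#   yml_data = {'script': {'commands': [
#       {'name': 'zoom-get-user',
#        'outputs': [{'contextPath': 'Zoom.User.id', 'description': ''}]}]}}
#   write_desc(0, 'The Zoom user ID.', 0, 'out.yml', False, yml_data)
#   # sets ...['commands'][0]['outputs'][0]['description'] and re-writes the
#   # whole dict to out.yml via write_yml.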
def test_generate_integration_context(mocker, tmpdir):
    """
    Given
        - A yaml file and a fake example file
    When
        - Generating the yml outputs from the examples
    Then
        - Ensure the outputs are inserted correctly
    """
    from demisto_sdk.commands.generate_outputs.generate_context import \
        generate_integration_context

    command_name = 'zoom-fetch-recording'
    mocker.patch.object(generate_integration_context, 'build_example_dict',
                        return_value=(
                            {command_name: [(None, None, json.dumps(FAKE_OUTPUTS_1)),
                                            (None, None, json.dumps(FAKE_OUTPUTS_2))]},
                            []))

    # Temp file to check
    filename = os.path.join(tmpdir.strpath, 'fake_integration.yml')
    write_yml(filename, FAKE_INTEGRATION_YML)

    # Make sure that there are no outputs
    yml_data = get_yaml(filename)
    for command in yml_data['script']['commands']:
        if command.get('name') == command_name:
            command['outputs'] = ''
            break
    else:
        raise AssertionError(f'command {command_name} is not found in yml_data')

    generate_integration_context.generate_integration_context(
        filename, FAKE_EXAMPLES_FILE, verbose=False)

    # Check we have new data
    yml_data = get_yaml(filename)
    for command in yml_data['script']['commands']:
        if command.get('name') == command_name:
            assert command['outputs'] == FAKE_OUTPUT_CONTEXTS
            break
    else:
        raise AssertionError(f'command {command_name} is not found in yml_data')
def generate_integration_context(input_path: str,
                                 examples: Optional[str] = None,
                                 insecure: bool = False,
                                 verbose: bool = False,
                                 output_path: Optional[str] = None):
    """Generate integration command contexts in-place.

    Args:
        input_path: Path to the integration yml.
        examples: Path to the command examples file.
        insecure: Whether to skip certificate validation.
        verbose: Verbose (debug mode).
        output_path: Output path (defaults to input_path, i.e. in-place).
    """
    if not output_path:
        output_path = input_path

    try:
        yml_data = get_yaml(input_path)

        # Parse the examples file
        example_dict = generate_example_dict(examples, insecure)

        for command in example_dict:
            print_v(f'Building context for the {command} command...', verbose)
            example = example_dict.get(command)

            # Build the contexts from the recorded example outputs
            for _, _, outputs in example:
                output_with_contexts = dict_from_outputs_str(command, outputs,
                                                             verbose=verbose)
                output_contexts = output_with_contexts.get('outputs')
                yml_data = insert_outputs(yml_data, command, output_contexts)

        # Write the changes back (to the input yml by default)
        print_success(f'Writing outputs to {output_path}')
        write_yml(output_path, yml_data)
    except ValueError as ex:
        if verbose:
            raise
        print_error(f'Error: {str(ex)}')
        return 1
    return 0
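# Usage sketch (illustrative; the file paths are hypothetical placeholders):
#
#   rc = generate_integration_context('Packs/Zoom/Integrations/Zoom/Zoom.yml',
#                                     examples='command_examples.txt',
#                                     verbose=True)
#   # rc == 0 on success; rc == 1 if a ValueError was caught in non-verbose
#   # mode. The generated contexts land in script.commands[].outputs of the
#   # yml, which is re-written in place since no output_path was given.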
def generate_ai_descriptions(
        input_path: str,
        output_path: str = "out.yml",
        interactive: bool = True,
        verbose: bool = False,
        insecure: bool = False,
):
    """Generate integration command output descriptions with AI.

    Args:
        input_path: Path to the input integration yml.
        output_path: Path to the output integration yml.
        interactive: Interactivity (correct AI result mistakes manually).
        verbose: Verbose (debug mode).
        insecure: Insecure HTTPS (debug mode).
    """
    print_experimental()
    if verbose:
        logger.setLevel(logging.DEBUG)
    try:
        similar_paths: Dict[str, str] = {}
        yml_data = get_yaml(input_path)

        # Sometimes the yml doesn't contain the full structure (e.g. output of
        # json-to-outputs), so wrap it in one.
        if 'script' not in yml_data:
            yml_data = {'script': {'commands': [yml_data]}}

        commands = yml_data.get("script", {}).get('commands', [])

        # Iterate over every command
        for c_index, command in enumerate(commands):
            command_name = command.get('name')
            if interactive or verbose:
                logger.debug(f'Command: {command_name}')

            outputs = command.get('outputs')
            if not outputs:
                if interactive or verbose:
                    logger.debug("-- Skipping because no outputs for command")
                continue

            # Iterate over every output of the command
            for o_index, o in enumerate(outputs):
                command_output_path = o.get('contextPath')

                # Offer to reuse descriptions of previously seen paths.
                if command_output_path in similar_paths:
                    print(f"\n--Already added description for exact path: "
                          f"{command_output_path}--")
                    final_output = similar_paths.get(command_output_path)
                    print(f"Last output was: '{final_output}'")
                    y = input("Should we use it (y/n)? ").lower()
                    if y in ('y', 'yes'):
                        print('Using last seen output.')
                        yml_data['script']['commands'][c_index]['outputs'][
                            o_index]['description'] = final_output
                        write_yml(output_path, yml_data)
                        continue
                    print("Asking again...")

                # Print the progress and the current context path
                if interactive or verbose:
                    print(f'\n{o_index + 1}/{len(outputs)}')
                    print(f'Command: {command_name}')
                    print(f'Context path:\t\t{command_output_path}')

                output = "No result from GPT."
                # Generate a description with the AI21 API (try up to twice, in
                # case a long prompt needs to be cleared first).
                for _attempt in range(2):
                    try:
                        output = generate_desc_with_spinner(command_output_path,
                                                            insecure, output,
                                                            verbose)
                        break
                    except requests.exceptions.RequestException as e:
                        print('Failed AI description request: ', e)
                        remove_last_prompt()
                        continue

                final_output = output
                # Correct the description interactively if needed
                if interactive:
                    final_output = correct_interactively(command_output_path,
                                                         final_output, output)

                # Write the final description to the file (backup)
                write_desc(c_index, final_output, o_index, output_path, verbose,
                           yml_data)

                # Remember the description for identical context paths
                similar_paths[command_output_path] = str(final_output)

            # Back up the prompt for later usage (in case we cleared it)
            if DEBUG_PROMPT:
                with open(f"backup_prompt_ai_{c_index}.txt", "w") as f:
                    f.write(get_current_prompt())
    except Exception as ex:
        print_error(f'Error: {str(ex)}')

    # Back up all of the prompts (without truncating)
    if DEBUG_PROMPT:
        with open("history_prompt.txt", "w") as f:
            f.write(json.dumps(PROMPT_HISTORY))
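# Usage sketch (illustrative; the path is a hypothetical placeholder). Running
# non-interactively writes the AI-generated descriptions to a separate file so
# the input yml stays untouched:
#
#   generate_ai_descriptions('Packs/Zoom/Integrations/Zoom/Zoom.yml',
#                            output_path='Zoom_with_descriptions.yml',
#                            interactive=False, verbose=True)
#
# Note that the "exact path already seen" branch still calls input(), so a
# fully unattended run assumes no repeated context paths.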