def create_object(cls, model_alias: str, object_type: Type[TLO], args: argparse.Namespace) -> int:
    """Create a top level OSCAL object within the trestle directory, leveraging functionality in add.

    Args:
        model_alias: Alias of the top level model to create (e.g. 'catalog').
        object_type: OSCAL model class used to generate the sample instance.
        args: Parsed CLI arguments supplying name, extension and verbosity.

    Returns:
        0 on success, 1 on failure (not in a trestle project, file exists, or plan error).
    """
    log.set_log_level_from_args(args)
    trestle_root = fs.get_trestle_project_root(Path.cwd())
    if not trestle_root:
        # Bug fix: message previously read 'is not with a trestle project.'
        logger.error(f'Current working directory {Path.cwd()} is not within a trestle project.')
        return 1
    plural_path: str
    # Cater to POAM - its alias is already plural
    if model_alias[-1] == 's':
        plural_path = model_alias
    else:
        plural_path = model_alias + 's'

    desired_model_dir = trestle_root / plural_path / args.name
    desired_model_path = desired_model_dir / (model_alias + '.' + args.extension)
    if desired_model_path.exists():
        logger.error(f'OSCAL file to be created here: {desired_model_path} exists.')
        logger.error('Aborting trestle create.')
        return 1

    # Create sample model.
    sample_model = generators.generate_sample_model(object_type)
    # Presuming top level model - not sure how to do the typing for this.
    sample_model.metadata.title = f'Generic {model_alias} created by trestle.'  # type: ignore
    sample_model.metadata.last_modified = datetime.now().astimezone()
    sample_model.metadata.oscal_version = trestle.oscal.OSCAL_VERSION
    sample_model.metadata.version = '0.0.0'

    top_element = Element(sample_model, model_alias)
    create_action = CreatePathAction(desired_model_path.absolute(), True)
    write_action = WriteFileAction(
        desired_model_path.absolute(), top_element, FileContentType.to_content_type(desired_model_path.suffix)
    )

    # create a plan to write the directory and file.
    try:
        create_plan = Plan()
        create_plan.add_action(create_action)
        create_plan.add_action(write_action)
        create_plan.simulate()
        create_plan.execute()
        return 0
    except Exception as e:
        # Broad catch at the CLI boundary: report failure and return a posix error code.
        logger.error('Unknown error executing trestle create operations. Rolling back.')
        logger.debug(e)
        return 1
def assemble_model(cls, model_alias: str, object_type: Type[TLO], args: argparse.Namespace) -> int:
    """Assemble a top level OSCAL model within the trestle dist directory.

    Args:
        model_alias: Alias of the top level model to assemble.
        object_type: OSCAL model class (unused here, kept for interface compatibility).
        args: Parsed CLI arguments supplying name, extension and verbosity.

    Returns:
        0 on success, 1 on failure.
    """
    log.set_log_level_from_args(args)
    trestle_root = fs.get_trestle_project_root(Path.cwd())
    if not trestle_root:
        # Bug fix: message previously read 'is not with a trestle project.'
        logger.error(f'Current working directory {Path.cwd()} is not within a trestle project.')
        return 1
    if not trestle_root == Path.cwd():
        logger.error(f'Current working directory {Path.cwd()} is not the top level trestle project directory.')
        return 1

    # construct path to the model file name
    root_model_dir = Path.cwd() / f'{model_alias}s'
    try:
        model_file_type = fs.get_contextual_file_type(root_model_dir / args.name)
    except Exception as e:
        logger.error('No files found in the specified model directory.')
        logger.debug(e)
        return 1

    model_file_name = f'{model_alias}{FileContentType.to_file_extension(model_file_type)}'
    root_model_filepath = root_model_dir / args.name / model_file_name

    if not root_model_filepath.exists():
        logger.error(f'No top level model file at {root_model_dir}')
        return 1

    # distributed load: gathers any split pieces back into a single model instance
    _, _, assembled_model = load_distributed(root_model_filepath)

    assembled_model_filepath = trestle_root / const.TRESTLE_DIST_DIR / f'{model_alias}.{args.extension}'

    plan = Plan()
    plan.add_action(CreatePathAction(assembled_model_filepath, True))
    plan.add_action(
        WriteFileAction(
            assembled_model_filepath, Element(assembled_model), FileContentType.to_content_type(f'.{args.extension}')
        )
    )

    try:
        plan.simulate()
        plan.execute()
        return 0
    except Exception as e:
        # Bug fix: message previously said 'trestle create operations' inside the assemble command.
        logger.error('Unknown error executing trestle assemble operations. Rolling back.')
        logger.debug(e)
        return 1
def add(cls, file_path, element_path, parent_model, parent_element):
    """For a file_path and element_path, add a child model to the parent_element of a given parent_model.

    First we find the child model at the specified element path and instantiate it with default values.
    Then we check if there's already existing element at that path, in which case we append the child model
    to the existing list of dict.
    Then we set up an action plan to update the model (specified by file_path) in memory, create a file
    at the same location and write the file.

    Raises:
        err.TrestleError: if the path contains a wildcard, or if resolving/merging the
            child model fails for any reason (the original exception text is included).
    """
    element_path_list = element_path.get_full_path_parts()
    if '*' in element_path_list:
        raise err.TrestleError('trestle add does not support Wildcard element path.')
    # Get child model
    try:
        child_model = utils.get_target_model(element_path_list, parent_model)
        # Create child element with sample values
        child_object = utils.get_sample_model(child_model)

        if parent_element.get_at(element_path) is not None:
            # The element already exists
            if type(parent_element.get_at(element_path)) is list:
                # append the new sample to the existing list
                child_object = parent_element.get_at(element_path) + child_object
            elif type(parent_element.get_at(element_path)) is dict:
                # merge the new sample keys into the existing dict (new keys win)
                child_object = {**parent_element.get_at(element_path), **child_object}
            else:
                raise err.TrestleError('Already exists and is not a list or dictionary.')

    except Exception as e:
        # NOTE: a TrestleError raised above is also caught and re-wrapped here,
        # so all failures surface uniformly as a 'Bad element path' error.
        raise err.TrestleError(f'Bad element path. {str(e)}')

    # update in memory, ensure the path exists, then write the whole parent back out
    update_action = UpdateAction(sub_element=child_object, dest_element=parent_element, sub_element_path=element_path)
    create_action = CreatePathAction(file_path.absolute(), True)
    write_action = WriteFileAction(
        file_path.absolute(), parent_element, FileContentType.to_content_type(file_path.suffix)
    )

    add_plan = Plan()
    add_plan.add_action(update_action)
    add_plan.add_action(create_action)
    add_plan.add_action(write_action)
    add_plan.simulate()
    add_plan.execute()
def write(self, model: OscalBaseModel) -> bool:
    """Persist an OSCAL model into the repository, replacing any split representation.

    Raises TrestleError when the model is not a recognized top level model.
    Returns True on success.
    """
    logger.debug(f'Writing model {self._model_name}.')
    alias = classname_to_alias(model.__class__.__name__, AliasMode.JSON)
    if parser.to_full_model_name(alias) is None:
        raise TrestleError(f'Given model {alias} is not a top level model.')

    # Directory that would hold split pieces of the model, if it was ever split.
    # NOTE(review): this uses self.model_alias rather than the alias derived above — confirm intended.
    split_dir = pathlib.Path(self.model_dir, self.model_alias)

    # Delete any split model dir, recreate the model file, and write the element to it.
    actions = [
        RemovePathAction(split_dir),
        CreatePathAction(self.filepath, True),
        WriteFileAction(self.filepath, Element(model), self.file_content_type),
    ]
    import_plan = Plan()
    for action in actions:
        import_plan.add_action(action)
    import_plan.execute()

    logger.debug(f'Model {self._model_name} written to repository')
    return True
def assemble_model(cls, model_alias: str, args: argparse.Namespace) -> int:
    """Assemble a top level OSCAL model within the trestle dist directory.

    Assembles either the single model named in args, or every model of the given type.
    Raises TrestleRootError / TrestleError on invalid project root or missing model file.
    """
    log.set_log_level_from_args(args)
    logger.info(f'Assembling models of type {model_alias}.')

    # trestle root is set via command line in args. Default is cwd.
    trestle_root = args.trestle_root
    if not trestle_root or not file_utils.is_valid_project_root(args.trestle_root):
        raise TrestleRootError(f'Given directory {trestle_root} is not a trestle project.')

    # Either a single named model, or every model of this type in the project.
    if args.name:
        model_names = [args.name]
        logger.info(f'Assembling single model of type {model_alias}: {args.name}.')
    else:
        model_names = ModelUtils.get_models_of_type(model_alias, trestle_root)
        logger.info(f'Assembling {len(model_names)} found models of type {model_alias}.')

    if not model_names:
        logger.info(f'No models found to assemble of type {model_alias}.')
        return CmdReturnCodes.SUCCESS.value

    for model_name in model_names:
        # construct path to the model file name
        root_model_dir = trestle_root / ModelUtils.model_type_to_model_dir(model_alias)
        model_file_type = file_utils.get_contextual_file_type(root_model_dir / model_name)
        model_file_name = f'{model_alias}{FileContentType.to_file_extension(model_file_type)}'
        root_model_filepath = root_model_dir / model_name / model_file_name
        if not root_model_filepath.exists():
            raise TrestleError(f'No top level model file at {root_model_dir}')

        # distributed load gathers any split pieces into one model instance
        _, _, assembled_model = ModelUtils.load_distributed(root_model_filepath, args.trestle_root)

        plural_alias = ModelUtils.model_type_to_model_dir(model_alias)
        assembled_model_dir = trestle_root / const.TRESTLE_DIST_DIR / plural_alias
        assembled_model_filepath = assembled_model_dir / f'{model_name}.{args.extension}'

        # plan: create the dist path, then write the assembled model there
        plan = Plan()
        plan.add_action(CreatePathAction(assembled_model_filepath, True))
        plan.add_action(
            WriteFileAction(
                assembled_model_filepath,
                Element(assembled_model),
                FileContentType.to_content_type(f'.{args.extension}')
            )
        )
        plan.execute()

    return CmdReturnCodes.SUCCESS.value
def replicate_object(cls, model_alias: str, args: argparse.Namespace) -> int:
    """
    Core replicate routine invoked by subcommands.

    Args:
        model_alias: Name of the top level model in the trestle directory.
        args: CLI arguments

    Returns:
        A return code that can be used as standard posix codes. 0 is success.

    Raises:
        TrestleError: for an invalid project root, missing/unknown input file, or a
            pre-existing destination file.
    """
    logger.debug('Entering replicate_object.')

    # 1 Bad working directory if not running from current working directory
    trestle_root = args.trestle_root  # trestle root is set via command line in args. Default is cwd.
    if not trestle_root or not file_utils.is_valid_project_root(trestle_root):
        raise TrestleError(f'Given directory: {trestle_root} is not a trestle project.')

    plural_path = ModelUtils.model_type_to_model_dir(model_alias)

    # 2 Check that input file given exists.
    # The stem has no suffix yet; content type is inferred from whichever file is present.
    input_file_stem = trestle_root / plural_path / args.name / model_alias
    content_type = FileContentType.path_to_content_type(input_file_stem)
    if content_type == FileContentType.UNKNOWN:
        raise TrestleError(
            f'Input file {args.name} has no json or yaml file at expected location {input_file_stem}.'
        )

    input_file = input_file_stem.with_suffix(FileContentType.to_file_extension(content_type))

    # 3 Distributed load from file
    # NOTE: model_alias is intentionally rebound here to the alias reported by the load.
    _, model_alias, model_instance = ModelUtils.load_distributed(input_file, trestle_root)

    rep_model_path = trestle_root / plural_path / args.output / (
        model_alias + FileContentType.to_file_extension(content_type)
    )
    if rep_model_path.exists():
        raise TrestleError(f'OSCAL file to be replicated here: {rep_model_path} exists.')

    # Optionally refresh all uuids in the replica so it does not collide with the source.
    if args.regenerate:
        logger.debug(f'regenerating uuids for model {input_file}')
        model_instance, uuid_lut, n_refs_updated = ModelUtils.regenerate_uuids(model_instance)
        logger.debug(f'{len(uuid_lut)} uuids generated and {n_refs_updated} references updated')

    # 4 Prepare actions and plan
    top_element = Element(model_instance)
    create_action = CreatePathAction(rep_model_path, True)
    write_action = WriteFileAction(rep_model_path, top_element, content_type)

    # create a plan to create the directory and imported file.
    replicate_plan = Plan()
    replicate_plan.add_action(create_action)
    replicate_plan.add_action(write_action)
    replicate_plan.execute()

    return CmdReturnCodes.SUCCESS.value
def test_split_model_at_path_chain_failures(
        tmp_path, simplified_nist_catalog: oscatalog.Catalog) -> None:
    """Test for split_model_at_path_chain method failure scenarios."""
    content_type = FileContentType.JSON

    # prepare trestle project dir with the file
    catalog_dir, _ = test_utils.prepare_trestle_project_dir(
        tmp_path, content_type, simplified_nist_catalog, test_utils.CATALOGS_DIR
    )

    plan = Plan()
    paths = [ElementPath('catalog.metadata.parties.*')]

    # passing no plan must raise
    with pytest.raises(TrestleError):
        SplitCmd.split_model_at_path_chain(
            simplified_nist_catalog, paths, catalog_dir, content_type, 0, None, False, '', None
        )

    # a negative path index must raise
    with pytest.raises(TrestleError):
        SplitCmd.split_model_at_path_chain(
            simplified_nist_catalog, paths, catalog_dir, content_type, -1, plan, False, '', None
        )

    # an index past the end of the path list should simply be returned, not raise
    SplitCmd.split_model_at_path_chain(
        simplified_nist_catalog, paths, catalog_dir, content_type, len(paths) + 1, plan, False, '', None
    )
def test_merge_everything_into_catalog_with_hidden_files_in_folders( testdata_dir, tmp_trestle_dir): """Test trestle merge -e 'catalog.*' when metadata and catalog are split and hidden files are present.""" # Assume we are running a command like below # trestle merge -e catalog.* content_type = FileContentType.JSON fext = FileContentType.to_file_extension(content_type) # prepare trestle project dir with the file test_utils.ensure_trestle_config_dir(tmp_trestle_dir) test_data_source = testdata_dir / 'split_merge/step4_split_groups_array/catalogs' catalogs_dir = Path('catalogs/') mycatalog_dir = catalogs_dir / 'mycatalog' # Copy files from test/data/split_merge/step4 shutil.rmtree(catalogs_dir) shutil.copytree(test_data_source, catalogs_dir) # Change directory to mycatalog_dir os.chdir(mycatalog_dir) catalog_file = Path(f'catalog{fext}').resolve() assert catalog_file.exists() # Read files # Create hand-crafter merge plan expected_plan: Plan = Plan() reset_destination_action = CreatePathAction(catalog_file, clear_content=True) expected_plan.add_action(reset_destination_action) _, _, merged_catalog_instance = ModelUtils.load_distributed( catalog_file, tmp_trestle_dir) element = Element(merged_catalog_instance) write_destination_action = WriteFileAction(catalog_file, element, content_type=content_type) expected_plan.add_action(write_destination_action) delete_element_action = RemovePathAction(Path('catalog').resolve()) expected_plan.add_action(delete_element_action) test_utils.make_hidden_file(tmp_trestle_dir / 'catalogs/mycatalog/.DS_Store') test_utils.make_hidden_file(tmp_trestle_dir / 'catalogs/mycatalog/catalog/.DS_Store') test_utils.make_hidden_file( tmp_trestle_dir / 'catalogs/mycatalog/catalog/metadata/.DS_Store') test_utils.make_hidden_file(tmp_trestle_dir / 'catalogs/mycatalog/catalog/groups/.DS_Store') # Call merge() generated_plan = MergeCmd.merge(Path.cwd(), ElementPath('catalog.*'), tmp_trestle_dir) # Assert the generated plan matches the expected plan' 
assert generated_plan == expected_plan
def _run(self, args: argparse.Namespace) -> int:
    """Remove an OSCAL component/subcomponent from the specified component.

    This method takes input a filename and a list of comma-seperated element path. Element paths are field aliases.
    The method first finds the parent model from the file and loads the file into the model.
    Then the method executes 'remove' for each of the element paths specified.

    Returns:
        CmdReturnCodes.SUCCESS.value on success; a generic error code on failure.
    """
    try:
        log.set_log_level_from_args(args)

        args_dict = args.__dict__

        file_path = pathlib.Path(args_dict[const.ARG_FILE]).resolve()
        relative_path = file_path.relative_to(args.trestle_root)
        # Get parent model and then load json into parent model
        parent_model, parent_alias = ModelUtils.get_relative_model_type(relative_path)

        parent_object = parent_model.oscal_read(file_path)
        parent_element = Element(parent_object, parent_alias)

        remove_plan = Plan()

        # Do _remove for each element_path specified in args
        element_paths: List[str] = str(args_dict[const.ARG_ELEMENT]).split(',')
        for elm_path_str in element_paths:
            element_path = ElementPath(elm_path_str)
            remove_action, parent_element = self.remove(element_path, parent_element)
            remove_plan.add_action(remove_action)

        create_action = CreatePathAction(file_path, True)
        write_action = WriteFileAction(
            file_path, parent_element, FileContentType.to_content_type(file_path.suffix)
        )

        # Bug fix: the last remove_action was previously added to the plan a second
        # time here, causing the final remove to be executed twice.
        remove_plan.add_action(create_action)
        remove_plan.add_action(write_action)
        remove_plan.execute()

        return CmdReturnCodes.SUCCESS.value
    except Exception as e:
        return err.handle_generic_command_exception(e, logger, 'Error while removing OSCAL component')
def test_split_model_at_path_chain_failures(tmp_path, sample_catalog: oscatalog.Catalog):
    """Test for split_model_at_path_chain method failure scenarios."""
    content_type = FileContentType.JSON

    # prepare trestle project dir with the file (catalog_file is unused but returned)
    catalog_dir, catalog_file = test_utils.prepare_trestle_project_dir(
        tmp_path, content_type, sample_catalog, test_utils.CATALOGS_DIR
    )

    split_plan = Plan()
    element_paths = [ElementPath('catalog.metadata.parties.*')]

    # long chain of path should error
    with pytest.raises(TrestleError):
        SplitCmd.split_model_at_path_chain(
            sample_catalog, element_paths, catalog_dir, content_type, 0, split_plan, False
        )

    # no plan should error
    with pytest.raises(TrestleError):
        SplitCmd.split_model_at_path_chain(
            sample_catalog, element_paths, catalog_dir, content_type, 0, None, False
        )

    # negative path index should error
    with pytest.raises(TrestleError):
        SplitCmd.split_model_at_path_chain(
            sample_catalog, element_paths, catalog_dir, content_type, -1, split_plan, False
        )

    # too large path index should return the path index
    cur_path_index = len(element_paths) + 1
    SplitCmd.split_model_at_path_chain(
        sample_catalog, element_paths, catalog_dir, content_type, cur_path_index, split_plan, False
    )

    # invalid model path should return without doing anything
    element_paths = [ElementPath('catalog.meta')]
    cur_path_index = 0
    SplitCmd.split_model_at_path_chain(
        sample_catalog, element_paths, catalog_dir, content_type, cur_path_index, split_plan, False
    )

    # invalid path for multi item sub-model
    p0 = ElementPath('catalog.uuid.*')  # uuid exists, but it is not a multi-item sub-model object
    p1 = ElementPath('uuid.metadata.*', p0)  # this is invalid but we just need a path with the p0 as the parent
    element_paths = [p0, p1]
    with pytest.raises(TrestleError):
        SplitCmd.split_model_at_path_chain(
            sample_catalog, element_paths, catalog_dir, content_type, 0, split_plan, False
        )
def test_split_model_plans(
        tmp_path: pathlib.Path,
        sample_nist_component_def: component.ComponentDefinition) -> None:
    """Test for split_model method."""
    # Equivalent of:
    # trestle split -f component-definition.yaml -e component-definition.metadata
    content_type = FileContentType.YAML

    # prepare trestle project dir with the file
    comp_dir, comp_file = test_utils.prepare_trestle_project_dir(
        tmp_path, content_type, sample_nist_component_def, test_utils.COMPONENT_DEF_DIR
    )

    # read the model back from disk and wrap it in an element
    comp_def = component.ComponentDefinition.oscal_read(comp_file)
    element = Element(comp_def)
    paths = cmd_utils.parse_element_args(None, ['component-definition.metadata'])

    # values the split is expected to extract
    metadata_file = comp_dir / paths[0].to_file_path(content_type)
    metadata = element.get_at(paths[0])
    root_file = comp_dir / paths[0].to_root_path(content_type)
    remaining_root = element.get().stripped_instance(paths[0].get_element_name())

    # hand craft the expected plan: write metadata out, then write the stripped root
    expected_plan = Plan()
    expected_plan.add_action(CreatePathAction(metadata_file))
    expected_plan.add_action(WriteFileAction(metadata_file, Element(metadata), content_type))
    expected_plan.add_action(CreatePathAction(root_file, True))
    expected_plan.add_action(WriteFileAction(root_file, Element(remaining_root), content_type))

    split_plan = SplitCmd.split_model(comp_def, paths, comp_dir, content_type, '', None)
    assert expected_plan == split_plan
def split_model( cls, model_obj: OscalBaseModel, element_paths: List[ElementPath], base_dir: pathlib.Path, content_type: FileContentType, root_file_name: str, aliases_to_strip: Dict[str, AliasTracker] ) -> Plan: """Split the model at the provided element paths. It returns a plan for the operation """ # initialize plan split_plan = Plan() # loop through the element path list and update the split_plan stripped_field_alias = [] cur_path_index = 0 while cur_path_index < len(element_paths): # extract the sub element name for each of the root path of the path chain element_path = element_paths[cur_path_index] if element_path.get_parent() is None and len(element_path.get()) > 1: stripped_part = element_path.get()[1] if stripped_part == ElementPath.WILDCARD: stripped_field_alias.append('__root__') else: if stripped_part not in stripped_field_alias: stripped_field_alias.append(stripped_part) # split model at the path chain cur_path_index = cls.split_model_at_path_chain( model_obj, element_paths, base_dir, content_type, cur_path_index, split_plan, False, root_file_name, aliases_to_strip ) cur_path_index += 1 # strip the root model object and add a WriteAction stripped_root = model_obj.stripped_instance(stripped_fields_aliases=stripped_field_alias) # If it's an empty model after stripping the fields, don't create path and don't write if set(model_obj.__fields__.keys()) == set(stripped_field_alias): return split_plan if root_file_name != '': root_file = base_dir / root_file_name else: root_file = base_dir / element_paths[0].to_root_path(content_type) split_plan.add_action(CreatePathAction(root_file, True)) wrapper_alias = classname_to_alias(stripped_root.__class__.__name__, AliasMode.JSON) split_plan.add_action(WriteFileAction(root_file, Element(stripped_root, wrapper_alias), content_type)) return split_plan
def test_split_model(tmp_dir, sample_target_def: ostarget.TargetDefinition):
    """Test for split_model method."""
    # Equivalent of:
    # trestle split -f target-definition.yaml -e target-definition.metadata
    content_type = FileContentType.YAML

    # prepare trestle project dir with the file
    target_def_dir, target_def_file = test_utils.prepare_trestle_project_dir(
        tmp_dir, content_type, sample_target_def, test_utils.TARGET_DEFS_DIR
    )

    # read the model back from disk and wrap it in an element
    target_def = ostarget.TargetDefinition.oscal_read(target_def_file)
    element = Element(target_def)
    paths = cmd_utils.parse_element_args(['target-definition.metadata'])

    # values the split is expected to extract
    metadata_file = target_def_dir / paths[0].to_file_path(content_type)
    metadata = element.get_at(paths[0])
    root_file = target_def_dir / paths[0].to_root_path(content_type)
    remaining_root = element.get().stripped_instance(paths[0].get_element_name())

    # hand craft the expected plan: write metadata out, then write the stripped root
    expected_plan = Plan()
    expected_plan.add_action(CreatePathAction(metadata_file))
    expected_plan.add_action(WriteFileAction(metadata_file, Element(metadata), content_type))
    expected_plan.add_action(CreatePathAction(root_file, True))
    expected_plan.add_action(WriteFileAction(root_file, Element(remaining_root), content_type))

    assert expected_plan == SplitCmd.split_model(target_def, paths, target_def_dir, content_type)
def split_model(cls,
                model_obj: OscalBaseModel,
                element_paths: List[ElementPath],
                base_dir: pathlib.Path,
                content_type: FileContentType,
                root_file_name: str = '') -> Plan:
    """Split the model at the provided element paths.

    It returns a plan for the operation.

    Args:
        model_obj: The model instance to split.
        element_paths: Paths at which sub-models are extracted into their own files.
        base_dir: Directory under which the split files are created.
        content_type: JSON or YAML output format.
        root_file_name: Explicit name for the remaining root file; '' uses the default.
    """
    # assume we ran the command below:
    # trestle split -f target.yaml
    #   -e 'target-definition.metadata,
    #   target-definition.targets.*.target-control-implementations.*'

    # initialize plan
    split_plan = Plan()

    # loop through the element path list and update the split_plan
    stripped_field_alias = []
    cur_path_index = 0
    while cur_path_index < len(element_paths):
        # extract the sub element name for each of the root path of the path chain
        element_path = element_paths[cur_path_index]
        if element_path.get_parent() is None and len(element_path.get()) > 1:
            stripped_part = element_path.get()[1]
            if stripped_part == ElementPath.WILDCARD:
                stripped_field_alias.append('__root__')
            elif stripped_part not in stripped_field_alias:
                # Fix: guard against appending duplicate aliases when several element
                # paths share the same root field (consistent with the newer variant
                # of this routine).
                stripped_field_alias.append(stripped_part)

        # split model at the path chain; returns the index it advanced to
        cur_path_index = cls.split_model_at_path_chain(
            model_obj, element_paths, base_dir, content_type, cur_path_index, split_plan, False, root_file_name
        )

        cur_path_index += 1

    # strip the root model object and add a WriteAction
    stripped_root = model_obj.stripped_instance(stripped_fields_aliases=stripped_field_alias)
    if root_file_name != '':
        root_file = base_dir / root_file_name
    else:
        root_file = base_dir / element_paths[0].to_root_path(content_type)
    split_plan.add_action(CreatePathAction(root_file, True))
    wrapper_alias = utils.classname_to_alias(stripped_root.__class__.__name__, 'json')
    split_plan.add_action(WriteFileAction(root_file, Element(stripped_root, wrapper_alias), content_type))

    return split_plan
def create_object(cls, model_alias: str, object_type: Type[TopLevelOscalModel], args: argparse.Namespace) -> int:
    """Create a top level OSCAL object within the trestle directory, leveraging functionality in add.

    Raises TrestleRootError for an invalid project root and TrestleError when the
    destination file already exists.
    """
    log.set_log_level_from_args(args)

    # trestle root is set via command line in args. Default is cwd.
    trestle_root = args.trestle_root
    if not trestle_root or not file_utils.is_valid_project_root(args.trestle_root):
        raise err.TrestleRootError(f'Given directory {trestle_root} is not a trestle project.')

    model_dir = trestle_root / ModelUtils.model_type_to_model_dir(model_alias) / args.output
    model_path = model_dir / (model_alias + '.' + args.extension)
    if model_path.exists():
        raise err.TrestleError(f'OSCAL file to be created here: {model_path} exists.')

    # Generate a sample model and stamp its metadata.
    sample_model = generators.generate_sample_model(object_type, include_optional=args.include_optional_fields)
    # Presuming a top level model; unclear how to express the typing, hence the ignore.
    sample_model.metadata.title = f'Generic {model_alias} created by trestle named {args.output}.'  # type: ignore
    sample_model.metadata.last_modified = datetime.now().astimezone()
    sample_model.metadata.oscal_version = trestle.oscal.OSCAL_VERSION
    sample_model.metadata.version = '0.0.0'

    # Plan: create the directory path, then serialize the model into the new file.
    resolved_path = model_path.resolve()
    create_plan = Plan()
    create_plan.add_action(CreatePathAction(resolved_path, True))
    create_plan.add_action(
        WriteFileAction(
            resolved_path,
            Element(sample_model, model_alias),
            FileContentType.to_content_type(model_path.suffix)
        )
    )
    create_plan.execute()
    return CmdReturnCodes.SUCCESS.value
def _run(self, args: argparse.Namespace) -> int:
    """Add an OSCAL component/subcomponent to the specified component.

    This method takes input a filename and a list of comma-seperated element path. Element paths are field aliases.
    The method first finds the parent model from the file and loads the file into the model.
    Then the method executes 'add' for each of the element paths specified.

    Returns:
        0 on success, 1 on any failure.
    """
    log.set_log_level_from_args(args)
    try:
        args_dict = args.__dict__

        file_path = pathlib.Path(args_dict[const.ARG_FILE])

        # Get parent model and then load json into parent model
        parent_model, parent_alias = fs.get_stripped_contextual_model(file_path.absolute())
        parent_object = parent_model.oscal_read(file_path.absolute())
        # FIXME : handle YAML files after detecting file type
        parent_element = Element(parent_object, utils.classname_to_alias(parent_model.__name__, 'json'))

        add_plan = Plan()

        # Do _add for each element_path specified in args
        element_paths: List[str] = args_dict[const.ARG_ELEMENT].split(',')
        for elm_path_str in element_paths:
            element_path = ElementPath(elm_path_str)
            update_action, parent_element = self.add(element_path, parent_model, parent_element)
            add_plan.add_action(update_action)

        create_action = CreatePathAction(file_path.absolute(), True)
        write_action = WriteFileAction(
            file_path.absolute(), parent_element, FileContentType.to_content_type(file_path.suffix)
        )

        add_plan.add_action(create_action)
        add_plan.add_action(write_action)

        add_plan.simulate()
        add_plan.execute()

    except Exception as e:
        # Bug fix: previously caught BaseException (which also traps SystemExit and
        # KeyboardInterrupt) and bound it to 'err', shadowing the trestle error module
        # used elsewhere in this file.
        logger.error(f'Add failed: {e}')
        return 1
    return 0
def _run(self, args: argparse.Namespace) -> int:
    """Top level import run command.

    Validates the input file and working directory, parses the file into an OSCAL
    model, then plans and executes the copy into the trestle project tree.
    Returns 0 on success and 1 on any failure.
    """
    log.set_log_level_from_args(args)
    logger.debug('Entering import run.')

    # 1. Validate input arguments are as expected.
    # This code block may never be reached as the argument is declared to be required.

    # 1.1 Check that input file given exists.
    input_file = pathlib.Path(args.file)
    if not input_file.exists():
        logger.error(f'Input file {args.file} does not exist.')
        return 1

    # 1.2 Bad working directory if not running from current working directory
    cwd = pathlib.Path.cwd().resolve()
    trestle_root = fs.get_trestle_project_root(cwd)
    if trestle_root is None:
        logger.error(f'Current working directory: {cwd} is not within a trestle project.')
        return 1

    # 2. Importing a file that is already inside a trestle-initialized dir is bad
    trestle_root = trestle_root.resolve()
    try:
        input_file.absolute().relative_to(trestle_root)
    except ValueError:
        # An exception here is good: it means that the input file is not inside a trestle dir.
        pass
    else:
        logger.error('Input file cannot be from current trestle project. Use duplicate instead.')
        return 1

    # 3. Work out typing information from input suffix.
    # NOTE(review): 'as err' below shadows the trestle error module name used elsewhere
    # in this file; harmless within each except block but worth renaming.
    try:
        content_type = FileContentType.to_content_type(input_file.suffix)
    except TrestleError as err:
        logger.debug(f'FileContentType.to_content_type() failed: {err}')
        logger.error(f'Import failed, could not work out content type from file suffix: {err}')
        return 1

    # 4. Load input and parse for model

    # 4.1 Load from file
    try:
        data = fs.load_file(input_file.absolute())
    except JSONDecodeError as err:
        logger.debug(f'fs.load_file() failed: {err}')
        logger.error(f'Import failed, JSON error loading file: {err}')
        return 1
    except TrestleError as err:
        logger.debug(f'fs.load_file() failed: {err}')
        logger.error(f'Import failed, error loading file: {err}')
        return 1
    except PermissionError as err:
        logger.debug(f'fs.load_file() failed: {err}')
        logger.error(f'Import failed, access permission error loading file: {err}')
        return 1

    # 4.2 root key check
    try:
        parent_alias = parser.root_key(data)
    except TrestleError as err:
        logger.debug(f'parser.root_key() failed: {err}')
        logger.error(f'Import failed, failed to parse input file for root key: {err}')
        return 1

    # 4.3 parse the model
    parent_model_name = parser.to_full_model_name(parent_alias)
    try:
        parent_model = parser.parse_file(input_file.absolute(), parent_model_name)
    except TrestleError as err:
        logger.debug(f'parser.parse_file() failed: {err}')
        logger.error(f'Import failed, failed to parse valid contents of input file: {err}')
        return 1

    # 5. Work out output directory and file
    plural_path: str
    plural_path = parent_alias
    # Cater to POAM - its alias is already plural
    if parent_alias[-1] != 's':
        plural_path = parent_alias + 's'

    desired_model_dir = trestle_root / plural_path
    # args.output is presumed to be assured as it is declared to be required
    # NOTE(review): if args.output were ever falsy, desired_model_path would be
    # unbound below and raise NameError — confirm the CLI enforces the argument.
    if args.output:
        desired_model_path = desired_model_dir / args.output / (parent_alias + input_file.suffix)

    if desired_model_path.exists():
        logger.error(f'OSCAL file to be created here: {desired_model_path} exists.')
        logger.error('Aborting trestle import.')
        return 1

    # 6. Prepare actions and plan
    top_element = Element(parent_model.oscal_read(input_file))
    create_action = CreatePathAction(desired_model_path.absolute(), True)
    write_action = WriteFileAction(desired_model_path.absolute(), top_element, content_type)

    # create a plan to create the directory and imported file.
    import_plan = Plan()
    import_plan.add_action(create_action)
    import_plan.add_action(write_action)

    try:
        import_plan.simulate()
    except TrestleError as err:
        logger.debug(f'import_plan.simulate() failed: {err}')
        logger.error(f'Import failed, error in testing import operation: {err}')
        return 1

    try:
        import_plan.execute()
    except TrestleError as err:
        logger.debug(f'import_plan.execute() failed: {err}')
        logger.error(f'Import failed, error in actual import operation: {err}')
        return 1

    # 7. Leave the rest to trestle split
    return 0
def test_plan_execution(tmp_path, sample_nist_component_def: component.ComponentDefinition):
    """Test successful execution of a valid plan, and full cleanup on rollback."""
    content_type = FileContentType.YAML

    base_dir: pathlib.Path = pathlib.Path.joinpath(tmp_path, 'mycomponent')
    targets_dir: pathlib.Path = pathlib.Path.joinpath(base_dir, 'components')
    metadata_yaml: pathlib.Path = pathlib.Path.joinpath(base_dir, 'metadata.yaml')
    test_utils.ensure_trestle_config_dir(base_dir)

    # hand craft a split plan
    split_plan = Plan()
    split_plan.add_action(CreatePathAction(metadata_yaml))
    split_plan.add_action(
        WriteFileAction(
            metadata_yaml, Element(sample_nist_component_def.metadata, 'component-definition'), content_type
        )
    )

    # Test stringing a plan
    stringed = str(split_plan)
    assert len(stringed) > 0

    target_files: List[pathlib.Path] = []
    for index in range(len(sample_nist_component_def.components)):
        target_file: pathlib.Path = pathlib.Path.joinpath(targets_dir, f'component_{index}.yaml')
        target_files.append(target_file)
        split_plan.add_action(CreatePathAction(target_file))
        split_plan.add_action(
            WriteFileAction(target_file, Element(sample_nist_component_def.components[index], 'target'), content_type)
        )

    # execute the plan
    split_plan.execute()
    assert base_dir.exists()
    assert targets_dir.exists()
    assert metadata_yaml.exists()
    for target_file in target_files:
        assert target_file.exists()

    # rollback must remove everything the plan created, but keep the pre-existing base dir
    split_plan.rollback()
    assert base_dir.exists() is True
    assert targets_dir.exists() is False
    assert metadata_yaml.exists() is False
    for target_file in target_files:
        # Bug fix: the original called exists() without asserting, so rollback of the
        # written target files was never actually verified.
        assert target_file.exists() is False
def test_split_chained_sub_models(tmp_path: pathlib.Path, sample_catalog: oscatalog.Catalog) -> None:
    """Test for split_model method with chained sub models like catalog.metadata.parties.*.

    Builds the expected plan by hand (one sub-model file per party, a stripped
    metadata file, and a stripped catalog root) and compares it with the plan
    produced by SplitCmd.split_model.
    """
    # Assume we are running a command like below
    # trestle split -f catalog.json -e catalog.metadata.parties.*
    # see https://github.com/IBM/compliance-trestle/issues/172
    content_type = FileContentType.JSON

    # prepare trestle project dir with the file
    catalog_dir, catalog_file = test_utils.prepare_trestle_project_dir(
        tmp_path, content_type, sample_catalog, test_utils.CATALOGS_DIR)

    # read the model from file
    catalog = oscatalog.Catalog.oscal_read(catalog_file)
    element = Element(catalog)
    element_args = ['catalog.metadata.parties.*']
    # the single chained arg expands to two element paths: metadata, then parties.*
    element_paths = test_utils.prepare_element_paths(catalog_dir, element_args)
    assert 2 == len(element_paths)

    expected_plan = Plan()

    # prepare to extract metadata and parties
    metadata_file = catalog_dir / element_paths[0].to_file_path(content_type)
    metadata_field_alias = element_paths[0].get_element_name()
    metadata = element.get_at(element_paths[0])
    meta_element = Element(metadata, metadata_field_alias)

    # extract parties: one numbered sub-model file per party
    parties_dir = catalog_dir / 'catalog/metadata/parties'
    for i, party in enumerate(meta_element.get_at(element_paths[1], False)):
        prefix = str(i).zfill(const.FILE_DIGIT_PREFIX_LENGTH)
        sub_model_actions = SplitCmd.prepare_sub_model_split_actions(party, parties_dir, prefix, content_type)
        expected_plan.add_actions(sub_model_actions)

    # stripped metadata (parties removed) written back to the metadata file
    stripped_metadata = metadata.stripped_instance(stripped_fields_aliases=['parties'])
    expected_plan.add_action(CreatePathAction(metadata_file))
    expected_plan.add_action(
        WriteFileAction(metadata_file, Element(stripped_metadata, metadata_field_alias), content_type))

    # stripped catalog root (metadata removed), overwriting the original file
    root_file = catalog_dir / element_paths[0].to_root_path(content_type)
    remaining_root = element.get().stripped_instance(metadata_field_alias)
    expected_plan.add_action(CreatePathAction(root_file, True))
    expected_plan.add_action(WriteFileAction(root_file, Element(remaining_root), content_type))

    # generated plan must match the hand-crafted plan (Plan equality is order-sensitive)
    split_plan = SplitCmd.split_model(catalog, element_paths, catalog_dir, content_type)
    assert expected_plan == split_plan
def _run(self, args: argparse.Namespace) -> int:
    """Top level import run command.

    Fetches an OSCAL file from a URI outside the trestle project, writes it into
    the appropriate model directory, validates the result, and rolls the import
    back if validation fails.

    Returns:
        CmdReturnCodes.SUCCESS on success, CmdReturnCodes.COMMAND_ERROR when the
        imported file fails validation and is rolled back; any other failure is
        routed through handle_generic_command_exception.
    """
    try:
        log.set_log_level_from_args(args)
        trestle_root = args.trestle_root
        if not file_utils.is_valid_project_root(trestle_root):
            raise TrestleRootError(f'Attempt to import from non-valid trestle project root {trestle_root}')
        input_uri = args.file
        # importing a file already inside the project would duplicate content
        if cache.FetcherFactory.in_trestle_directory(trestle_root, input_uri):
            raise TrestleError(
                f'Imported file {input_uri} cannot be from current trestle project. Use duplicate instead.'
            )
        # content type is inferred from the URI's file extension
        content_type = FileContentType.to_content_type('.' + input_uri.split('.')[-1])
        fetcher = cache.FetcherFactory.get_fetcher(trestle_root, str(input_uri))
        model_read, parent_alias = fetcher.get_oscal(True)

        plural_path = ModelUtils.model_type_to_model_dir(parent_alias)
        output_name = args.output
        desired_model_dir = trestle_root / plural_path
        desired_model_path: pathlib.Path = desired_model_dir / output_name / parent_alias
        desired_model_path = desired_model_path.with_suffix(
            FileContentType.to_file_extension(content_type)).resolve()
        if desired_model_path.exists():
            raise TrestleError(
                f'Cannot import because file to be imported here: {desired_model_path} already exists.'
            )

        if args.regenerate:
            logger.debug(f'regenerating uuids in imported file {input_uri}')
            model_read, lut, nchanged = ModelUtils.regenerate_uuids(model_read)
            logger.debug(f'uuid lut has {len(lut.items())} entries and {nchanged} refs were updated')

        top_element = Element(model_read)
        create_action = CreatePathAction(desired_model_path, True)
        write_action = WriteFileAction(desired_model_path, top_element, content_type)

        # create a plan to create the directory and write the imported file.
        import_plan = Plan()
        import_plan.add_action(create_action)
        import_plan.add_action(write_action)
        import_plan.execute()

        # re-use the namespace to invoke validate on the file just written
        args = argparse.Namespace(
            file=desired_model_path, verbose=args.verbose, trestle_root=args.trestle_root, type=None, all=None
        )
        rollback = False
        try:
            rc = validatecmd.ValidateCmd()._run(args)
            if rc > 0:
                logger.warning(f'Validation of imported file {desired_model_path} did not pass')
                rollback = True
        except TrestleError as err:
            logger.warning(f'Import of {str(input_uri)} failed with validation error: {err}')
            rollback = True

        if rollback:
            logger.debug(f'Rolling back import of {str(input_uri)} to {desired_model_path}')
            try:
                import_plan.rollback()
            except TrestleError as err:
                # Fix: chain the original error so the rollback failure keeps its cause.
                raise TrestleError(
                    f'Import failed in plan rollback: {err}. Manually remove {desired_model_path} to recover.'
                ) from err
            logger.debug(f'Successful rollback of import to {desired_model_path}')
            return CmdReturnCodes.COMMAND_ERROR.value

        return CmdReturnCodes.SUCCESS.value
    except Exception as e:  # pragma: no cover
        return handle_generic_command_exception(e, logger, 'Error while importing OSCAL file')
def test_split_multi_level_dict(
        tmp_path: pathlib.Path, sample_target_def: ostarget.TargetDefinition) -> None:
    """Test for split_model method.

    Exercises a two-level wildcard split: every target is split off, and within
    each target every target-control-implementation is split off. The expected
    plan is built by hand and compared with SplitCmd.split_model's plan.
    """
    # Assume we are running a command like below
    # trestle split -f target.yaml -e target-definition.targets.*.target-control-implementations.*
    content_type = FileContentType.YAML

    # prepare trestle project dir with the file
    target_def_dir, target_def_file = test_utils.prepare_trestle_project_dir(
        tmp_path, content_type, sample_target_def, test_utils.TARGET_DEFS_DIR)
    file_ext = FileContentType.to_file_extension(content_type)

    # read the model from file
    target_def: ostarget.TargetDefinition = ostarget.TargetDefinition.oscal_read(target_def_file)
    element = Element(target_def)
    element_args = ['target-definition.targets.*.target-control-implementations.*']
    element_paths = test_utils.prepare_element_paths(target_def_dir, element_args)
    expected_plan = Plan()

    # extract values; targets is a dict keyed by target id
    targets: dict = element.get_at(element_paths[0])
    targets_dir = target_def_dir / element_paths[0].to_file_path()

    # split every target
    for key in targets:
        # individual target dir, named <key>__<alias>
        target: ostarget.Target = targets[key]
        target_element = Element(targets[key])
        model_type = utils.classname_to_alias(type(target).__name__, 'json')
        dir_prefix = key
        target_dir_name = f'{dir_prefix}{const.IDX_SEP}{model_type}'
        target_file = targets_dir / f'{target_dir_name}{file_ext}'

        # target control impl dir for the target; files numbered with zero-filled prefix
        target_ctrl_impls: dict = target_element.get_at(element_paths[1])
        targets_ctrl_dir = targets_dir / element_paths[1].to_file_path(root_dir=target_dir_name)
        for i, target_ctrl_impl in enumerate(target_ctrl_impls):
            model_type = utils.classname_to_alias(type(target_ctrl_impl).__name__, 'json')
            file_prefix = str(i).zfill(const.FILE_DIGIT_PREFIX_LENGTH)
            file_name = f'{file_prefix}{const.IDX_SEP}{model_type}{file_ext}'
            file_path = targets_ctrl_dir / file_name
            expected_plan.add_action(CreatePathAction(file_path))
            expected_plan.add_action(WriteFileAction(file_path, Element(target_ctrl_impl), content_type))

        # write stripped target model (control implementations removed)
        stripped_target = target.stripped_instance(
            stripped_fields_aliases=[element_paths[1].get_element_name()])
        expected_plan.add_action(CreatePathAction(target_file))
        expected_plan.add_action(WriteFileAction(target_file, Element(stripped_target), content_type))

    # stripped root target-definition (targets removed), overwriting the original file
    root_file = target_def_dir / f'target-definition{file_ext}'
    remaining_root = element.get().stripped_instance(
        stripped_fields_aliases=[element_paths[0].get_element_name()])
    expected_plan.add_action(CreatePathAction(root_file, True))
    expected_plan.add_action(WriteFileAction(root_file, Element(remaining_root), content_type))

    split_plan = SplitCmd.split_model(target_def, element_paths, target_def_dir, content_type)
    assert expected_plan == split_plan
def test_subsequent_split_model(
        tmp_path: pathlib.Path, sample_target_def: ostarget.TargetDefinition) -> None:
    """Test subsequent split of sub models.

    First splits metadata out of the target definition (and executes that plan),
    then verifies a second split of metadata.parties.* produces the expected plan.
    """
    # Assume we are running a command like below
    # trestle split -f target-definition.yaml -e target-definition.metadata
    content_type = FileContentType.YAML

    # prepare trestle project dir with the file
    target_def_dir, target_def_file = test_utils.prepare_trestle_project_dir(
        tmp_path, content_type, sample_target_def, test_utils.TARGET_DEFS_DIR)

    # first split the target-def into metadata
    target_def = ostarget.TargetDefinition.oscal_read(target_def_file)
    element = Element(target_def, 'target-definition')
    element_args = ['target-definition.metadata']
    element_paths = test_utils.prepare_element_paths(target_def_dir, element_args)
    metadata_file = target_def_dir / element_paths[0].to_file_path(content_type)
    metadata: ostarget.Metadata = element.get_at(element_paths[0])
    root_file = target_def_dir / element_paths[0].to_root_path(content_type)
    metadata_field_alias = element_paths[0].get_element_name()
    stripped_root = element.get().stripped_instance(stripped_fields_aliases=[metadata_field_alias])
    root_wrapper_alias = utils.classname_to_alias(stripped_root.__class__.__name__, 'json')

    first_plan = Plan()
    first_plan.add_action(CreatePathAction(metadata_file))
    first_plan.add_action(WriteFileAction(metadata_file, Element(metadata, metadata_field_alias), content_type))
    first_plan.add_action(CreatePathAction(root_file, True))
    first_plan.add_action(WriteFileAction(root_file, Element(stripped_root, root_wrapper_alias), content_type))
    first_plan.execute()  # this will split the files in the temp directory

    # now, prepare the expected plan to split metadata at parties
    second_plan = Plan()
    metadata_file_dir = target_def_dir / element_paths[0].to_root_path()
    metadata2 = ostarget.Metadata.oscal_read(metadata_file)
    element = Element(metadata2, metadata_field_alias)
    element_args = ['metadata.parties.*']
    element_paths = test_utils.prepare_element_paths(target_def_dir, element_args)
    parties_dir = metadata_file_dir / element_paths[0].to_file_path()
    for i, party in enumerate(element.get_at(element_paths[0])):
        prefix = str(i).zfill(const.FILE_DIGIT_PREFIX_LENGTH)
        sub_model_actions = SplitCmd.prepare_sub_model_split_actions(party, parties_dir, prefix, content_type)
        second_plan.add_actions(sub_model_actions)

    # stripped metadata (parties removed), overwriting the metadata file
    stripped_metadata = metadata2.stripped_instance(stripped_fields_aliases=['parties'])
    second_plan.add_action(CreatePathAction(metadata_file, True))
    second_plan.add_action(
        WriteFileAction(metadata_file, Element(stripped_metadata, metadata_field_alias), content_type))

    # call the split command and compare the plans
    # NOTE(review): split_model is called with `metadata` (from the first split)
    # rather than the re-read `metadata2`; presumably equivalent content — confirm.
    split_plan = SplitCmd.split_model(metadata, element_paths, metadata_file_dir, content_type)
    assert second_plan == split_plan
def test_merge_plan_simple_list(testdata_dir, tmp_trestle_dir):
    """Test '$mycatalog$ trestle merge -e metadata.roles'.

    From inside the split catalog directory, merges the split-off roles list back
    into metadata and compares the generated plan against a hand-crafted one.
    """
    # Assume we are running a command like below
    # trestle merge -e catalog.back-matter
    content_type = FileContentType.JSON
    fext = FileContentType.to_file_extension(content_type)

    # prepare trestle project dir with the file
    test_utils.ensure_trestle_config_dir(tmp_trestle_dir)
    test_data_source = testdata_dir / 'split_merge/step4_split_groups_array/catalogs'
    catalogs_dir = Path('catalogs/')
    mycatalog_dir = catalogs_dir / 'mycatalog'
    catalog_dir = mycatalog_dir / 'catalog'

    # Copy files from test/data/split_merge/step4
    shutil.rmtree(catalogs_dir)
    shutil.copytree(test_data_source, catalogs_dir)

    # merge is a contextual command: run it from inside the split catalog dir
    os.chdir(mycatalog_dir)
    catalog_dir = Path('catalog/')
    os.chdir(catalog_dir)
    metadata_dir = Path('metadata/')
    metadata_file = Path(f'metadata{fext}')
    roles_dir = metadata_dir / 'roles'

    # Read files
    # The destination file/model needs to be loaded in a stripped model
    stripped_metadata_type, _ = fs.get_stripped_contextual_model(metadata_file.absolute())
    stripped_metadata = stripped_metadata_type.oscal_read(metadata_file)

    # Target model needs to be complete and if it is decomposed, needs to be merged recursively first
    roles = [
        oscatalog.Role.oscal_read(roles_dir / '00000__role.json'),
        oscatalog.Role.oscal_read(roles_dir / '00001__role.json')
    ]

    # Roles need to be inserted in a stripped model that does NOT exclude the roles field
    merged_metadata_type, merged_metadata_alias = fs.get_stripped_contextual_model(
        metadata_file.absolute(), aliases_not_to_be_stripped=['roles'])
    merged_dict = stripped_metadata.__dict__
    merged_dict['roles'] = roles
    merged_metadata = merged_metadata_type(**merged_dict)
    element = Element(merged_metadata, merged_metadata_alias)

    # Create hand-crafted merge plan: reset destination, write merged model, delete roles dir
    reset_destination_action = CreatePathAction(metadata_file.absolute(), clear_content=True)
    write_destination_action = WriteFileAction(metadata_file, element, content_type=content_type)
    delete_element_action = RemovePathAction(roles_dir.absolute())
    expected_plan: Plan = Plan()
    expected_plan.add_action(reset_destination_action)
    expected_plan.add_action(write_destination_action)
    expected_plan.add_action(delete_element_action)

    # Call merge()
    generated_plan = MergeCmd.merge(ElementPath('metadata.roles'))

    # Assert the generated plan matches the expected plan
    assert len(list(diff(generated_plan, expected_plan))) == 0
def test_merge_plan_simple_case(testdata_dir, tmp_trestle_dir):
    """Test '$mycatalog$ trestle merge -e catalog.back-matter'."""
    # Simulates: trestle merge -e catalog.back-matter
    fmt = FileContentType.JSON
    ext = FileContentType.to_file_extension(fmt)

    # Lay out a trestle project populated from the step4 split test data.
    test_utils.ensure_trestle_config_dir(tmp_trestle_dir)
    source_dir = testdata_dir / 'split_merge/step4_split_groups_array/catalogs'
    catalogs_root = Path('catalogs/')
    my_catalog_dir = catalogs_root / 'mycatalog'
    split_dir = my_catalog_dir / 'catalog'
    shutil.rmtree(catalogs_root)
    shutil.copytree(source_dir, catalogs_root)

    # Work from inside the catalog's own directory, as the command would.
    os.chdir(my_catalog_dir)
    catalog_file = Path(f'catalog{ext}').resolve()
    split_dir = Path('catalog/')
    back_matter_file = (split_dir / f'back-matter{ext}').resolve()
    assert catalog_file.exists()
    assert back_matter_file.exists()

    # The destination catalog is loaded as a model stripped of back-matter.
    stripped_catalog_type, _ = ModelUtils.get_stripped_model_type(catalog_file.resolve(), tmp_trestle_dir)
    stripped_catalog = stripped_catalog_type.oscal_read(catalog_file)

    # The split-off back-matter is read whole; no recursive merge is needed here.
    back_matter = common.BackMatter.oscal_read(back_matter_file)

    # Re-insert back-matter into a catalog type that keeps the back-matter field.
    merged_catalog_type, merged_catalog_alias = ModelUtils.get_stripped_model_type(
        catalog_file.resolve(), tmp_trestle_dir, aliases_not_to_be_stripped=['back-matter'])
    combined = stripped_catalog.__dict__
    combined['back-matter'] = back_matter
    merged_catalog = merged_catalog_type(**combined)
    merged_element = Element(merged_catalog, merged_catalog_alias)

    # Hand-craft the plan the merge command is expected to produce:
    # reset destination, write merged catalog, remove the split-off file.
    expected_plan: Plan = Plan()
    expected_plan.add_action(CreatePathAction(catalog_file, clear_content=True))
    expected_plan.add_action(WriteFileAction(catalog_file, merged_element, content_type=fmt))
    expected_plan.add_action(RemovePathAction(back_matter_file))

    # Call merge() and compare plans.
    generated_plan = MergeCmd.merge(Path.cwd(), ElementPath('catalog.back-matter'), tmp_trestle_dir)
    assert generated_plan == expected_plan
def split_model_at_path_chain(
    cls,
    model_obj: OscalBaseModel,
    element_paths: List[ElementPath],
    base_dir: pathlib.Path,
    content_type: FileContentType,
    cur_path_index: int,
    split_plan: Plan,
    strip_root: bool,
    root_file_name: str,
    aliases_to_strip: Dict[str, AliasTracker],
    last_one: bool = True
) -> int:
    """Recursively split the model at the provided chain of element paths.

    It assumes that a chain of element paths starts at the cur_path_index with the first path ending
    with a wildcard (*)

    If the wildcard follows an element that is inherently a list of items, the list of items is extracted.
    But if the wildcard follows a generic model then members of that model class found in the model will be
    split off.  But only the non-trivial elements are removed, i.e. not str, int, datetime, etc.

    Args:
        model_obj: The OscalBaseModel to be split
        element_paths: The List[ElementPath] of elements to split, including embedded wildcards
        base_dir: pathlib.Path of the file being split
        content_type: json or yaml files
        cur_path_index: Index into the list of element paths for the current split operation
        split_plan: The accumulated plan of actions needed to perform the split
        strip_root: Whether to strip elements from the root object
        root_file_name: Filename of root file that gets split into a list of items
        aliases_to_strip: AliasTracker previously loaded with aliases that need to be split from each element
        last_one: bool indicating last item in array has been split and stripped model can now be written

    Returns:
        int representing the index where the chain of the path ends.

    Raises:
        TrestleError: if split_plan is None or cur_path_index is negative.

    Examples:
        For example, element paths could have a list of paths as below for a `ComponentDefinition`
        model where the first path is the start of the chain.

        For each of the sub model described by the first element path (e.g component-definition.components.*)
        in the chain, the subsequent paths (e.g component.control-implementations.*) will be applied
        recursively to retrieve the sub-sub models:
        [
            'component-definition.component.*',
            'component.control-implementations.*'
        ]
        for a command like below:
        trestle split -f component.yaml -e component-definition.components.*.control-implementations.*
    """
    if split_plan is None:
        raise TrestleError('Split plan must have been initialized')
    if cur_path_index < 0:
        raise TrestleError('Current index of the chain of paths cannot be less than 0')
    # if there are no more element_paths, return the current plan
    if cur_path_index >= len(element_paths):
        return cur_path_index

    # initialize local variables
    element = Element(model_obj)
    stripped_field_alias: List[str] = []

    # get the sub_model specified by the element_path of this round
    element_path = element_paths[cur_path_index]

    # does the next element_path point back at me
    is_parent = cur_path_index + 1 < len(element_paths) and element_paths[cur_path_index
                                                                          + 1].get_parent() == element_path

    # root dir name for sub models dir
    # 00000__group.json will have the root_dir name as 00000__group for sub models of group
    # catalog.json will have the root_dir name as catalog
    root_dir = ''
    if root_file_name != '':
        root_dir = str(pathlib.Path(root_file_name).with_suffix(''))

    sub_models = element.get_at(element_path, False)  # we call sub_models as in plural, but it can be just one

    # assume cur_path_index is the end of the chain
    # value of this variable may change during recursive split of the sub-models below
    path_chain_end = cur_path_index

    # if wildcard is present in the element_path and the next path in the chain has current path as the parent,
    # Then deal with case of list, or split of arbitrary oscalbasemodel
    if is_parent and element_path.get_last() is not ElementPath.WILDCARD:
        # create dir for all sub model items
        sub_models_dir = base_dir / element_path.to_root_path()
        sub_model_plan = Plan()
        path_chain_end = cls.split_model_at_path_chain(
            sub_models,
            element_paths,
            sub_models_dir,
            content_type,
            cur_path_index + 1,
            sub_model_plan,
            True,
            '',
            aliases_to_strip
        )
        sub_model_actions = sub_model_plan.get_actions()
        split_plan.add_actions(sub_model_actions)
    elif element_path.get_last() == ElementPath.WILDCARD:
        # extract sub-models into a dict with appropriate prefix
        sub_model_items: Dict[str, OscalBaseModel] = {}
        sub_models_dir = base_dir / element_path.to_file_path(root_dir=root_dir)
        if isinstance(sub_models, list):
            for i, sub_model_item in enumerate(sub_models):
                # e.g. `groups/00000_groups/`
                prefix = str(i).zfill(const.FILE_DIGIT_PREFIX_LENGTH)
                sub_model_items[prefix] = sub_model_item

        # process list sub model items
        count = 0
        for key, sub_model_item in sub_model_items.items():
            count += 1
            # recursively split the sub-model if there are more element paths to traverse
            # e.g. split component.control-implementations.*
            require_recursive_split = cur_path_index + 1 < len(element_paths) and element_paths[
                cur_path_index + 1].get_parent() == element_path

            if require_recursive_split:
                # prepare individual directory for each sub-model
                sub_root_file_name = cmd_utils.to_model_file_name(sub_model_item, key, content_type)
                sub_model_plan = Plan()
                # NOTE(review): this rebinds the `last_one` parameter so the final
                # item of the collection triggers writing of the stripped model.
                last_one: bool = count == len(sub_model_items)
                path_chain_end = cls.split_model_at_path_chain(
                    sub_model_item,
                    element_paths,
                    sub_models_dir,
                    content_type,
                    cur_path_index + 1,
                    sub_model_plan,
                    True,
                    sub_root_file_name,
                    aliases_to_strip,
                    last_one
                )
                sub_model_actions = sub_model_plan.get_actions()
            else:
                sub_model_actions = cls.prepare_sub_model_split_actions(
                    sub_model_item, sub_models_dir, key, content_type
                )
            split_plan.add_actions(sub_model_actions)
    else:
        # the chain of path ends at the current index.
        # so no recursive call. Let's just write the sub model to the file and get out
        if sub_models is not None:
            sub_model_file = base_dir / element_path.to_file_path(content_type, root_dir=root_dir)
            split_plan.add_action(CreatePathAction(sub_model_file))
            split_plan.add_action(
                WriteFileAction(sub_model_file, Element(sub_models, element_path.get_element_name()), content_type)
            )

    # Strip the root model and add a WriteAction for the updated model object in the plan
    if strip_root:
        full_path = element_path.get_full()
        path = '.'.join(full_path.split('.')[:-1])
        aliases = [element_path.get_element_name()]
        need_to_write = True
        use_alias_dict = aliases_to_strip is not None and path in aliases_to_strip
        if use_alias_dict:
            aliases = aliases_to_strip[path].get_aliases()
            need_to_write = aliases_to_strip[path].needs_writing()

        stripped_model = model_obj.stripped_instance(stripped_fields_aliases=aliases)
        # can mark it written even if it doesn't need writing since it is empty
        # but if an array only mark it written if it's the last one
        if last_one and use_alias_dict:
            aliases_to_strip[path].mark_written()
        # If it's an empty model after stripping the fields, don't create path and don't write
        # NOTE(review): stripped_field_alias is never populated, so this check only
        # fires for a model with no fields at all — confirm intent.
        field_list = [x for x in model_obj.__fields__.keys() if model_obj.__fields__[x] is not None]
        if set(field_list) == set(stripped_field_alias):
            return path_chain_end
        if need_to_write:
            if root_file_name != '':
                root_file = base_dir / root_file_name
            else:
                root_file = base_dir / element_path.to_root_path(content_type)
            split_plan.add_action(CreatePathAction(root_file))
            wrapper_alias = classname_to_alias(stripped_model.__class__.__name__, AliasMode.JSON)
            split_plan.add_action(WriteFileAction(root_file, Element(stripped_model, wrapper_alias), content_type))

    # return the end of the current path chain
    return path_chain_end
def test_merge_expanded_metadata_into_catalog(testdata_dir, tmp_trestle_dir):
    """Test '$mycatalog$ trestle merge -e catalog.metadata' when metadata is already split.

    The split metadata must be recursively re-assembled (via load_distributed)
    before being merged back into the catalog; the resulting plan is compared
    against a hand-crafted one.
    """
    # Assume we are running a command like below
    # trestle merge -e catalog.metadata
    content_type = FileContentType.JSON
    fext = FileContentType.to_file_extension(content_type)

    # prepare trestle project dir with the file
    test_utils.ensure_trestle_config_dir(tmp_trestle_dir)
    test_data_source = testdata_dir / 'split_merge/step4_split_groups_array/catalogs'
    catalogs_dir = Path('catalogs/')
    mycatalog_dir = catalogs_dir / 'mycatalog'
    catalog_dir = mycatalog_dir / 'catalog'

    # Copy files from test/data/split_merge/step4
    shutil.rmtree(catalogs_dir)
    shutil.copytree(test_data_source, catalogs_dir)

    # Change directory to mycatalog_dir
    os.chdir(mycatalog_dir)
    catalog_file = Path(f'catalog{fext}').resolve()
    catalog_dir = Path('catalog/')
    metadata_dir = catalog_dir / 'metadata'
    metadata_file = (catalog_dir / f'metadata{fext}').resolve()
    assert catalog_file.exists()
    assert metadata_dir.exists()
    assert metadata_file.exists()

    # Read files
    # Create hand-crafted merge plan
    expected_plan: Plan = Plan()
    reset_destination_action = CreatePathAction(catalog_file, clear_content=True)
    expected_plan.add_action(reset_destination_action)

    # re-assemble the distributed (split) metadata into a single instance
    _, _, merged_metadata_instance = ModelUtils.load_distributed(metadata_file, tmp_trestle_dir)
    # catalog type that retains the metadata field
    merged_catalog_type, _ = ModelUtils.get_stripped_model_type(
        catalog_file.resolve(), tmp_trestle_dir, aliases_not_to_be_stripped=['metadata'])
    stripped_catalog_type, _ = ModelUtils.get_stripped_model_type(catalog_file, tmp_trestle_dir)
    stripped_catalog = stripped_catalog_type.oscal_read(catalog_file)
    merged_catalog_dict = stripped_catalog.__dict__
    merged_catalog_dict['metadata'] = merged_metadata_instance
    merged_catalog = merged_catalog_type(**merged_catalog_dict)
    element = Element(merged_catalog)
    write_destination_action = WriteFileAction(catalog_file, element, content_type=content_type)
    expected_plan.add_action(write_destination_action)

    delete_element_action = RemovePathAction(metadata_file)
    expected_plan.add_action(delete_element_action)

    # Call merge()
    generated_plan = MergeCmd.merge(Path.cwd(), ElementPath('catalog.metadata'), tmp_trestle_dir)

    # Assert the generated plan matches the expected plan
    assert generated_plan == expected_plan
def test_remove(tmp_path, sample_catalog_minimal):
    """Test RemoveCmd.remove() method for trestle remove: removing Roles and Responsible-Parties."""

    def load_element(json_name):
        # Read a test-data catalog file and wrap it as an Element.
        return Element(Catalog.oscal_read(test_utils.JSON_TEST_DATA_PATH / json_name))

    def run_action(action):
        # Push a single action through a simulated, then executed, plan.
        plan = Plan()
        plan.add_action(action)
        plan.simulate()
        plan.execute()

    # 1. Remove responsible-parties
    # Note: minimal catalog does have responsible-parties but doesn't have Roles.
    catalog_with_responsible_parties = load_element('minimal_catalog.json')
    # minimal catalog with responsible-parties (dict) removed
    expected_catalog_responsible_parties_removed = load_element('minimal_catalog_no_responsible-parties.json')

    # Target path for removal:
    element_path = ElementPath('catalog.metadata.responsible-parties')
    expected_remove_action = RemoveAction(catalog_with_responsible_parties, element_path)

    # Call remove() method
    actual_remove_action, actual_catalog_removed_responsible_parties = RemoveCmd.remove(
        element_path, Catalog, catalog_with_responsible_parties)

    # 1.1 Assertion about action
    assert expected_remove_action == actual_remove_action
    run_action(actual_remove_action)
    # 1.2 Assertion about resulting element after removal
    assert expected_catalog_responsible_parties_removed == actual_catalog_removed_responsible_parties

    # 2. Remove roles
    catalog_without_roles = load_element('minimal_catalog.json')
    # minimal catalog with Roles
    catalog_with_roles = load_element('minimal_catalog_roles.json')

    # Target path for removal:
    element_path = ElementPath('catalog.metadata.roles')
    expected_remove_action = RemoveAction(catalog_with_roles, element_path)

    # Call remove() method
    actual_remove_action, actual_catalog_removed_roles = RemoveCmd.remove(
        element_path, Catalog, catalog_with_roles)

    # 2.1 Assertion about action
    assert expected_remove_action == actual_remove_action
    run_action(actual_remove_action)
    # 2.2 Assertion about resulting element after removal
    assert catalog_without_roles == actual_catalog_removed_roles
def test_split_multi_level_dict_plans(
        tmp_path: pathlib.Path, sample_nist_component_def: component.ComponentDefinition, keep_cwd) -> None:
    """Test for split_model method.

    Two-level wildcard split over a component definition: every component is
    split off and, within each, every control implementation. The expected plan
    is built by hand and compared against the plan from SplitCmd.split_model.
    """
    # Assume we are running a command like below
    # trestle split -f component.yaml -e component-definition.components.*.control-implementations.*
    content_type = FileContentType.YAML

    # prepare trestle project dir with the file
    component_def_dir, component_def_file = test_utils.prepare_trestle_project_dir(
        tmp_path, content_type, sample_nist_component_def, test_utils.COMPONENT_DEF_DIR)
    file_ext = FileContentType.to_file_extension(content_type)

    # read the model from file
    component_def: component.ComponentDefinition = component.ComponentDefinition.oscal_read(component_def_file)
    element = Element(component_def)
    element_args = ['component-definition.components.*.control-implementations.*']
    element_paths = cmd_utils.parse_element_args(None, element_args, component_def_dir.relative_to(tmp_path))
    expected_plan = Plan()

    # extract values; components is a list
    components: list = element.get_at(element_paths[0])
    components_dir = component_def_dir / element_paths[0].to_file_path()

    # split every component
    for index, comp_obj in enumerate(components):
        # individual component dir, named <zero-filled index>__<alias>
        component_element = Element(comp_obj)
        model_type = str_utils.classname_to_alias(type(comp_obj).__name__, AliasMode.JSON)
        dir_prefix = str(index).zfill(const.FILE_DIGIT_PREFIX_LENGTH)
        component_dir_name = f'{dir_prefix}{const.IDX_SEP}{model_type}'
        component_file = components_dir / f'{component_dir_name}{file_ext}'

        # control implementation dir for this component; files numbered with zero-filled prefix
        component_ctrl_impls: list = component_element.get_at(element_paths[1])
        component_ctrl_dir = components_dir / element_paths[1].to_file_path(root_dir=component_dir_name)
        for i, component_ctrl_impl in enumerate(component_ctrl_impls):
            model_type = str_utils.classname_to_alias(type(component_ctrl_impl).__name__, AliasMode.JSON)
            file_prefix = str(i).zfill(const.FILE_DIGIT_PREFIX_LENGTH)
            file_name = f'{file_prefix}{const.IDX_SEP}{model_type}{file_ext}'
            file_path = component_ctrl_dir / file_name
            expected_plan.add_action(CreatePathAction(file_path))
            expected_plan.add_action(WriteFileAction(file_path, Element(component_ctrl_impl), content_type))

        # write stripped component model (control implementations removed)
        stripped_target = comp_obj.stripped_instance(
            stripped_fields_aliases=[element_paths[1].get_element_name()])
        expected_plan.add_action(CreatePathAction(component_file))
        expected_plan.add_action(WriteFileAction(component_file, Element(stripped_target), content_type))

    # stripped root component-definition (components removed), overwriting the original file
    root_file = component_def_dir / f'component-definition{file_ext}'
    remaining_root = element.get().stripped_instance(
        stripped_fields_aliases=[element_paths[0].get_element_name()])
    expected_plan.add_action(CreatePathAction(root_file, True))
    expected_plan.add_action(WriteFileAction(root_file, Element(remaining_root), content_type))

    split_plan = SplitCmd.split_model(component_def, element_paths, component_def_dir, content_type, '', None)
    assert expected_plan == split_plan
def merge(cls, element_path: ElementPath) -> Plan:
    """Merge operations.

    Builds and returns a Plan that merges the model at the target element path
    back into its destination (parent) model file, then removes the target.

    Raises:
        TrestleError: if the contextual file type cannot be determined or the
            target model is not valid for merging at this point.
    """
    element_path_list = element_path.get_full_path_parts()
    target_model_alias = element_path_list[-1]

    # 1. Load destination model into a stripped model.
    destination_model_alias = element_path_list[-2]
    # Destination model filetype is inferred from files in the current directory.
    try:
        file_type = fs.get_contextual_file_type(Path(os.getcwd()))
    except Exception as e:
        raise TrestleError(str(e))
    file_ext = FileContentType.to_file_extension(file_type)

    # Destination model filename
    destination_model_filename = Path(
        f'{utils.classname_to_alias(destination_model_alias, "json")}{file_ext}')
    destination_model_type, _ = fs.get_stripped_contextual_model(destination_model_filename.absolute())

    destination_model_object = destination_model_type.oscal_read(destination_model_filename)

    # 1.5. If target is the wildcard '*', load the fully distributed destination model
    # and return a plan that collapses it back into a single file.
    if target_model_alias == '*':
        merged_model_type, merged_model_alias, merged_model_instance = load_distributed.load_distributed(
            destination_model_filename)

        reset_destination_action = CreatePathAction(destination_model_filename.absolute(), clear_content=True)
        write_destination_action = WriteFileAction(
            destination_model_filename, Element(merged_model_instance), content_type=file_type)
        delete_target_action = RemovePathAction(Path(merged_model_alias).absolute())

        plan = Plan()
        plan.add_action(reset_destination_action)
        plan.add_action(write_destination_action)
        plan.add_action(delete_target_action)
        return plan

    # Get destination model type with the target field not stripped.
    merged_model_type, merged_model_alias = fs.get_stripped_contextual_model(
        destination_model_filename.absolute(), aliases_not_to_be_stripped=[target_model_alias])

    # 2. Load target model. Target model could itself be stripped.
    try:
        target_model_type = utils.get_target_model(element_path_list, merged_model_type)
    except Exception as e:
        raise TrestleError(
            f'Target model not found. Possibly merge of the elements not allowed at this point. {str(e)}')

    # target_model filename - depends whether destination model is decomposed or not
    if (Path(os.getcwd()) / destination_model_alias).exists():
        target_model_path = f'{os.getcwd()}/{destination_model_alias}/{target_model_alias}'
    else:
        target_model_path = target_model_alias

    # if target model is a file then handle file. If file doesn't exist, handle the directory,
    # but in this case it's a list or a dict collection type
    if (Path(f'{target_model_path}{file_ext}')).exists():
        target_model_filename = Path(f'{target_model_path}{file_ext}')
        _, _, target_model_object = load_distributed.load_distributed(target_model_filename)
    else:
        target_model_filename = Path(target_model_path)
        collection_type = utils.get_origin(target_model_type)
        _, _, target_model_object = load_distributed.load_distributed(target_model_filename, collection_type)

    # Unwrap pydantic __root__ models to their underlying value.
    if hasattr(target_model_object, '__dict__') and '__root__' in target_model_object.__dict__:
        target_model_object = target_model_object.__dict__['__root__']

    # 3. Insert target model into destination model.
    merged_dict = destination_model_object.__dict__
    merged_dict[target_model_alias] = target_model_object
    merged_model_object = merged_model_type(**merged_dict)  # type: ignore
    merged_destination_element = Element(merged_model_object)

    # 4. Create action plan: rewrite the merged destination file and remove the target.
    reset_destination_action = CreatePathAction(destination_model_filename.absolute(), clear_content=True)
    write_destination_action = WriteFileAction(
        destination_model_filename, merged_destination_element, content_type=file_type)
    delete_target_action = RemovePathAction(target_model_filename)

    plan = Plan()
    plan.add_action(reset_destination_action)
    plan.add_action(write_destination_action)
    plan.add_action(delete_target_action)

    # TODO: Destination model directory is empty or already merged? Then clean up.
    return plan
def split_model_at_path_chain(cls,
                              model_obj: OscalBaseModel,
                              element_paths: List[ElementPath],
                              base_dir: pathlib.Path,
                              content_type: FileContentType,
                              cur_path_index: int,
                              split_plan: Plan,
                              strip_root: bool,
                              root_file_name: str = '') -> int:
    """Recursively split the model at the provided chain of element paths.

    It assumes that a chain of element paths starts at the cur_path_index with the
    first path ending with a wildcard (*).

    It returns the index where the chain of path ends.

    For example, element paths could have a list of paths as below for a
    `TargetDefinition` model where the first path is the start of the chain.
    For each of the sub model described by the first element path
    (e.g target-defintion.targets.*) in the chain, the subsequent paths
    (e.g. target.target-control-implementations.*) will be applied recursively
    to retrieve the sub-sub models:
    [
        'target-definition.targets.*',
        'target.target-control-implementations.*'
    ]
    for a command like below:
    trestle split -f target.yaml -e target-definition.targets.*.target-control-implementations.*
    """
    # assume we ran the command below:
    # trestle split -f target.yaml -e target-definition.targets.*.target-control-implementations.*
    if split_plan is None:
        raise TrestleError('Split plan must have been initialized')

    if cur_path_index < 0:
        raise TrestleError('Current index of the chain of paths cannot be less than 0')

    # if there are no more element_paths, return the current plan
    if cur_path_index >= len(element_paths):
        return cur_path_index

    # initialize local variables
    element = Element(model_obj)
    stripped_field_alias = []

    # get the sub_model specified by the element_path of this round
    element_path = element_paths[cur_path_index]
    is_parent = cur_path_index + 1 < len(element_paths) and element_paths[cur_path_index
                                                                          + 1].get_parent() == element_path

    # root dir name for sub models dir
    # 00000__group.json will have the root_dir name as 00000__group for sub models of group
    # catalog.json will have the root_dir name as catalog
    root_dir = ''
    if root_file_name != '':
        root_dir = pathlib.Path(root_file_name).stem

    # check that the path is not multiple level deep
    path_parts = element_path.get()
    if path_parts[-1] == ElementPath.WILDCARD:
        path_parts = path_parts[:-1]

    if len(path_parts) > 2:
        msg = 'Trestle supports split of first level children only, '
        msg += f'found path "{element_path}" with level = {len(path_parts)}'
        raise TrestleError(msg)

    sub_models = element.get_at(element_path, False)  # we call sub_models as in plural, but it can be just one
    if sub_models is None:
        return cur_path_index

    # assume cur_path_index is the end of the chain
    # value of this variable may change during recursive split of the sub-models below
    path_chain_end = cur_path_index

    # if wildcard is present in the element_path and the next path in the chain has current path as the parent,
    # we need to split recursively and create separate file for each sub item
    # for example, in the first round we get the `targets` using the path `target-definition.targets.*`
    # so, now we need to split each of the target recursively. Note that target is an instance of dict
    # However, there can be other sub_model, which is of type list
    # NOTE: compare against the wildcard token with equality, not identity — `is not`
    # on strings only worked by accident of CPython interning '*'
    if is_parent and element_path.get_last() != ElementPath.WILDCARD:
        # create dir for all sub model items
        sub_models_dir = base_dir / element_path.to_root_path()
        sub_model_plan = Plan()
        path_chain_end = cls.split_model_at_path_chain(
            sub_models, element_paths, sub_models_dir, content_type, cur_path_index + 1, sub_model_plan, True)
        sub_model_actions = sub_model_plan.get_actions()
        split_plan.add_actions(sub_model_actions)
    elif element_path.get_last() == ElementPath.WILDCARD:
        # extract sub-models into a dict with appropriate prefix
        sub_model_items: Dict[str, OscalBaseModel] = {}
        sub_models_dir = base_dir / element_path.to_file_path(root_dir=root_dir)
        if isinstance(sub_models, list):
            for i, sub_model_item in enumerate(sub_models):
                # e.g. `groups/00000_groups/`
                prefix = str(i).zfill(const.FILE_DIGIT_PREFIX_LENGTH)
                sub_model_items[prefix] = sub_model_item
        elif isinstance(sub_models, dict):
            # prefix is the key of the dict
            sub_model_items = sub_models
        else:
            # unexpected sub model type for multi-level split with wildcard
            raise TrestleError(f'Sub element at {element_path} is not of type list or dict for further split')

        # process list sub model items
        for key in sub_model_items:
            prefix = key
            sub_model_item = sub_model_items[key]

            # recursively split the sub-model if there are more element paths to traverse
            # e.g. split target.target-control-implementations.*
            require_recursive_split = cur_path_index + 1 < len(element_paths) and element_paths[
                cur_path_index + 1].get_parent() == element_path

            if require_recursive_split:
                # prepare individual directory for each sub-model
                # e.g. `targets/<UUID>__target/`
                sub_root_file_name = cmd_utils.to_model_file_name(sub_model_item, prefix, content_type)
                sub_model_plan = Plan()

                path_chain_end = cls.split_model_at_path_chain(
                    sub_model_item,
                    element_paths,
                    sub_models_dir,
                    content_type,
                    cur_path_index + 1,
                    sub_model_plan,
                    True,
                    sub_root_file_name)
                sub_model_actions = sub_model_plan.get_actions()
            else:
                sub_model_actions = cls.prepare_sub_model_split_actions(
                    sub_model_item, sub_models_dir, prefix, content_type)

            split_plan.add_actions(sub_model_actions)
    else:
        # the chain of path ends at the current index.
        # so no recursive call. Let's just write the sub model to the file and get out
        sub_model_file = base_dir / element_path.to_file_path(content_type, root_dir=root_dir)
        split_plan.add_action(CreatePathAction(sub_model_file))
        split_plan.add_action(
            WriteFileAction(sub_model_file, Element(sub_models, element_path.get_element_name()), content_type))

    # Strip the root model and add a WriteAction for the updated model object in the plan
    if strip_root:
        stripped_field_alias.append(element_path.get_element_name())
        stripped_root = model_obj.stripped_instance(stripped_fields_aliases=stripped_field_alias)
        if root_file_name != '':
            root_file = base_dir / root_file_name
        else:
            root_file = base_dir / element_path.to_root_path(content_type)

        split_plan.add_action(CreatePathAction(root_file))
        wrapper_alias = utils.classname_to_alias(stripped_root.__class__.__name__, 'json')
        split_plan.add_action(WriteFileAction(root_file, Element(stripped_root, wrapper_alias), content_type))

    # return the end of the current path chain
    return path_chain_end