def main():
    """Search the elements index for a given OVAL id and print the matching
    index documents.

    Requires --id; exits non-zero on a usage error, zero when the id is
    simply not found (informational, not an error).
    """
    # parse command line options
    parser = argparse.ArgumentParser(
        description=
        'Searches the index for a given OVAL element and outputs relevant information'
    )
    parser.add_argument('-i', '--id', help='id of definition fragment')
    args = vars(parser.parse_args())

    if not args['id']:
        parser.print_help()
        message('error', "Please specify --id.")
        # BUG FIX: a usage error previously exited with status 0, so shell
        # scripts/CI could not detect the failure.
        sys.exit(1)

    # get index
    elements_index = lib_search.ElementsIndex(message)
    documents = elements_index.query({'oval_id': args['id']})

    if not documents:
        # an empty result set is informational, not an error
        message('info', "No elements having id '{0}' found.".format(args['id']))
        sys.exit(0)

    print(repr(documents))
def build_oval_document_for_definition(defid):
    """Build a complete oval_definitions document for a single definition.

    Resolves every element the definition references (recursively, via the
    elements index) and merges them into one OvalDocument, returned as a
    parsed XML tree. Returns None when defid is None or when any referenced
    element fails to load from its standalone file.
    """
    if defid is None:
        return None

    index = lib_search.ElementsIndex(False)

    if verbose:
        print(
            " ---- Resolving all elements needed to build comprehensive document..."
        )
    downstream_ids = index.find_downstream_ids(defid)
    element_paths = index.get_paths_from_ids(downstream_ids)

    if verbose:
        print(
            " ---- Importing separate elements into comprehensive document...."
        )
    document = OvalDocument(None)
    for element_path in element_paths:
        standalone = OvalElement.fromStandaloneFile(element_path)
        if standalone is None:
            print(":::: None from path: ", element_path)
            return None
        document.addElement(standalone, True)

    return etree.fromstring(document.to_string())
def main():
    """Download the MITRE OVAL repository snapshot, decompose it into the
    local repository layout, and report the minimum schema version of every
    definition found afterwards."""
    today = datetime.date.today()
    url = r"http://oval.mitre.org/rep-data/5.10/org.mitre.oval/oval.xml"
    # NOTE(review): destination is hard-coded to a Windows dev machine path —
    # confirm before running anywhere else.
    dest = r"C:\_Development\oval_repository\Serializer\processed\oval-%s-%s-%s.xml" % (
        today.year,
        today.month,
        today.day,
    )

    downloaded_path, _headers = urllib.request.urlretrieve(url, dest)
    message("INFO", "Downloaded file %s" % downloaded_path)
    oval_decomposition.decompose(downloaded_path, True)

    # indexes needed by the minimum-schema-version calculation
    def_index = lib_search.DefinitionsIndex(message)
    elem_index = lib_search.ElementsIndex(message)

    for def_path in lib_repo.get_definition_paths_iterator():
        message("INFO",
                "Determining minimum schema version for file %s" % def_path)
        outcome = lib_schema.determine_def_min_version(def_path, def_index,
                                                       elem_index, True)
        message(
            "INFO",
            "The minimum OVAL version supported by '{0}' is {1}.".format(
                def_path, outcome['minimum_version']))
def main():
    """Scan the elements index and report every OVAL id that appears in more
    than one file, printing each duplicate id with all paths it was seen at
    (relative to the repository root)."""
    # parse command line options
    parser = argparse.ArgumentParser(
        description='Searches the index for duplicate oval ids')
    # BUG FIX: parse_args() was never invoked, so -h/--help and rejection of
    # unexpected arguments did not work. The script takes no options of its own.
    parser.parse_args()

    # get index
    elements_index = lib_search.ElementsIndex(message)

    # oval_id -> most recently seen path; duplicates accumulate every path observed
    all_oval_ids = dict()
    duplicate_oval_ids = dict()
    for document in elements_index.query():
        oval_id = document['oval_id']
        # store paths relative to the repository root for readable output
        root_path = document['path'].replace(lib_repo.get_root_path(), '')
        if oval_id in all_oval_ids:
            paths = duplicate_oval_ids.setdefault(oval_id, set())
            paths.add(all_oval_ids[oval_id])
            paths.add(root_path)
        all_oval_ids[oval_id] = root_path

    print('Found {0} duplicate OVAL IDs'.format(len(duplicate_oval_ids)))
    pprint.pprint(duplicate_oval_ids)
def main():
    """Determine the minimum OVAL schema version supported by one definition
    (--path or --id) or by every indexed definition (--all), optionally
    writing the result back into the definition (--update)."""
    start_time = time.time()

    # parse command line options
    parser = argparse.ArgumentParser(
        description='Determine minimum supported OVAL schema for a given definition')
    parser.add_argument('-p', '--path', help='path to definition fragment')
    parser.add_argument('-i', '--id', help='id of definition fragment')
    parser.add_argument(
        '-a', '--all', default=False, action="store_true",
        help='determine minimum supported OVAL schema for all indexed definition fragments. cannot be combined with --id or --path')
    parser.add_argument(
        '-u', '--update', default=False, action="store_true",
        help='update the given definition to include the min_schema_version element')
    args = vars(parser.parse_args())

    # exactly one of --all, --path, --id must be supplied
    if args['all'] and (args['id'] or args['path']):
        parser.print_help()
        message('error', "--all cannot be combined with --id or --path.")
        # BUG FIX: usage errors now exit non-zero (previously exited 0)
        sys.exit(1)
    if not args['all'] and not (args['path'] or args['id']):
        parser.print_help()
        message('error', "Please specify one of: --all, --path or --id.")
        sys.exit(1)
    if args['path'] and args['id']:
        parser.print_help()
        message('error', "Please specify one of: --all, --path, or --id.")
        sys.exit(1)

    # get indexes
    definitions_index = lib_search.DefinitionsIndex(message)
    elements_index = lib_search.ElementsIndex(message)

    if args['all']:
        for defpath in lib_repo.get_definition_paths_iterator():
            result = determine_def_min_version(defpath, definitions_index,
                                               elements_index, args['update'])
            report(defpath, result['minimum_version'])
    elif args['id']:
        documents = definitions_index.query({'oval_id': args['id']})
        if not documents:
            message('info', "No definitions having id '{0}' found.".format(args['id']))
            sys.exit(0)
        for path in (document['path'] for document in documents):
            result = determine_def_min_version(path, definitions_index,
                                               elements_index, args['update'])
            report(result['oval_id'], result['minimum_version'])
    elif args['path'] and not os.path.isfile(args['path']):
        message('info', "Definition fragment path not found: " + args['path'])
        sys.exit(0)
    else:
        result = determine_def_min_version(args['path'], definitions_index,
                                           elements_index, args['update'])
        report(args['path'], result['minimum_version'])

    seconds_elapsed = time.time() - start_time
    # BUG FIX: the completion notice was passed to report(), whose other call
    # sites take (label, minimum_version); message() is the logging helper
    # used for status lines everywhere else in these scripts.
    message('info', 'Completed in {0}!'.format(format_duration(seconds_elapsed)))
def build_comprehensive_oval_document(changes):
    """ Builds an XML tree which contains all elements affected by the changes """
    global debug
    global verbose

    if not changes:
        return None

    if verbose:
        print(" ---- Getting OVAL ID's for all changed files...")
    changed_ids = {lib_repo.path_to_oval_id(changed_path) for changed_path in changes}

    # find all upstream ids
    if verbose:
        print(
            " ---- Locating parent definitions for all changed elements...")
    index = lib_search.ElementsIndex(False)
    parent_ids = index.find_upstream_ids(changed_ids, set())

    # keep only the definitions among the upstream ids
    definition_ids = {
        candidate
        for candidate in parent_ids
        if lib_repo.get_element_type_from_oval_id(candidate) == 'definition'
    }

    # pull in everything those definitions reference
    if verbose:
        print(
            " ---- Resolving all elements needed to build comprehensive document..."
        )
    every_id = index.find_downstream_ids(definition_ids, definition_ids)
    element_paths = index.get_paths_from_ids(every_id)

    if verbose:
        print(
            " ---- Importing separate elements into comprehensive document...."
        )
    document = OvalDocument(None)
    for element_path in element_paths:
        loaded = OvalElement.fromStandaloneFile(element_path)
        if loaded is None:
            print(":::: None from path: ", element_path)
            return None
        document.addElement(loaded, True)

    return etree.fromstring(document.to_string())
def main():
    """Build one OVAL definitions file per definition in the repository,
    writing the generated files into the system temp directory. An optional
    --limit caps how many definitions are built."""
    start_time = time.time()

    # parse command line options
    parser = argparse.ArgumentParser(
        # BUG FIX: description typo 'definitons'
        description='Builds all OVAL definitions in the repository.')
    parser.add_argument(
        '-l',
        '--limit',
        nargs='?',
        default="0",
        type=int,
        # BUG FIX: removed stray ')' at the end of the help text
        help='limits number of definitions that will be built')
    args = vars(parser.parse_args())

    # get indexes
    elements_index = lib_search.ElementsIndex(message)

    # create generator, build in memory b/c smallish files
    # (renamed local so it no longer shadows the lib_xml.OvalGenerator class)
    generator = lib_xml.OvalGenerator(message)
    generator.use_file_queues = False

    print("Storing data in temp directory: {}".format(tempfile.gettempdir()))

    i_file = 0
    i_limit = args['limit']
    for defpath in lib_repo.get_definition_paths_iterator():
        i_file += 1
        # a limit of 0 (the default) means "no limit"
        if 0 < i_limit < i_file:
            break

        def_id = lib_repo.path_to_oval_id(defpath)
        message('info', 'Building file {0} for {1}.'.format(i_file, def_id))

        # queue the definition plus all downstream elements it references
        def_ids = {def_id}
        oval_ids = elements_index.find_downstream_ids(def_ids, def_ids)
        file_paths = elements_index.get_paths_from_ids(oval_ids)
        for file_path in file_paths:
            element_type = lib_repo.get_element_type_from_path(file_path)
            generator.queue_element_file(element_type, file_path)

        # write output file
        outfile = '{1}/gen.{0}'.format(lib_repo.oval_id_to_path(def_id),
                                       tempfile.gettempdir())
        generator.to_file(outfile)

    seconds_elapsed = time.time() - start_time
    message('info',
            'Completed in {0}!'.format(format_duration(seconds_elapsed)))
def main():
    """Walk every element directory in the repository and report any file
    that has no corresponding entry in the elements search index."""
    # get index
    index = lib_search.ElementsIndex(message)

    element_dirs = ['definitions', 'objects', 'states', 'tests', 'variables']
    for element_type in element_dirs:
        walk_root = os.path.join(lib_repo.get_repository_root_path(),
                                 element_type)
        for dirpath, _subdirs, filenames in os.walk(walk_root):
            for filename in filenames:
                full_path = os.path.join(dirpath, filename)
                message('info',
                        'Searching index for path {0}'.format(full_path))
                hits = index.query({'path': full_path})
                if not hits:
                    message(
                        'error',
                        "File {0} has no corresponding index entry".format(
                            full_path))
def show_affected(file_list):
    """For a list of files, show all elements that reference them """
    if not file_list:
        print(" ----- Empty file list. Nothing is affected by that")
        return

    index = lib_search.ElementsIndex(False)
    for path in file_list:  # renamed from 'file' to avoid shadowing the builtin
        item_id = lib_repo.path_to_oval_id(path)
        print("\n=========== For item {0}:".format(item_id))
        # depth_limit=1: only the immediate parents of this element
        parents = index.find_upstream_ids(item_id, set(), depth_limit=1)
        if not parents:
            print(" **** No affected items found ****")
            continue
        for parent_id in parents:
            print(" ---> {0}".format(parent_id))
def main():
    """Find every element with neither upstream nor downstream references in
    the index, report each one, and delete the orphaned files."""
    orphans = {}
    index = lib_search.ElementsIndex(message)

    for element_path in lib_repo.get_element_paths_iterator():
        oval_id = lib_repo.path_to_oval_id(element_path)
        id_set = {oval_id}
        upstream = index.find_upstream_ids(id_set, set())
        downstream = index.find_downstream_ids(id_set, set())
        message(
            "INFO", "OVAL ID '%s' had %i upstream ids and %i downstream ids" %
            (oval_id, len(upstream), len(downstream)))
        if not upstream and not downstream:
            orphans[oval_id] = element_path

    for orphan_id, orphan_path in orphans.items():
        message("INFO", "Found Orphan '%s'" % orphan_id)
        os.remove(orphan_path)
def main():
    """Build a schema-valid OVAL definitions file from the definitions
    selected by the filter arguments, optionally schema- and/or
    schematron-validating the result."""
    start_time = time.time()

    # parse command line options
    parser = argparse.ArgumentParser(
        description='Builds a schema-valid OVAL definitions file.')
    output_options = parser.add_argument_group('output options')
    output_options.add_argument(
        '-o', '--outfile', required=True,
        help='file name for output OVAL definitions file')
    output_options.add_argument(
        '-v', '--validate', default=False, action="store_true",
        help='schema validate the output file')
    output_options.add_argument(
        '-s', '--schematron', default=False, action="store_true",
        help='schematron validate the output file')
    output_options.add_argument(
        '-t', '--tempdir', required=False, default="./",
        help="directory to store temporary files used when building oval definitions (default: './')")
    source_options = parser.add_argument_group(
        'definitions filtering',
        'Provide at least one of the following options to determine which definition(s) ' +
        'will be included. Results will include the intersection of matches for each parameter ' +
        'supplied. When multiple values are supplied for one paramater, the parameter will ' +
        'match definitions that match any provided value.')
    source_options.add_argument('--definition_id', nargs='*', dest='oval_id',
                               help='match OVAL definition id(s)')
    source_options.add_argument('--title', nargs='*', dest='title', metavar='PHRASE',
                               help='match phrase(s) in definition titles')
    # BUG FIX: --description help said 'titles' (copy/paste from --title)
    source_options.add_argument('--description', nargs='*', dest='description', metavar='PHRASE',
                               help='match phrase(s) in definition descriptions')
    source_options.add_argument(
        '--class', nargs='*', dest='class',
        help='filter by class(es): {0}'.format(', '.join(
            lib_repo.supported_definition_classes)))
    source_options.add_argument(
        '--status', nargs='*', dest='status',
        help='filter by status(es): {0}'.format(', '.join(
            lib_repo.supported_definition_statuses)))
    source_options.add_argument('--family', nargs='*', dest='family',
                               help='filter by family(ies)')
    source_options.add_argument('--platform', nargs='*', dest='platforms', metavar='PLATFORM',
                               help='filter by platform(s)')
    source_options.add_argument('--product', nargs='*', dest='products', metavar='PRODUCT',
                               help='filter by product(s)')
    source_options.add_argument('--contributor', nargs='*', dest='contributors', metavar='NAME',
                               help='filter by contributor(s)')
    source_options.add_argument('--organization', nargs='*', dest='organizations', metavar='NAME',
                               help='filter by organization(s)')
    source_options.add_argument(
        '--reference_id', nargs='*', dest='reference_ids', metavar='REFERENCE_ID',
        help='filter by reference ids, e.g. CVE-2015-3306')
    source_options.add_argument(
        '--max_schema_version', nargs="?", dest='max_schema_version', metavar='SCHEMA_VERSION',
        help='filter by maximum oval schema version, e.g. 5.10')
    source_options.add_argument(
        '--all_definitions', default=False, action="store_true",
        help='include all definitions in the repository (do not specify any other filters)')
    source_options.add_argument(
        '--from', nargs='?', default='', metavar='YYYYMMDD',
        help='include elements revised on or after this day (format: YYYYMMDD)')
    source_options.add_argument(
        '--to', nargs='?', default='', metavar='YYYYMMDD',
        help='include elements revised on or before this day (format: YYYYMMDD)')
    args = vars(parser.parse_args())

    # get definitions index
    definitions_index = lib_search.DefinitionsIndex(message)

    # construct query from args: each indexed field supplied in args becomes a clause
    query = {}
    for field in definitions_index.get_fieldnames():
        if field in args and args[field]:
            query[field] = args[field]

    # add schema_version filter, if specified
    if args['max_schema_version']:
        query['min_schema_version'] = '[0 TO {0}]'.format(
            definitions_index.version_to_int(args['max_schema_version']))

    # add date range and contributor/org filters, if specified
    all_definitions_filtered = False
    if args['from'] or args['to'] or args['contributors'] or args['organizations']:
        # get revisions index
        revisions_index = lib_search.RevisionsIndex(message)

        # ROBUSTNESS FIX: track the running intersection with an explicit
        # sentinel instead of probing "'filtered_oval_ids' in locals()",
        # which is fragile under any refactoring of this function.
        filtered_oval_ids = None

        if args['from'] or args['to']:
            filtered_oval_ids = revisions_index.get_definition_ids({
                'date': revisions_index.format_daterange(args['from'], args['to'])
            })

        if args['contributors']:
            contributor_filtered_ids = revisions_index.get_definition_ids(
                {'contributor': args['contributors']})
            filtered_oval_ids = (contributor_filtered_ids
                                 if filtered_oval_ids is None
                                 else filtered_oval_ids & contributor_filtered_ids)

        if args['organizations']:
            organization_filtered_ids = revisions_index.get_definition_ids(
                {'organization': args['organizations']})
            filtered_oval_ids = (organization_filtered_ids
                                 if filtered_oval_ids is None
                                 else filtered_oval_ids & organization_filtered_ids)

        # add to query
        if 'oval_id' in query and query['oval_id']:
            # if oval_id(s) specified in args, get intersection with filtered oval ids
            query['oval_id'] = set(query['oval_id']) & filtered_oval_ids
        else:
            query['oval_id'] = filtered_oval_ids

        if not query['oval_id']:
            # every candidate was filtered out; skip querying entirely
            all_definitions_filtered = True

    # --all_definitions OR at least one definition selection option must be specified
    if args['all_definitions'] and query:
        parser.print_help()
        message(
            'error',
            "The '--all_definitions' filter cannot be combined with any other filters."
        )
        sys.exit(1)
    elif not (args['all_definitions'] or query):
        parser.print_help()
        message('error',
                'At least one definitions filtering argument must be provided.')
        sys.exit(1)

    # query index
    query_results = definitions_index.query(
        query) if not all_definitions_filtered else {}

    # get set of all definition ids found
    definition_ids = {document['oval_id'] for document in query_results}
    message('info',
            'Found {0} matching OVAL definitions'.format(len(definition_ids)))

    # create generator and set oval schema version, if necessary
    # (renamed local so it no longer shadows the lib_xml.OvalGenerator class)
    generator = lib_xml.OvalGenerator(message, args['tempdir'])
    if args['max_schema_version']:
        generator.oval_schema_version = args['max_schema_version']

    if definition_ids:
        # add all downstream element ids
        message('info', 'Finding downstream OVAL ids for all definitions')
        elements_index = lib_search.ElementsIndex(message)
        oval_ids = elements_index.find_downstream_ids(definition_ids,
                                                      definition_ids)
        message(
            'info', 'Found {0} downstream OVAL ids'.format(
                len(oval_ids) - len(query_results)))

        # get paths for all elements
        message('info',
                'Finding paths for {0} OVAL elements'.format(len(oval_ids)))
        file_paths = elements_index.get_paths_from_ids(oval_ids)

        # build in memory if there aren't that many files
        if len(file_paths) < 200:
            generator.use_file_queues = False

        # add each OVAL definition to generator
        message(
            'info', 'Generating OVAL definition file with {0} elements'.format(
                len(oval_ids)))
        for file_path in file_paths:
            element_type = lib_repo.get_element_type_from_path(file_path)
            generator.queue_element_file(element_type, file_path)

        # write output file
        message('info',
                'Writing OVAL definitions to {0}'.format(args['outfile']))
        generator.to_file(args['outfile'])

        # validate
        if args['validate']:
            # schema validate
            schema_path = lib_repo.get_oval_def_schema(
                generator.oval_schema_version)
            message('info', 'performing schema validation')
            try:
                lib_xml.schema_validate(args['outfile'], schema_path)
                message('info', 'schema validation successful')
            except lib_xml.SchemaValidationError as e:
                message('error',
                        'schema validation failed:\n\t{0}'.format(e.message))

        if args['schematron']:
            # schematron validate
            schema_path = lib_repo.get_oval_def_schema(
                generator.oval_schema_version)
            message('info', 'performing schematron validation')
            try:
                lib_xml.schematron_validate(args['outfile'], schema_path)
                message('info', 'schematron validation successful')
            except lib_xml.SchematronValidationError as e:
                message(
                    'error', 'schematron validation failed:\n\t{0}'.format(
                        '\n\t'.join(e.messages)))

    seconds_elapsed = time.time() - start_time
    message('info',
            'Completed in {0}!'.format(format_duration(seconds_elapsed)))
def main():
    """Classify every non-definition element in the repository as 'in_use'
    (reachable from some definition) or 'orphan', print a per-type summary,
    and — after confirmation — delete all orphaned element files."""
    start_time = time.time()

    tracking = dict()
    elements_index = lib_search.ElementsIndex(message)

    # getting all definitions
    all_def_ids = set()
    message('info', 'getting all definitions')
    for defpath in lib_repo.get_definition_paths_iterator():
        all_def_ids.add(lib_repo.path_to_oval_id(defpath))
    message('info', 'found {0} definitions'.format(len(all_def_ids)))

    # getting all element ids downstream from any definition
    message('info', 'getting all downstream element ids')
    all_downstream_ids = elements_index.find_downstream_ids(all_def_ids)
    message('info',
            'found {0} downstream element ids'.format(len(all_downstream_ids)))

    # get elements that aren't in all_downstream_ids
    message('info', 'checking all elements')
    cur_element_type = None
    # BUG FIX: initialize the per-type counter so the status-line cleanup
    # after the loop cannot raise NameError when the repository contains
    # only definition elements.
    i_element_type = 0
    for elempath in lib_repo.get_element_paths_iterator():
        oval_id = lib_repo.path_to_oval_id(elempath)
        element_type = lib_repo.get_element_type_from_oval_id(oval_id)

        # skip definitions... we're only pruning child elements
        if element_type == 'definition':
            continue

        # write status msg
        if element_type != cur_element_type:
            cur_element_type = element_type
            i_element_type = 0
        i_element_type = i_element_type + 1
        sys.stdout.write('Analyzing {0}s: {1} \r'.format(
            cur_element_type, i_element_type))
        sys.stdout.flush()

        # it's an orphan if it's not downstream of a definition
        track_as = 'orphan' if oval_id not in all_downstream_ids else 'in_use'
        if track_as not in tracking:
            tracking[track_as] = dict()
        if element_type not in tracking[track_as]:
            tracking[track_as][element_type] = set()
        tracking[track_as][element_type].add(oval_id)

    # clear the in-place status line
    # BUG FIX: removed the pointless .format(cur_element_type, i_element_type)
    # call on a string that contains no placeholders.
    sys.stdout.write('                                                 \r')
    sys.stdout.flush()

    # generate report
    report = []
    for track_as, elements_by_type in tracking.items():
        report.append('\t{0}:'.format(track_as.replace('_', ' ').capitalize()))
        for element_type, oval_ids in elements_by_type.items():
            report.append('\t\t{0} {1}s'.format(len(oval_ids), element_type))
    message('found', '\n'.join(report))

    response = input("\n :::: Remove all orphans? (N[o] / y[es]): ")
    if response.lower() == 'y':
        orphan_ids = set()
        # BUG FIX: guard against KeyError when the user confirms deletion but
        # no orphans were found.
        for element_type, oval_ids in tracking.get('orphan', {}).items():
            orphan_ids.update(oval_ids)
        file_paths = elements_index.get_paths_from_ids(orphan_ids)
        for file_path in file_paths:
            message("INFO",
                    "Deleting Orphan '%s'" % os.path.basename(file_path))
            os.remove(file_path)

    seconds_elapsed = time.time() - start_time
    message('info',
            'Completed in {0}!'.format(format_duration(seconds_elapsed)))
def main():
    """Run the automated QA workflow against uncommitted OVAL changes in the
    local repository.

    Steps (numbered comments below mirror the workflow):
      1. find uncommitted changes, prompting about removed files;
      2. drop changes that are not semantically meaningful;
      3. validate definition metadata (status/dates bookkeeping);
      4. schema-validate every changed element;
      5. assign repository OVAL ids, set minimum schema versions, and bump
         version numbers on changed and affected elements;
      6. after confirmation, write all updated elements back to the repo.
    Interactive: prompts via input(); writes and deletes files on acceptance.
    """
    SCHEMA_VERSION = '5.11.1'
    global verbose
    global debug
    global autoaccept
    parser = argparse.ArgumentParser(description='Performs all of the identified QA tasks against an OVAL submission in the repository')
    output_options = parser.add_argument_group('verbosity options')
    output_options.add_argument('-v', '--verbose', default=False, action="store_true", help='Verbose progress messages')
    output_options.add_argument('-d', '--debug', default=False, action="store_true", help='Include debug information on errors')
    # output_options.add_argument('-a', '--autoaccept', default=False, action="store_true", help='Automatically continue without prompting when it is safe to do so')
    args = vars(parser.parse_args())

    # propagate CLI flags to the module-level globals declared above
    if args['verbose']:
        verbose = True
    if args['debug']:
        debug = True
    # if args['autoaccept']:
    #     autoaccept = True

    # Grab some things we're going to need later
    # First, build the schema path cache (schema version -> schema file path)
    element_index = lib_search.ElementsIndex(message)
    schema_path_cache = {}
    for schema_version in lib_repo.get_schema_versions():
        schema_path_cache[schema_version] = lib_repo.get_oval_def_schema(schema_version)

    # 1. Locate all uncommitted changes to the local repository
    if verbose:
        print("\n + 1: looking for uncommitted changes")
    change_list = lib_git.get_uncommitted_oval()
    if change_list is None or len(change_list) < 1:
        print("\n-------- This update does not include any changes of significance --------\n")
        return

    # 1.1 Determine which of these changes are due to removed files
    if verbose:
        print(" +++ Number of changed items to QA: {0}".format(len(change_list)))
    remove_list = find_removed_items(change_list)

    # 1.2 Issue warning (prompt to continue) if any of the changes are a deleted item
    if verbose:
        print(" + 1.2: Determining if any changes are deleted items...")
    if remove_list is not None and len(remove_list) > 0:
        print("\n -------- The following files were removed as a part of this update --------\n")
        show_files(remove_list)
        # TODO: Offer the option to inspect the OVALIDs in the removed files and
        # build a list of what items, if any, refer to them
        response = input("\n :: Accept these changes (N[o] / y[es] / s[how affected]): ")
        if response == 's' or response == 'S':
            show_affected(remove_list)
            response = input("\n :::: Accept these changes (N[o] / y[es]): ")
            if response != 'y' and response != 'Y':
                return
        elif response != 'y' and response != 'Y':
            return
        # 1.3 Don't include removed files as part of the update
        change_list = [file for file in change_list if file not in remove_list]
    elif verbose:
        print(" +++ No removed items found")

    # 2. Remove all changes that are semantically the same as existing elements (except for states)
    if verbose:
        print("\n + 2: Removing items that don't contain meaningful changes...")
    change_list = prune_unchanged_elements(change_list)

    # 2.1 If that means we have no changes left, there is nothing else to do
    if len(change_list) < 1:
        print("\n ----- This update does not include any changes of significance")
        return
    print("\n ---- Number of changed elements to process: {0}\n".format(len(change_list)))

    # 3. For each element in the list that is a definition, check:
    if verbose:
        print(" + 3: Checking correctness of definition metadata")
    def_list = [path for path in change_list if lib_repo.get_element_type_from_path(path) == 'definition']
    if def_list is not None and len(def_list) > 0:
        # valid_metadata acts as a boolean flag: 1 = all definitions valid so far
        valid_metadata = 1
        if verbose:
            print(" +++ Number of definitions in this update: {0}".format(len(def_list)))
        for def_path in def_list:
            def_element = lib_xml.load_standalone_element(def_path)
            ode = lib_oval.OvalElement(def_element)
            od = lib_oval.OvalDefinition(ode.getElement())
            def_id = od.getId()
            # 3.1 If this is an update, does it change any existing metadata?
            # 3.2 Check existence and accuracy of definition metadata (<status> and date)
            #   - INITIAL SUBMISSION or DRAFT on new submission
            #   - INTERIM if updating a previous definition
            #   - ?
            #   no <dates> - invalid
            #   @version == 0:
            #     no <submitted> - invalid
            #     <status_change>s > 0 - invalid
            #     <status> != "INITIAL SUBMISSION" - invalid
            #   @version > 0:
            #     last <status_change> != <status> - invalid
            # NOTE(review): def_status_change appears to be a dict with keys
            # "Version", "Submitted", "StatusChange", "Status" — contract
            # lives in lib_oval.OvalDefinition; confirm there.
            def_status_change = od.get_last_status_change()
            if def_status_change["Version"] == "0":
                # brand-new definition: must be submitted, have no status
                # changes yet, and carry the INITIAL SUBMISSION status
                if "Submitted" not in def_status_change or def_status_change["Submitted"] is None:
                    print(" ++++ Definition ID %s is NOT valid:" % def_id)
                    print(" - New definitions must contain a submitted element")
                    valid_metadata = 0
                if def_status_change["StatusChange"]:
                    print(" ++++ Definition ID %s is NOT valid:" % def_id)
                    print(" - New definitions should not contain a status change element")
                    valid_metadata = 0
                if def_status_change["Status"] != "INITIAL SUBMISSION":
                    print(" ++++ Definition ID %s is NOT valid:" % def_id)
                    print(" - New definitions must have a status of INITIAL SUBMISSION")
                    valid_metadata = 0
            else:
                # existing definition: its last status change must agree with
                # the definition's current status
                defstatus = def_status_change["Status"]
                lscstatus = def_status_change["StatusChange"]["Status"]
                if (defstatus != lscstatus):
                    print(" ++++ Definition ID %s is NOT valid:" % def_id)
                    print(" - Last status change (%s) does not match definition status (%s)" % (lscstatus, defstatus))
                    valid_metadata = 0
        if valid_metadata == 0:
            print("\n ++++ Definition Metadata is Invalid. Exiting...")
            return
    elif verbose:
        print(" +++ No definitions to check")

    # 4. Schema validate the changes
    # First, generate an OVAL document
    if verbose:
        print("\n + 4: Schema validating changes...")
    schema_path = lib_repo.get_oval_def_schema(SCHEMA_VERSION)
    for element_file in change_list:
        try:
            lib_xml.schema_validate(element_file, schema_path, True)
        except Exception as e:
            print(' Schema validation failed:\n\t{0}'.format(e.message))
            print("\n ### Offending file {0}".format(element_file))
            return
    if verbose:
        print(" ++++ Schema validations passed")
        print("\n + 5: Updating elements...")

    # 5. On passing all of the above, make these changes for all elements:
    oval_id_map = {}          # old submitted OVALID -> newly assigned repo OVALID
    affected_elements = set() # upstream element paths that must be version-bumped
    update_elements = {}      # path -> element tree pending write-back
    for path in change_list:
        oval_element = lib_xml.load_standalone_element(path)
        update_elements[path] = oval_element

        # 5.1 If it's a definition, determine and set the minimum schema version
        ovalid = oval_element.get("id")
        if verbose:
            print("\n ---- Processing submitted element {0}".format(ovalid))
        if lib_repo.get_element_type_from_path(path) == 'definition':
            if verbose:
                print(" --- Is a definition: determining minimum schema version")
            # min_schema = determine_definition_minimum_schema_version(path, element_index, schema_path_cache)
            min_schema = determine_definition_minimum_schema_version(oval_element, element_index, schema_path_cache)
            if min_schema and min_schema is not None:
                if verbose:
                    print(" ---- Determined minimum schema version to be {0}".format(min_schema))
                set_minimum_schema_version(oval_element, min_schema)

        # 5.2 For each element that is not using an OVALID in the CIS namespace:
        is_update = True
        if not is_repository_id(ovalid):
            is_update = False
            element_type = lib_repo.get_element_type_from_path(path)
            new_id = generate_next_ovalid(element_type, element_index)
            if verbose:
                print(" ---- Change submission ID from '{0}' to '{1}'".format(ovalid, new_id))
            oval_element.set("id", new_id)
            # 5.2.1 Set to a unique OVALID in the CIS namespace
            # 5.2.2 Update all references from the old OVALID
            #       (the actual rewrite happens via normalize_ids at save time)
            oval_id_map[ovalid] = new_id

        # 5.3 Set/update version numbers as necessary. The previous step can be used to determine new vice update
        if is_update:
            # 5.3.1 If this is an update, find the current version and increment by one
            if verbose:
                print(" ---- Is an update: incrementing version")
            increment_version(oval_element)
            # Find all upstream elements and add them, as unique, to the list of items to change
            if lib_repo.get_element_type_from_path(path) != 'definition':
                if verbose:
                    print(" ---- Not a definition. Finding affected upstream elements...")
                affected = find_affected(ovalid, element_index)
                if affected is not None and len(affected) > 0:
                    if verbose:
                        print(" ---- Number of elements affected: {0}".format(len(affected)))
                    affected_elements = set().union(affected_elements, affected)
                else:
                    if verbose:
                        print(" >>>>> Warning: found no affected elements for this update. Possible orphan.")
        else:
            # Otherwise, set it to 1
            oval_element.set("version", "1")

    # 5.4 Canonicalize all altered elements (if possible)
    # Now that we know all the elements affected by an update we can increment their IDs once
    if len(affected_elements) > 0:
        if verbose:
            print("\n ------- This update affects {0} upstream elements: incrementing the version number for each...".format(len(affected_elements)))
        for file in affected_elements:
            oval_element = lib_xml.load_standalone_element(file)
            if oval_element is not None:
                increment_version(oval_element)
                #oval_element = normalize_ids(oval_element, oval_id_map)
                update_elements[file] = oval_element

    # 6 Write the element, and remove the old if the path changed
    print("\n=============== Complete ===================")
    print("All automated checks have completed successfully, but the following")
    print(" manual checks need to be made prior to accepting this submission:")
    print(" * Metadata for definitions is complete and accurate")
    print(" * Existing metadata has not been changed")
    print(" * Contains a meaningful change")
    print(" * Does not contain any harmful actions or unacceptable language")
    for x in oval_id_map:
        print(" -- Convert %s to %s" % (x, oval_id_map[x]))
    response = input("\n :::: Save all changes now? (N[o] / y[es]): ")
    if response != 'y' and response != 'Y':
        return
    for path in update_elements:
        # rewrite any references to remapped submission ids before saving
        oval_element = normalize_ids(update_elements[path], oval_id_map)
        if not oval_element or oval_element is None:
            continue
        new_path = lib_repo.get_element_repository_path(oval_element)
        if verbose:
            print("## Writing {0}".format(new_path))
        save_element(oval_element, new_path)
        # if the id change moved the element, drop the stale file
        if new_path != path:
            if verbose:
                print("### Deleting {0}".format(path))
            os.remove(path)
    # 7. Prompt for a message to use for the commit
    # 7.1 Commit and push the changes
    return
def main():
    """Compare the working directory against a remote authoritative copy of
    the repo, identify every element affected by the local changes, build an
    OVAL definitions file containing all affected definitions, and validate it."""
    start_time = time.time()

    parser = argparse.ArgumentParser(
        description=
        'Identify changes in current working directory as compared to a remote authoritative copy of the repo and identify all the elements affected by those changes.'
    )
    # Note: I don't think we need to support files. If a file is submitted,
    # CIS/QA can decompose it and then run this. So this can always run against repo.
    #parser.add_argument('-f', '--file', required=False, help='The name of the source file. If not used the local git repository will be used as the source')
    parser.add_argument('--silent', required=False, action="store_true",
                        help='Suppress messages')
    parser.add_argument(
        '--remote', required=False, default='upstream',
        help="name of authoritative remote (default: 'upstream')")
    parser.add_argument(
        '--branch', required=False, default='master',
        help="name of branch in authoritative remote (default: 'master')")
    parser.add_argument(
        '--outfile', required=False, default='all.affected.oval.xml',
        help="file name OVAL definitions file containing all affected definitions (default 'all.affected.oval.xml')")
    parser.add_argument('-s', '--schematron', default=False, action="store_true",
                        help='schematron validate the affected definitions')
    args = vars(parser.parse_args())
    silent = args['silent']

    ## 1. Find Affected Elements

    # get changes in working dir vs. remote/branch
    message(
        'info', 'Comparing working directory to {0}/{1}'.format(
            args['remote'], args['branch']), silent)
    paths_changed = lib_git.compare_current_oval_to_remote(
        args['remote'], args['branch'])
    if not paths_changed:
        message('info', 'No changes. Aborting.', silent)
        sys.exit(0)
    message(
        'info', 'Found {0} files changed in working directory:\n\t{1}'.format(
            len(paths_changed), '\n\t'.join(paths_changed)), silent)

    # convert paths to oval ids
    oval_ids_changed = {
        lib_repo.path_to_oval_id(filepath)
        for filepath in paths_changed
    }
    message(
        'info',
        'Found {0} OVAL elements changed in working directory:\n\t{1}'.format(
            len(oval_ids_changed), '\n\t'.join(oval_ids_changed)), silent)

    # find all upstream ids
    message(
        'info', 'Finding upstream OVAL ids for {0} element(s)'.format(
            len(oval_ids_changed)), silent)
    elements_index = lib_search.ElementsIndex(message)
    upstream_ids = elements_index.find_upstream_ids(oval_ids_changed, set())
    message(
        'info', 'Found {0} upstream OVAL ids (all element types)'.format(
            len(upstream_ids)), silent)

    # everything affected = the changed elements plus their upstream users
    affected_oval_ids = oval_ids_changed.union(upstream_ids)

    # filter affected to definition ids
    # BUG FIX: this previously filtered upstream_ids only, which silently
    # dropped changed definitions themselves from the output file;
    # affected_oval_ids (computed above) was built for this purpose but
    # never used.
    affected_def_ids = {
        oval_id
        for oval_id in affected_oval_ids
        if lib_repo.get_element_type_from_oval_id(oval_id) == 'definition'
    }
    message(
        'info', 'Found {0} upstream OVAL definitions:\n\t{1}'.format(
            len(affected_def_ids), '\n\t'.join(affected_def_ids)), silent)

    ## 2. Build an OVAL Definitions File and Validate It!
    message('info',
            'Building an OVAL definitions file for all affected definitions.',
            silent)

    # get all downstream elements
    oval_ids = elements_index.find_downstream_ids(affected_def_ids,
                                                  affected_def_ids)
    file_paths = elements_index.get_paths_from_ids(oval_ids)

    # add each OVAL definition to generator and write to file
    message(
        'info', "Generating OVAL definition file '{0}' with {1} elements".format(
            args['outfile'], len(oval_ids)), silent)
    # (renamed local so it no longer shadows the lib_xml.OvalGenerator class)
    generator = lib_xml.OvalGenerator(message)
    for file_path in file_paths:
        element_type = lib_repo.get_element_type_from_path(file_path)
        generator.queue_element_file(element_type, file_path)
    generator.to_file(args['outfile'])

    # validate
    schema_path = lib_repo.get_oval_def_schema('5.11.1')
    message('info', 'Performing schema validation', silent)
    try:
        lib_xml.schema_validate(args['outfile'], schema_path)
        message('info', 'Schema validation successful', silent)
    except lib_xml.SchemaValidationError as e:
        message('error', 'Schema validation failed:\n\t{0}'.format(e.message),
                silent)

    if args['schematron']:
        # schematron validate
        schema_path = lib_repo.get_oval_def_schema('5.11.1')
        message('info', 'Performing schematron validation', silent)
        try:
            lib_xml.schematron_validate(args['outfile'], schema_path)
            message('info', 'Schematron validation successful', silent)
        except lib_xml.SchematronValidationError as e:
            message(
                'error', 'Schematron validation failed:\n\t{0}'.format(
                    '\n\t'.join(e.messages)), silent)

    # Find all downstream children -- that is, a search depth of one
    # Find all upstream users, all the way up to the definition
    # Sort the list: definitions, then tests, objects, states, and variables
    # Show the list
    # Offer to build an OVAL file that contains all the changes

    seconds_elapsed = time.time() - start_time
    message('info',
            'Completed in {0}!'.format(format_duration(seconds_elapsed)),
            silent)