Example #1
    def get_remote_schema(self, uri):
        """Attempt to retrieve schema by URI (or find it in our cache)"""

        if '#' in uri:
            uri, path = uri.split('#')

        schema_data = self.remote_schemas.get(uri)

        if schema_data:
            return schema_data

        # Do we have a mapping for this remote URI to a local path?
        if '://' in uri:
            protocol, uri_part = uri.split('://')
        else:
            uri_part = uri
        for partial_uri in self.uri_to_local.keys():
            if uri_part.startswith(partial_uri):
                local_uri = self.uri_to_local[partial_uri] + uri_part[len(partial_uri):]
                schema_data = DocGenUtilities.load_as_json(local_uri)
                # If loading the local file fails, fall through to fetching the schema remotely.
                if schema_data:
                    return schema_data

        schema_data = DocGenUtilities.http_load_as_json(uri)
        if schema_data:
            schema_data['_schema_name'] = self.find_schema_name(uri, schema_data)
            self.remote_schemas[uri] = schema_data
            return schema_data

        return None
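
The cache-miss path above hinges on the uri_to_local mapping. That resolution step can be exercised in isolation; a minimal sketch with a hypothetical mapping and URI (resolve_local_path is an illustrative helper, not part of the class):

def resolve_local_path(uri, uri_to_local):
    """Map a remote URI to a local path via a prefix mapping (sketch)."""
    # Strip the protocol, since the mapping keys are protocol-less prefixes.
    if '://' in uri:
        _, uri_part = uri.split('://', 1)
    else:
        uri_part = uri
    for partial_uri, local_root in uri_to_local.items():
        if uri_part.startswith(partial_uri):
            return local_root + uri_part[len(partial_uri):]
    return None

mapping = {'redfish.dmtf.org/schemas/v1': '/home/user/schemas'}
print(resolve_local_path('http://redfish.dmtf.org/schemas/v1/Chassis.json',
                         mapping))
# -> /home/user/schemas/Chassis.json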
Example #2
    def merge_required_profile(self, profile_resources, req_profile_name,
                               req_profile_info):
        """ Merge a required profile into profile_resources (a dict). May result in recursive calls. """

        req_profile_repo = req_profile_info.get(
            'Repository', 'http://redfish.dmtf.org/profiles')
        req_profile_minversion = req_profile_info.get('MinVersion', '1.0.0')
        version_string = req_profile_minversion.replace('.', '_')
        req_profile_data = None

        # Retrieve profile.
        # req_profile_repo will be a fully-qualified URI. It may be overridden by
        # uri-to-local mapping.
        base_uri = '/'.join([req_profile_repo, req_profile_name])
        if '://' in base_uri:  # This is expected.
            protocol, base_uri = base_uri.split('://')

        is_local_file = False
        for partial_uri in self.config['profile_uri_to_local'].keys():
            if base_uri.startswith(partial_uri):
                local_path = self.config['profile_uri_to_local'][partial_uri]
                if partial_uri.endswith(req_profile_name):
                    req_profile_repo = local_path[0:-len(req_profile_name)]
                else:
                    req_profile_repo = local_path
                is_local_file = True
                break

        req_profile_uri = self.get_versioned_uri(req_profile_name,
                                                 req_profile_repo,
                                                 version_string, is_local_file)

        if not req_profile_uri:
            warnings.warn("Unable to find Profile for " + req_profile_repo +
                          ", " + req_profile_name + ", minimum version: " +
                          req_profile_minversion)
            return profile_resources

        if is_local_file:
            req_profile_data = DocGenUtilities.load_as_json(req_profile_uri)
        else:
            req_profile_data = DocGenUtilities.http_load_as_json(
                req_profile_uri)

        if req_profile_data:
            if 'RequiredProfiles' in req_profile_data:
                for req_profile_name in req_profile_data['RequiredProfiles'].keys():
                    profile_resources = self.merge_required_profile(
                        profile_resources, req_profile_name,
                        req_profile_data['RequiredProfiles'][req_profile_name])

            profile_resources = self.merge_dicts(
                profile_resources, req_profile_data.get('Resources', {}))

        return profile_resources
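
Two details above are easy to miss: MinVersion is converted to an underscore-separated version_string for URI construction, and when a mapping key ends with the profile name, the trailing name is sliced off the mapped path to recover the repo. A small sketch with hypothetical values:

req_profile_name = 'BasicInstrument'
req_profile_minversion = '1.2.0'
version_string = req_profile_minversion.replace('.', '_')
print(version_string)  # -> 1_2_0

# The mapping key ends with the profile name, so trim it from the mapped path:
local_path = '/home/user/profiles/BasicInstrument'
req_profile_repo = local_path[0:-len(req_profile_name)]
print(req_profile_repo)  # -> /home/user/profiles/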
Example #3
    def generate_docs(self, level=0):
        """Given a list of files, generate a block of documentation.

        This is the main loop of the product.
        """
        files_to_process = self.get_files(self.import_from)
        files, schema_data = self.group_files(files_to_process)

        property_data = {}
        doc_generator_meta = {}

        for normalized_uri in files.keys():
            data = self.process_files(normalized_uri, files[normalized_uri])
            if not data:
                # If we're in profile mode, this is probably normal.
                if not self.config['profile_mode']:
                    warnings.warn("Unable to process files for " +
                                  normalized_uri)
                continue
            property_data[normalized_uri] = data
            doc_generator_meta[normalized_uri] = data['doc_generator_meta']
            latest_info = files[normalized_uri][-1]
            latest_file = os.path.join(latest_info['root'],
                                       latest_info['filename'])
            latest_data = DocGenUtilities.load_as_json(latest_file)
            latest_data['_is_versioned_schema'] = latest_info.get(
                '_is_versioned_schema')
            latest_data['_is_collection_of'] = latest_info.get(
                '_is_collection_of')
            latest_data['_schema_name'] = latest_info.get('schema_name')
            schema_data[normalized_uri] = latest_data

        traverser = SchemaTraverser(schema_data, doc_generator_meta,
                                    self.config['uri_to_local'])

        # Generate output
        if self.config['output_format'] == 'markdown':
            from doc_formatter import MarkdownGenerator
            generator = MarkdownGenerator(property_data, traverser,
                                          self.config, level)
        elif self.config['output_format'] == 'html':
            from doc_formatter import HtmlGenerator
            generator = HtmlGenerator(property_data, traverser, self.config,
                                      level)
        elif self.config['output_format'] == 'csv':
            from doc_formatter import CsvGenerator
            generator = CsvGenerator(property_data, traverser, self.config,
                                     level)
        else:
            raise ValueError('Unsupported output_format: ' +
                             self.config['output_format'])

        return generator.generate_output()
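
The format dispatch could equally be table-driven. A sketch under the assumption that the three generator classes share the constructor signature shown above; this is an alternative shape, not the project's actual code:

from doc_formatter import MarkdownGenerator, HtmlGenerator, CsvGenerator

GENERATORS = {
    'markdown': MarkdownGenerator,
    'html': HtmlGenerator,
    'csv': CsvGenerator,
}

def make_generator(output_format, property_data, traverser, config, level):
    # Look up the formatter class; fail loudly on an unknown format.
    try:
        cls = GENERATORS[output_format]
    except KeyError:
        raise ValueError('Unsupported output_format: ' + output_format)
    return cls(property_data, traverser, config, level)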
Example #4
    def __init__(self, import_from, outfile, config):
        self.config = config
        self.import_from = import_from
        self.outfile = outfile

        if config['profile_mode']:
            config['profile'] = DocGenUtilities.load_as_json(
                config.get('profile_doc'))
            profile_resources = {}

            if 'RequiredProfiles' in config['profile']:
                for req_profile_name in config['profile']['RequiredProfiles'].keys():
                    profile_resources = self.merge_required_profile(
                        profile_resources, req_profile_name,
                        config['profile']['RequiredProfiles'][req_profile_name])

            if 'Registries' in config['profile']:
                config['profile']['registries_annotated'] = {}
                for registry_name in config['profile']['Registries'].keys():
                    registry_summary = self.process_registry(
                        registry_name,
                        config['profile']['Registries'][registry_name])
                    config['profile']['registries_annotated'][
                        registry_name] = registry_summary

            profile_resources = self.merge_dicts(
                profile_resources,
                self.config.get('profile', {}).get('Resources', {}))

            if not profile_resources:
                warnings.warn(
                    'No profile resource data found; unable to produce profile mode documentation.'
                )
                exit()

            # Index profile_resources by Repository & schema name
            profile_resources_indexed = {}
            for schema_name in profile_resources.keys():
                profile_data = profile_resources[schema_name]
                repository = profile_data.get('Repository',
                                              'redfish.dmtf.org/schemas/v1')
                normalized_uri = repository + '/' + schema_name + '.json'
                profile_data['Schema_Name'] = schema_name
                profile_resources_indexed[normalized_uri] = profile_data

            self.config['profile_resources'] = profile_resources_indexed
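
The indexing loop at the end is straightforward to demonstrate standalone. A sketch with invented profile data, showing how the Repository default and schema name combine into the normalized-URI key:

profile_resources = {
    'Chassis': {'MinVersion': '1.2.0'},
    'Thermal': {'Repository': 'contoso.com/schemas', 'MinVersion': '1.0.0'},
}

profile_resources_indexed = {}
for schema_name, profile_data in profile_resources.items():
    repository = profile_data.get('Repository', 'redfish.dmtf.org/schemas/v1')
    normalized_uri = repository + '/' + schema_name + '.json'
    profile_data['Schema_Name'] = schema_name
    profile_resources_indexed[normalized_uri] = profile_data

print(sorted(profile_resources_indexed))
# -> ['contoso.com/schemas/Thermal.json',
#     'redfish.dmtf.org/schemas/v1/Chassis.json']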
Example #5
def test_load_as_json_file_not_good_warns():
    with pytest.warns(UserWarning):
        data = DocGenUtilities.load_as_json(
            os.path.join(sampledir, 'badjson.json'))
        assert data == {}
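
This test and the next pin down the contract of DocGenUtilities.load_as_json: valid JSON is parsed and returned, while a malformed or unreadable file produces a UserWarning and an empty dict. A minimal implementation satisfying that contract might look like the following (a sketch; the project's actual body may differ):

import json
import warnings

def load_as_json(filename):
    """Load a JSON file; warn and return {} if it cannot be parsed."""
    try:
        with open(filename, 'r', encoding='utf-8') as f:
            return json.load(f)
    except (OSError, ValueError) as err:
        # json.JSONDecodeError subclasses ValueError, so it is caught here.
        warnings.warn('Unable to read ' + filename + ': ' + str(err))
        return {}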
Example #6
def test_load_as_json():
    data = DocGenUtilities.load_as_json(os.path.join(sampledir, '1.json'))
    assert data['foo'] == 'bar' and data['baz'] == ['foo', 'bar', 'baz']
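
For these assertions to pass, the sampledir/1.json fixture must contain at least:

{
    "foo": "bar",
    "baz": ["foo", "bar", "baz"]
}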
Example #7
    def process_data_file(self, schema_ref, ref, property_data):
        """Process a single file by ref name, identifying metadata and updating property_data."""

        filename = os.path.join(ref['root'], ref['filename'])
        normalized_uri = self.construct_uri_for_filename(filename)

        # Get the un-versioned filename for match against profile keys
        if '.v' in filename:
            generalized_uri = self.construct_uri_for_filename(
                filename.split('.v')[0]) + '.json'
        else:
            generalized_uri = self.construct_uri_for_filename(filename)

        profile_mode = self.config['profile_mode']
        profile = self.config['profile_resources']

        data = DocGenUtilities.load_as_json(filename)
        schema_name = SchemaTraverser.find_schema_name(filename, data, True)
        version = self.get_version_string(ref['filename'])

        property_data['schema_name'] = schema_name
        property_data['latest_version'] = version
        property_data['name_and_version'] = schema_name
        property_data['normalized_uri'] = normalized_uri

        min_version = False
        if profile_mode:
            schema_profile = profile.get(generalized_uri)
            if schema_profile:
                min_version = schema_profile.get('MinVersion')
                if min_version:
                    if version:
                        property_data['name_and_version'] += (
                            ' v' + min_version + '+ (current release: v' +
                            version + ')')
                    else:
                        # This case is unlikely in practice.
                        property_data['name_and_version'] += (
                            ' v' + min_version + '+')
            else:
                # Skip schemas that aren't mentioned in the profile:
                return {}
        elif version:
            property_data['name_and_version'] += ' ' + version

        if 'properties' not in property_data:
            property_data['properties'] = {}
        meta = property_data.get('doc_generator_meta',
                                 {'schema_name': schema_name})

        if version == '1.0.0':
            version = None

        if (not version) and (schema_ref in property_data):
            warnings.warn(
                'Check ' + schema_ref + ' for version problems. '
                'Are there two files with either version 1.0.0 or no version?')

        try:
            property_data['definitions'] = data['definitions']
            for ref_part in ref['ref'].split('/'):
                if not ref_part:
                    continue
                data = data[ref_part]

            # resolve anyOf to embedded object, if present:
            if 'anyOf' in data:
                for elt in data['anyOf']:
                    if ('type' in elt) and (elt['type'] == 'object'):
                        data = elt
                        break

            properties = data['properties']
            property_data['properties'] = properties

        except KeyError:
            warnings.warn('Unable to find properties in path ' + ref['ref'] +
                          ' from ' + filename)
            return {}

        meta = self.extend_metadata(meta, properties, version,
                                    normalized_uri + '#properties/')
        meta['definitions'] = meta.get('definitions', {})
        definitions = property_data['definitions']
        meta['definitions'] = self.extend_metadata(
            meta['definitions'], definitions, version,
            normalized_uri + '#definitions/')
        property_data['doc_generator_meta'] = meta

        return property_data
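
The generalized-URI step above strips the version suffix so that a versioned schema file can be matched against unversioned profile keys. In isolation (with a hypothetical path, and omitting construct_uri_for_filename):

filename = '/schemas/Chassis.v1_2_0.json'
if '.v' in filename:
    generalized = filename.split('.v')[0] + '.json'
else:
    generalized = filename
print(generalized)  # -> /schemas/Chassis.json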
Example #8
    def group_files(self, files):
        """Traverse files, grouping any unversioned/versioned schemas together.

        Parses json to identify versioned files.
        Returns a dict of {normalized_uri : [versioned files]} where each
        versioned file is a dict of {root, filename, ref path, schema_name,
        _is_versioned_schema, _is_collection_of}.
        """

        file_list = [os.path.abspath(filename) for filename in files]
        grouped_files = {}
        all_schemas = {}
        missing_files = []
        processed_files = []

        for filename in file_list:
            # Get the (probably versioned) filename, and save the data:
            root, _, fname = filename.rpartition(os.sep)

            data = DocGenUtilities.load_as_json(filename)

            schema_name = SchemaTraverser.find_schema_name(fname, data)
            if schema_name is None: continue

            normalized_uri = self.construct_uri_for_filename(filename)

            data['_schema_name'] = schema_name
            all_schemas[normalized_uri] = data

            if filename in processed_files: continue

            ref = ''
            if '$ref' in data:
                ref = data['$ref'][1:]  # drop initial '#'
            else:
                continue

            # Versioned filenames (e.g., Name.v1_0_0.json) contain more than
            # one dot; those files are grouped via their unversioned schema.
            if fname.count('.') > 1:
                continue

            original_ref = ref
            for pathpart in ref.split('/'):
                if not pathpart: continue
                data = data[pathpart]

            ref_files = []

            # is_versioned_schema will be True if there is an "anyOf" pointing to one or more versioned files.
            is_versioned_schema = False

            # is_collection_of will contain the type of objects in the collection.
            is_collection_of = None

            if 'anyOf' in data:
                for obj in data['anyOf']:
                    if '$ref' in obj:
                        refpath_uri, refpath_path = obj['$ref'].split('#')
                        if refpath_path == '/definitions/idRef':
                            is_versioned_schema = True
                            continue
                        ref_fn = refpath_uri.split('/')[-1]
                        # Skip files that are not present.
                        ref_filename = os.path.abspath(
                            os.path.join(root, ref_fn))
                        if ref_filename in file_list:
                            ref_files.append({
                                'root': root,
                                'filename': ref_fn,
                                'ref': refpath_path,
                                'schema_name': schema_name
                            })
                        elif ref_filename not in missing_files:
                            missing_files.append(ref_filename)

                    else:
                        # If there is anything that's not a ref, this isn't an unversioned schema.
                        # It's probably a Collection. Zero out ref_files and skip the rest so we
                        # can save this as a single-file group.
                        if 'properties' in obj:
                            if 'Members' in obj['properties']:
                                # It's a collection. What is it a collection of?
                                member_ref = obj['properties']['Members'].get(
                                    'items', {}).get('$ref')
                                if member_ref:
                                    is_collection_of = self.normalize_ref(
                                        member_ref)
                        ref_files = []
                        continue

            elif '$ref' in data:
                refpath_uri, refpath_path = data['$ref'].split('#')
                if refpath_path == '/definitions/idRef':
                    continue

                ref_fn = refpath_uri.split('/')[-1]
                # Skip files that are not present.
                ref_filename = os.path.abspath(os.path.join(root, ref_fn))
                if ref_filename in file_list:
                    ref_files.append({
                        'root': root,
                        'filename': ref_fn,
                        'ref': refpath_path,
                        'schema_name': schema_name
                    })
                elif ref_filename not in missing_files:
                    missing_files.append(ref_filename)

            else:
                ref = original_ref

        if len(ref_files):
            # Add the _is_versioned_schema and _is_collection_of hints to each ref object.
            for ref_obj in ref_files:
                ref_obj.update({
                    '_is_versioned_schema': is_versioned_schema,
                    '_is_collection_of': is_collection_of
                })
            grouped_files[normalized_uri] = ref_files

        if normalized_uri not in grouped_files:
            # This is not an unversioned schema after all.
            grouped_files[normalized_uri] = [{
                'root': root,
                'filename': fname,
                'ref': ref,
                'schema_name': schema_name,
                '_is_versioned_schema': is_versioned_schema,
                '_is_collection_of': is_collection_of
            }]

            # Note these files as processed:
            processed_files.append(filename)
            for file_refs in grouped_files[normalized_uri]:
                ref_filename = os.path.join(file_refs['root'],
                                            file_refs['filename'])
                processed_files.append(ref_filename)

        if len(missing_files):
            numfiles = len(missing_files)
            if numfiles <= 10:
                missing_files_list = '\n   '.join(missing_files)
            else:
                missing_files_list = ('\n   '.join(missing_files[0:10]) +
                                      '\n   and ' + str(numfiles - 10) +
                                      ' more.')
            warnings.warn(
                str(numfiles) + " referenced files were missing: \n   " +
                missing_files_list)

        return grouped_files, all_schemas
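
Per the docstring, each grouped_files entry maps a normalized URI to a list of per-version file descriptors. A hypothetical entry for an unversioned Chassis schema whose anyOf references two versioned files (all values invented for illustration):

grouped_files = {
    'redfish.dmtf.org/schemas/v1/Chassis.json': [
        {'root': '/schemas', 'filename': 'Chassis.v1_0_0.json',
         'ref': '/definitions/Chassis', 'schema_name': 'Chassis',
         '_is_versioned_schema': True, '_is_collection_of': None},
        {'root': '/schemas', 'filename': 'Chassis.v1_1_0.json',
         'ref': '/definitions/Chassis', 'schema_name': 'Chassis',
         '_is_versioned_schema': True, '_is_collection_of': None},
    ]
}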