Beispiel #1
0
 def run_flextglgen(self, template_dir=None):
     # Drive flextGLgen on this test's profile.txt, emitting output into
     # <path>/generated. When no explicit template dir is supplied, the
     # test's own directory is used as the template source.
     options = Empty()
     options.download = False
     options.outdir = os.path.join(self.path, 'generated')
     options.template_dir = template_dir or self.path
     flextGLgen.main('-D generated -t somepath profile.txt', options,
                     os.path.join(self.path, 'profile.txt'))
Beispiel #2
0
 def test_out(self):
     # The output directory path points at an existing file
     # (profile.txt), so the generator is expected to abort via
     # SystemExit.
     with self.assertRaises(SystemExit):
         opts = Empty()
         opts.download = False
         opts.template_dir = os.path.join(self.root, 'templates')
         opts.outdir = os.path.join(self.cwd, 'profile.txt')
         flextGLgen.main('', opts, os.path.join(self.cwd, 'profile.txt'))
Beispiel #3
0
 def test_template(self):
     # A nonexistent template directory should make the generator bail
     # out with SystemExit.
     with self.assertRaises(SystemExit):
         opts = Empty()
         opts.download = False
         opts.template_dir = os.path.join(self.cwd, 'nonexistent')
         opts.outdir = os.path.join(self.path, 'generated')
         flextGLgen.main('', opts, os.path.join(self.cwd, 'profile.txt'))
Beispiel #4
0
 def run_flextglgen(self, profile):
     # Run flextGLgen on the given profile file, writing output into
     # <path>/generated. Picks the shared template from the repo root if
     # self.template is set, otherwise templates come from the test dir.
     options = Empty()
     options.download = False
     options.outdir = os.path.join(self.path, 'generated')
     if self.template:
         options.template_dir = os.path.join(
             self.root, 'templates', self.template)
     else:
         options.template_dir = self.path
     flextGLgen.main('-D generated -t somepath ' + profile, options,
                     os.path.join(self.cwd, profile))
Beispiel #5
0
def _pelican_new_page(generator):
    # Set a dummy page referrer path so :ref-prefixes: works in Pelican as well
    # TODO: any chance this could be made non-crappy?
    global current_referer_path
    assert not current_referer_path or (
        len(current_referer_path) == 1
        and current_referer_path[0][0].name == 'PAGE')
    # The EntryType enum isn't available here, so fake an object exposing
    # just the .name attribute that consumers read
    fake_type = Empty()
    fake_type.name = 'PAGE'
    current_referer_path = [(fake_type, '')]
Beispiel #6
0
def pretty_print_trie(serialized: bytes, show_merged=False, show_lookahead_barriers=True, colors=False):
    """Render a serialized search trie as human-readable text.

    Returns a (pretty-printed output, stats summary string) tuple.
    """
    color_map = color_map_colors if colors else color_map_dummy

    hashtable = {}

    # Counters filled in by the recursive _pretty_print_trie() walk
    counters = Empty()
    counters.node_count = 0
    counters.max_node_results = 0
    counters.max_node_children = 0
    counters.max_node_result_index = 0
    counters.max_node_child_offset = 0

    root_offset = Trie.root_offset_struct.unpack_from(serialized, 0)[0]
    out = _pretty_print_trie(serialized, hashtable, counters, root_offset,
                             '', show_merged=show_merged,
                             show_lookahead_barriers=show_lookahead_barriers,
                             color_map=color_map)
    if out:
        out = color_map['white'] + out
    stats = """
node count:             {}
max node results:       {}
max node children:      {}
max node result index:  {}
max node child offset:  {}""".lstrip().format(counters.node_count, counters.max_node_results, counters.max_node_children, counters.max_node_result_index, counters.max_node_child_offset)
    return out, stats
Beispiel #7
0
    def add(self,
            name,
            url,
            alias=None,
            suffix_length=0,
            flags=ResultFlag(0)) -> int:
        """Append a search result entry, returning its index in self.entries."""
        if suffix_length:
            flags |= ResultFlag.HAS_SUFFIX
        if alias is not None:
            # Alias entries have to be explicitly typed as such
            assert flags & ResultFlag._TYPE == ResultFlag.ALIAS

        entry = Empty()
        entry.name = name
        entry.url = url
        entry.flags = flags
        entry.alias = alias
        # Prefix fields start zeroed; prefix merging happens elsewhere
        entry.prefix = 0
        entry.prefix_length = 0
        entry.suffix_length = suffix_length

        self.entries.append(entry)
        return len(self.entries) - 1
Beispiel #8
0
def merge_inventories(name_map, **kwargs):
    """Merge the documented symbols from name_map into the global
    intersphinx inventory, add cross-linkable external entries back into
    name_map, and optionally dump the internal inventory to a file."""
    global intersphinx_inventory

    # Create inventory entries from the name_map
    internal_inventory = {}
    for path_str, entry in name_map.items():
        EntryType = type(entry.type)  # so we don't need to import the enum
        if entry.type == EntryType.MODULE:
            type_string = 'py:module'
        elif entry.type == EntryType.CLASS:
            type_string = 'py:class'
        elif entry.type == EntryType.FUNCTION:
            # TODO: properly distinguish between 'py:function',
            # 'py:classmethod', 'py:staticmethod', 'py:method'
            type_string = 'py:function'
        elif entry.type == EntryType.OVERLOADED_FUNCTION:
            # TODO: what about the other overloads?
            type_string = 'py:function'
        elif entry.type == EntryType.PROPERTY:
            # datetime.date.year is decorated with @property and listed as a
            # py:attribute, so that's probably it
            type_string = 'py:attribute'
        elif entry.type == EntryType.ENUM:
            type_string = 'py:enum'  # this doesn't exist in Sphinx
        elif entry.type == EntryType.ENUM_VALUE:
            type_string = 'py:enumvalue'  # these don't exist in Sphinx
        elif entry.type == EntryType.DATA:
            type_string = 'py:data'
        elif entry.type == EntryType.PAGE:
            type_string = 'std:doc'
        elif entry.type == EntryType.SPECIAL:
            # TODO: this will cause duplicates when multiple m.css projects
            #   gets together, solve better
            type_string = 'std:special'
        else:  # pragma: no cover
            assert False

        # Mark those with m-doc (as internal)
        internal_inventory.setdefault(type_string,
                                      {})[path_str] = (entry.url, '-',
                                                       ['m-doc'])

    # Add class / enum / enum value inventory entries to the name map for type
    # cross-linking
    for type_, type_string in [
            # TODO: this will blow up if the above loop is never entered (which is
            # unlikely) as EntryType is defined there
        (EntryType.CLASS, 'py:class'),
            # Otherwise we can't link to standard exceptions from :raise:
        (EntryType.CLASS, 'py:exception'),  # TODO: special type for these?
        (EntryType.DATA, 'py:data'),  # typing.Tuple or typing.Any is data
            # Those are custom to m.css, not in Sphinx
        (EntryType.ENUM, 'py:enum'),
        (EntryType.ENUM_VALUE, 'py:enumvalue'),
    ]:
        if type_string in intersphinx_inventory:
            for path, value in intersphinx_inventory[type_string].items():
                url, _, css_classes = value
                # Synthesize a minimal name_map entry for the external symbol
                # so it can be cross-linked like an internal one
                entry = Empty()
                entry.type = type_
                entry.object = None
                entry.path = path.split('.')
                entry.css_classes = css_classes
                entry.url = url
                name_map[path] = entry

    # Add stuff from the name map to our inventory
    for type_, data_internal in internal_inventory.items():
        data = intersphinx_inventory.setdefault(type_, {})
        for path, value in data_internal.items():
            # Ignore duplicate things (such as `index` etc.)
            # TODO: solve better
            if path in data: continue
            data[path] = value

    # Save the internal inventory, if requested. Again basically a copy of
    # sphinx.util.inventory.InventoryFile.dump().
    if inventory_filename:
        with open(os.path.join(inventory_filename), 'wb') as f:
            # Header
            # TODO: user-defined project/version
            f.write(
                b'# Sphinx inventory version 2\n'
                b'# Project: X\n'
                b'# Version: 0\n'
                b'# The remainder of this file is compressed using zlib.\n')

            # Body. Sorting so it's in a reproducible order for testing.
            compressor = zlib.compressobj(9)
            for type_, data in sorted(internal_inventory.items()):
                for path, value in data.items():
                    url, title, css_classes = value
                    # The type has to contain a colon. Wtf is the 2?
                    assert ':' in type_
                    f.write(
                        compressor.compress('{} {} 2 {} {}\n'.format(
                            path, type_, url, title).encode('utf-8')))
            f.write(compressor.flush())
Beispiel #9
0
    def serialize(self, merge_prefixes=True) -> bytearray:
        """Serialize all result entries into a compact binary blob.

        With merge_prefixes enabled, entries whose name shares a prefix
        with another entry are stored as (prefix index, prefix length,
        remaining name/url suffix) to save space. Returns the packed
        bytearray; the layout is an offset array followed by the entries
        themselves.
        """
        output = bytearray()

        if merge_prefixes:
            # Put all entry names into a trie to discover common prefixes
            trie = Trie()
            for index, e in enumerate(self.entries):
                trie.insert(e.name, index)

            # Create a new list with merged prefixes
            merged = []
            for index, e in enumerate(self.entries):
                # Search in the trie and get the longest shared name prefix
                # that is already fully contained in some other entry
                current = trie
                longest_prefix = None
                for c in e.name.encode('utf-8'):
                    # Descend one level; the byte must exist since the name
                    # itself was inserted above
                    for candidate, child in current.children.items():
                        if c == candidate:
                            current = child[1]
                            break
                    else:
                        assert False  # pragma: no cover

                    # Allow self-reference only when referenced result suffix
                    # is longer (otherwise cycles happen). This is for
                    # functions that should appear when searching for foo (so
                    # they get ordered properly based on the name length) and
                    # also when searching for foo() (so everything that's not
                    # a function gets filtered out). Such entries are
                    # completely the same except for a different suffix length.
                    if index in current.results:
                        for i in current.results:
                            if self.entries[i].suffix_length > self.entries[
                                    index].suffix_length:
                                longest_prefix = current
                                break
                    elif current.results:
                        longest_prefix = current

                # Name prefix found, for all possible URLs find the one that
                # shares the longest prefix
                if longest_prefix:
                    max_prefix = (0, -1)
                    for longest_index in longest_prefix.results:
                        # Ignore self (function self-reference, see above)
                        if longest_index == index: continue

                        # Count how many leading URL characters match
                        prefix_length = 0
                        for i in range(
                                min(len(e.url),
                                    len(self.entries[longest_index].url))):
                            if e.url[i] != self.entries[longest_index].url[i]:
                                break
                            prefix_length += 1
                        if max_prefix[1] < prefix_length:
                            max_prefix = (longest_index, prefix_length)

                    # Expect we found something
                    assert max_prefix[1] != -1

                    # Save the entry with reference to the prefix
                    entry = Empty()
                    assert e.name.startswith(
                        self.entries[longest_prefix.results[0]].name)
                    entry.name = e.name[len(self.entries[longest_prefix.
                                                         results[0]].name):]
                    entry.url = e.url[max_prefix[1]:]
                    entry.flags = e.flags | ResultFlag.HAS_PREFIX
                    entry.alias = e.alias
                    entry.prefix = max_prefix[0]
                    entry.prefix_length = max_prefix[1]
                    entry.suffix_length = e.suffix_length
                    merged += [entry]

                # No prefix found, copy the entry verbatim
                else:
                    merged += [e]

            # Everything merged, replace the original list
            self.entries = merged

        # Write the offset array. Starting offset for items is after the offset
        # array and the file size
        offset = (len(self.entries) + 1) * 4
        for e in self.entries:
            # Offsets are stored in 24 bits, the top byte holds the flags
            assert offset < 2**24
            output += self.offset_struct.pack(offset)
            self.flags_struct.pack_into(output, len(output) - 1, e.flags.value)

            # The entry is an alias, extra field for alias index
            if e.flags & ResultFlag._TYPE == ResultFlag.ALIAS:
                offset += self.alias_struct.size

            # Extra field for prefix index and length
            if e.flags & ResultFlag.HAS_PREFIX:
                offset += self.prefix_struct.size

            # Extra field for suffix length
            if e.flags & ResultFlag.HAS_SUFFIX:
                offset += self.suffix_length_struct.size

            # Length of the name
            offset += len(e.name.encode('utf-8'))

            # Length of the URL and 0-delimiter. If URL is empty, it's not
            # added at all, then the 0-delimiter is also not needed.
            if e.name and e.url:
                offset += len(e.url.encode('utf-8')) + 1

        # Write file size
        output += self.offset_struct.pack(offset)

        # Write the entries themselves
        for e in self.entries:
            if e.flags & ResultFlag._TYPE == ResultFlag.ALIAS:
                assert not e.alias is None
                assert not e.url
                output += self.alias_struct.pack(e.alias)
            if e.flags & ResultFlag.HAS_PREFIX:
                output += self.prefix_struct.pack(e.prefix, e.prefix_length)
            if e.flags & ResultFlag.HAS_SUFFIX:
                output += self.suffix_length_struct.pack(e.suffix_length)
            output += e.name.encode('utf-8')
            if e.url:
                output += b'\0'
                output += e.url.encode('utf-8')

        # Sanity check: the computed size matches what was actually written
        assert len(output) == offset
        return output
Beispiel #10
0
 def run_flextglgen(self, profile):
     # Run flextGLgen with the 'glfw' template against the given profile,
     # emitting generated files into <cwd>/generated
     config = Empty()
     config.download = False
     config.outdir = os.path.join(self.cwd, 'generated')
     config.template = 'glfw'
     flextGLgen.main('', config, os.path.join(self.cwd, profile))