Example #1
    def runtest(self):
        import io
        import os

        import numpy as np
        import pytest

        from asdf import AsdfFile, block, util
        from asdf.tests import helpers

        name, version = parse_schema_filename(self.filename)
        if should_skip(name, version):
            return

        standard_version = self._find_standard_version(name, version)

        # Make sure that the examples in the schema files (and thus the
        # ASDF standard document) are valid.
        buff = helpers.yaml_to_asdf(
            'example: ' + self.example.strip(), standard_version=standard_version)

        ff = AsdfFile(
            uri=util.filepath_to_url(os.path.abspath(self.filename)),
            ignore_unrecognized_tag=self.ignore_unrecognized_tag,
            ignore_version_mismatch=self.ignore_version_mismatch,
        )

        # Fake an external file
        ff2 = AsdfFile({'data': np.empty((1024*1024*8), dtype=np.uint8)})

        ff._external_asdf_by_uri[
            util.filepath_to_url(
                os.path.abspath(
                    os.path.join(
                        os.path.dirname(self.filename), 'external.asdf')))] = ff2

        # Add some dummy blocks so that the ndarray examples work
        for i in range(3):
            b = block.Block(np.zeros((1024*1024*8), dtype=np.uint8))
            b._used = True
            ff.blocks.add(b)
        # Only the final dummy block is marked as streamed, so examples that
        # use a streamed array have a block to bind to.
        b._array_storage = "streamed"

        try:
            with pytest.warns(None) as w:
                ff._open_impl(ff, buff, mode='rw')
            # Do not tolerate any warnings that occur during schema validation
            assert len(w) == 0, helpers.display_warnings(w)
        except Exception:
            print("From file:", self.filename)
            raise

        # Just test we can write it out.  A roundtrip test
        # wouldn't always yield the correct result, so those have
        # to be covered by "real" unit tests.
        if b'external.asdf' not in buff.getvalue():
            buff = io.BytesIO()
            ff.write_to(buff)
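
The buff passed to _open_impl above comes from helpers.yaml_to_asdf, which wraps the schema example's YAML in a minimal ASDF header. A rough sketch of that wrapping is below; the function name is made up for illustration, the default version string is arbitrary, and the real helper also appends the binary block data that ndarray examples reference.

import io

def tree_yaml_to_asdf_bytes(tree_yaml, standard_version='1.5.0'):
    # Hypothetical stand-in for helpers.yaml_to_asdf: prepend the ASDF file
    # header and YAML directives, then terminate the document.  The real
    # helper also writes the binary blocks referenced by ndarray examples.
    header = (
        '#ASDF 1.0.0\n'
        '#ASDF_STANDARD {0}\n'
        '%YAML 1.1\n'
        '%TAG ! tag:stsci.edu:asdf/\n'
        '--- !core/asdf-1.0.0\n'
    ).format(standard_version)
    return io.BytesIO((header + tree_yaml.strip() + '\n...\n').encode('utf-8'))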
Example #2
    def runtest(self):
        import io
        import os

        import numpy as np
        import pytest

        from asdf import AsdfFile, block, util
        from asdf.tests import helpers
        from .extension import TestExtension

        name, version = parse_schema_filename(self.filename)
        if should_skip(name, version):
            return

        standard_version = self._find_standard_version(name, version)

        # Make sure that the examples in the schema files (and thus the
        # ASDF standard document) are valid.
        buff = helpers.yaml_to_asdf(
            'example: ' + self.example.strip(), standard_version=standard_version)
        ff = AsdfFile(
            uri=util.filepath_to_url(os.path.abspath(self.filename)),
            extensions=TestExtension())

        # Fake an external file
        ff2 = AsdfFile({'data': np.empty((1024*1024*8), dtype=np.uint8)})

        ff._external_asdf_by_uri[
            util.filepath_to_url(
                os.path.abspath(
                    os.path.join(
                        os.path.dirname(self.filename), 'external.asdf')))] = ff2

        # Add some dummy blocks so that the ndarray examples work
        for i in range(3):
            b = block.Block(np.zeros((1024*1024*8), dtype=np.uint8))
            b._used = True
            ff.blocks.add(b)
        b._array_storage = "streamed"

        try:
            with pytest.warns(None) as w:
                ff._open_impl(ff, buff, mode='rw')
            # Do not tolerate any warnings that occur during schema validation
            assert len(w) == 0, helpers.display_warnings(w)
        except Exception:
            print("From file:", self.filename)
            raise

        # Just test we can write it out.  A roundtrip test
        # wouldn't always yield the correct result, so those have
        # to be covered by "real" unit tests.
        if b'external.asdf' not in buff.getvalue():
            buff = io.BytesIO()
            ff.write_to(buff)
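
Example #2 differs mainly in passing extensions=TestExtension() so that custom tag types resolve during validation. The TestExtension class itself is not shown; under asdf's legacy extension API an extension exposes types, tag_mapping, and url_mapping properties. A minimal sketch assuming that API follows; the type list, URIs, and schema path are placeholders, not the real TestExtension.

from asdf import util
from asdf.extension import AsdfExtension


class TestExtension(AsdfExtension):
    # Placeholder sketch of a legacy-style extension.  A real extension
    # would list its CustomType subclasses and point the mappings at the
    # schemas it ships.
    @property
    def types(self):
        return []

    @property
    def tag_mapping(self):
        # Map tag URI prefixes to schema URI patterns.
        return [('tag:example.org:custom',
                 'http://example.org/schemas/custom{tag_suffix}')]

    @property
    def url_mapping(self):
        # Map schema URI prefixes to local files (path is a placeholder).
        return [('http://example.org/schemas/custom/',
                 util.filepath_to_url('/path/to/schemas') + '/{url_suffix}.yaml')]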
Example #3
    def run(self):
        filename = self.arguments[0]

        cwd = os.getcwd()
        os.chdir(TMPDIR)

        parts = []
        try:
            ff = AsdfFile()
            code = AsdfFile._open_impl(ff, filename, _get_yaml_content=True)
            code = '{0} {1}\n'.format(ASDF_MAGIC, version_string) + code.strip().decode('utf-8')
            literal = nodes.literal_block(code, code)
            literal['language'] = 'yaml'
            set_source_info(self, literal)
            parts.append(literal)

            kwargs = dict()
            # Use the ignore_unrecognized_tag parameter as a proxy for both options
            kwargs['ignore_unrecognized_tag'] = 'ignore_unrecognized_tag' in self.arguments
            kwargs['ignore_missing_extensions'] = 'ignore_unrecognized_tag' in self.arguments

            with AsdfFile.open(filename, **kwargs) as ff:
                for i, block in enumerate(ff.blocks.internal_blocks):
                    data = codecs.encode(block.data.tobytes(), 'hex')
                    if len(data) > 40:
                        data = data[:40] + '...'.encode()
                    allocated = block._allocated
                    size = block._size
                    data_size = block._data_size
                    flags = block._flags

                    if flags & BLOCK_FLAG_STREAMED:
                        allocated = size = data_size = 0

                    lines = []
                    lines.append('BLOCK {0}:'.format(i))

                    human_flags = []
                    for key, val in FLAGS.items():
                        if flags & key:
                            human_flags.append(val)
                    if len(human_flags):
                        lines.append('    flags: {0}'.format(' | '.join(human_flags)))
                    if block.input_compression:
                        lines.append('    compression: {0}'.format(block.input_compression))
                    lines.append('    allocated_size: {0}'.format(allocated))
                    lines.append('    used_size: {0}'.format(size))
                    lines.append('    data_size: {0}'.format(data_size))
                    lines.append('    data: {0}'.format(data))

                    code = '\n'.join(lines)

                    literal = nodes.literal_block(code, code)
                    literal['language'] = 'yaml'
                    set_source_info(self, literal)
                    parts.append(literal)

                internal_blocks = list(ff.blocks.internal_blocks)
                if (len(internal_blocks) and
                    internal_blocks[-1].array_storage != 'streamed'):
                    buff = io.BytesIO()
                    ff.blocks.write_block_index(buff, ff)
                    block_index = buff.getvalue().decode('utf-8')
                    literal = nodes.literal_block(block_index, block_index)
                    literal['language'] = 'yaml'
                    set_source_info(self, literal)
                    parts.append(literal)

        finally:
            os.chdir(cwd)

        result = nodes.admonition()
        textnodes, messages = self.state.inline_text(filename, self.lineno)
        title = nodes.title(filename, '', *textnodes)
        result += title
        result += parts
        return [result]
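
Both directive versions read module-level constants that are not shown: BLOCK_FLAG_STREAMED, the block-header bit that marks a streamed block, and FLAGS, a mapping from flag bits to printable names. A plausible reconstruction is sketched below; the dictionary contents are an assumption based on how FLAGS is used above.

from asdf.constants import BLOCK_FLAG_STREAMED

# Human-readable names for block header flag bits; only the streamed flag
# is assumed here, mirroring the checks in the run() methods above.
FLAGS = {
    BLOCK_FLAG_STREAMED: 'STREAMED',
}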
Example #4
    def run(self):
        filename = self.arguments[0]

        cwd = os.getcwd()
        os.chdir(TMPDIR)

        parts = []
        try:
            ff = AsdfFile()
            code = AsdfFile._open_impl(ff, filename, _get_yaml_content=True)
            code = '{0} {1}\n'.format(
                ASDF_MAGIC, version_string) + code.strip().decode('utf-8')
            literal = nodes.literal_block(code, code)
            literal['language'] = 'yaml'
            set_source_info(self, literal)
            parts.append(literal)

            with AsdfFile.open(filename) as ff:
                for i, block in enumerate(ff.blocks.internal_blocks):
                    data = codecs.encode(block.data.tobytes(), 'hex')
                    if len(data) > 40:
                        data = data[:40] + '...'.encode()
                    allocated = block._allocated
                    size = block._size
                    data_size = block._data_size
                    flags = block._flags

                    if flags & BLOCK_FLAG_STREAMED:
                        allocated = size = data_size = 0

                    lines = []
                    lines.append('BLOCK {0}:'.format(i))

                    human_flags = []
                    for key, val in FLAGS.items():
                        if flags & key:
                            human_flags.append(val)
                    if len(human_flags):
                        lines.append('    flags: {0}'.format(
                            ' | '.join(human_flags)))
                    if block.input_compression:
                        lines.append('    compression: {0}'.format(
                            block.input_compression))
                    lines.append('    allocated_size: {0}'.format(allocated))
                    lines.append('    used_size: {0}'.format(size))
                    lines.append('    data_size: {0}'.format(data_size))
                    lines.append('    data: {0}'.format(data))

                    code = '\n'.join(lines)

                    literal = nodes.literal_block(code, code)
                    literal['language'] = 'yaml'
                    set_source_info(self, literal)
                    parts.append(literal)

                internal_blocks = list(ff.blocks.internal_blocks)
                if (len(internal_blocks)
                        and internal_blocks[-1].array_storage != 'streamed'):
                    buff = io.BytesIO()
                    ff.blocks.write_block_index(buff, ff)
                    block_index = buff.getvalue().decode('utf-8')
                    literal = nodes.literal_block(block_index, block_index)
                    literal['language'] = 'yaml'
                    set_source_info(self, literal)
                    parts.append(literal)

        finally:
            os.chdir(cwd)

        result = nodes.admonition()
        textnodes, messages = self.state.inline_text(filename, self.lineno)
        title = nodes.title(filename, '', *textnodes)
        result += title
        result += parts
        return [result]
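
These run() methods belong to a docutils Directive subclass used during the documentation build to render an ASDF file's YAML content and block layout. Wiring such a directive into Sphinx goes through the standard add_directive hook; a minimal sketch follows, where the class name, directive name, and argument handling are illustrative rather than taken from the snippets above.

from docutils.parsers.rst import Directive


class AsdfExampleDirective(Directive):
    required_arguments = 1   # the ASDF filename
    optional_arguments = 1   # e.g. 'ignore_unrecognized_tag' in Example #3
    has_content = False

    def run(self):
        # Body as in Example #3 / Example #4 above.
        raise NotImplementedError


def setup(app):
    # Register the directive so documentation sources can invoke it,
    # e.g. ``.. asdf-example:: example.asdf``.
    app.add_directive('asdf-example', AsdfExampleDirective)
    return {'parallel_read_safe': True}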