Example #1
    def run(self, artifacts: List[Artifact]) -> List[Artifact]:
        logger = logging.getLogger(__name__)

        if len(artifacts) == 1:
            artifact = artifacts[0]
        else:
            msg = ('C Pragma Injector expects only one Artifact, '
                   f'but was given {len(artifacts)}')
            raise TaskException(msg)

        logger.debug('Injecting pragmas into: %s', artifact.location)
        injector = _CTextReaderPragmas(
            FileTextReader(artifact.location))

        output_file = self._workspace / artifact.location.name

        out_lines = list(injector.line_by_line())

        with output_file.open('w') as out_file:
            for line in out_lines:
                out_file.write(line)

        new_artifact = Artifact(output_file,
                                artifact.filetype,
                                Modified)
        for dependency in artifact.depends_on:
            new_artifact.add_dependency(dependency)

        return [new_artifact]
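
The `_CTextReaderPragmas` wrapper used above is not reproduced in these examples. A minimal sketch consistent with the expected output in Example #5 (user includes bracketed by UsrInclude pragmas, system includes by SysInclude pragmas) might look like this; the regexes and structure are assumptions, not the project's actual implementation:

import re


class _CTextReaderPragmas:
    """Sketch: wraps a text reader and brackets every #include line
    with FAB pragmas so later passes can tell user headers from
    system headers."""
    _usr = re.compile(r'^\s*#include\s+["\']')
    _sys = re.compile(r'^\s*#include\s+<')

    def __init__(self, reader):
        self._reader = reader

    def line_by_line(self):
        for line in self._reader.line_by_line():
            if self._usr.match(line):
                yield '#pragma FAB UsrIncludeStart\n'
                yield line
                yield '#pragma FAB UsrIncludeEnd\n'
            elif self._sys.match(line):
                yield '#pragma FAB SysIncludeStart\n'
                yield line
                yield '#pragma FAB SysIncludeEnd\n'
            else:
                yield line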
Example #2
    def run(self, artifacts: List[Artifact]) -> List[Artifact]:
        logger = logging.getLogger(__name__)

        if len(artifacts) == 1:
            artifact = artifacts[0]
        else:
            msg = ('C Compiler expects only one Artifact, '
                   f'but was given {len(artifacts)}')
            raise TaskException(msg)

        command = [self._compiler]
        command.extend(self._flags)
        command.append(str(artifact.location))

        output_file = (self._workspace /
                       artifact.location.with_suffix('.o').name)
        command.extend(['-o', str(output_file)])

        logger.debug('Running command: %s', ' '.join(command))
        subprocess.run(command, check=True)

        object_artifact = Artifact(output_file,
                                   BinaryObject,
                                   Compiled)
        for definition in artifact.defines:
            object_artifact.add_definition(definition)

        return [object_artifact]
Example #3
    def run(self, artifacts: List[Artifact]) -> List[Artifact]:
        logger = logging.getLogger(__name__)

        if len(artifacts) == 1:
            artifact = artifacts[0]
        else:
            msg = ('Header Analyser expects only one Artifact, '
                   f'but was given {len(artifacts)}')
            raise TaskException(msg)

        new_artifact = Artifact(artifact.location, artifact.filetype,
                                HeadersAnalysed)

        reader = FileTextReader(artifact.location)
        logger.debug('Looking for headers in: %s', reader.filename)
        for line in reader.line_by_line():
            include_match: Optional[Match] \
                = self._include_pattern.match(line)
            if include_match:
                include: str = include_match.group(1)
                logger.debug('Found header: %s', include)
                if include.startswith(('"', "'")):
                    include = include.strip('"').strip("'")
                    logger.debug('  * User header; adding dependency')
                    new_artifact.add_dependency(Path(self._workspace /
                                                     include))

        return [new_artifact]
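
The `_include_pattern` referenced above is not shown either. For the logic to work as the tests expect, `group(1)` has to keep the quotes or angle brackets around the include target, so that the `startswith` check can separate user headers from system ones. A plausible (assumed) definition:

import re

# Hypothetical pattern; the real one lives on the analyser class.
# group(1) captures the include target verbatim, quotes or angle
# brackets included.
_include_pattern = re.compile(r'^\s*#include\s+(\S+)')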
Example #4
    def test_harvested_data(self, caplog, tmp_path):
        """
        Checks that rescanning a file leaves the database unchanged.
        """
        caplog.set_level(logging.DEBUG)

        first_file: Path = tmp_path / 'other.F90'
        first_file.write_text(
            dedent('''
                   program betty
                     use barney_mod, only : dino
                     implicit none
                   end program betty

                   module barney_mod
                   end module barney_mod
                   '''))
        second_file: Path = tmp_path / 'test.f90'
        second_file.write_text(
            dedent('''
                   module barney_mod
                   end module barney_mod
                   '''))

        database: SqliteStateDatabase = SqliteStateDatabase(tmp_path)
        test_unit = FortranAnalyser(tmp_path)
        first_artifact = Artifact(first_file, FortranSource, Raw)
        second_artifact = Artifact(second_file, FortranSource, Raw)
        # Not going to test returned objects this time
        _ = test_unit.run([first_artifact])
        _ = test_unit.run([second_artifact])

        # Confirm the database has been updated
        fdb = FortranWorkingState(database)
        assert list(iter(fdb)) \
            == [FortranInfo(FortranUnitID('barney_mod', first_file)),
                FortranInfo(FortranUnitID('barney_mod', second_file)),
                FortranInfo(FortranUnitID('betty', first_file),
                            ['barney_mod'])]
        assert list(fdb.depends_on(FortranUnitID('betty', first_file))) \
            == [FortranUnitID('barney_mod', tmp_path / 'other.F90'),
                FortranUnitID('barney_mod', tmp_path / 'test.f90')]

        # Repeat the scan of second_file, there should be no change.
        #
        _ = test_unit.run([second_artifact])

        fdb = FortranWorkingState(database)
        assert list(iter(fdb)) \
            == [FortranInfo(FortranUnitID('barney_mod', first_file)),
                FortranInfo(FortranUnitID('barney_mod', second_file)),
                FortranInfo(FortranUnitID('betty', first_file),
                            ['barney_mod'])]
        assert list(fdb.depends_on(FortranUnitID('betty', first_file))) \
            == [FortranUnitID('barney_mod', tmp_path / 'other.F90'),
                FortranUnitID('barney_mod', tmp_path / 'test.f90')]
Example #5
    def test_run(self, tmp_path):
        workspace = tmp_path / 'working'
        workspace.mkdir()

        test_file: Path = tmp_path / 'test.c'
        test_file.write_text(
            dedent('''
                   #include "user_include.h"
                   Unrelated text
                   #include 'another_user_include.h'
                   #include <system_include.h>
                   More unrelated text
                   #include <another_system_include.h>
                   '''))
        test_artifact = Artifact(test_file, CSource, HeadersAnalysed)
        test_artifact.add_dependency('foo')

        # Run the Injector
        injector = CPragmaInjector(workspace)
        artifacts_out = injector.run([test_artifact])

        assert len(artifacts_out) == 1
        assert artifacts_out[0].location == workspace / 'test.c'
        assert artifacts_out[0].filetype is CSource
        assert artifacts_out[0].state is Modified
        assert artifacts_out[0].depends_on == ['foo']
        assert artifacts_out[0].defines == []

        new_file = workspace / 'test.c'
        assert new_file.exists()
        with new_file.open('r') as fh:
            new_text = fh.read()

        expected_text = (dedent('''
                   #pragma FAB UsrIncludeStart
                   #include "user_include.h"
                   #pragma FAB UsrIncludeEnd
                   Unrelated text
                   #pragma FAB UsrIncludeStart
                   #include 'another_user_include.h'
                   #pragma FAB UsrIncludeEnd
                   #pragma FAB SysIncludeStart
                   #include <system_include.h>
                   #pragma FAB SysIncludeEnd
                   More unrelated text
                   #pragma FAB SysIncludeStart
                   #include <another_system_include.h>
                   #pragma FAB SysIncludeEnd
                   '''))

        assert new_text == expected_text
Example #6
    def test_run(self, tmp_path):
        # Create a file to analyse
        test_file: Path = tmp_path / 'test.c'
        test_file.write_text(
            dedent('''
                   #include "user_include.h"
                   Unrelated text
                   #include 'another_user_include.h'
                   #include <system_include.h>
                   More unrelated text
                   #include <another_system_include.h>
                   '''))
        test_artifact = Artifact(test_file,
                                 Unknown,
                                 New)

        # Run the Analyser
        header_analyser = HeaderAnalyser(tmp_path)
        artifacts_out = header_analyser.run([test_artifact])

        expected_dependencies = [tmp_path / 'user_include.h',
                                 tmp_path / 'another_user_include.h']

        assert len(artifacts_out) == 1
        assert artifacts_out[0].location == test_file
        assert artifacts_out[0].filetype is Unknown
        assert artifacts_out[0].state is HeadersAnalysed
        assert artifacts_out[0].depends_on == expected_dependencies
        assert artifacts_out[0].defines == []
Example #7
    def test_run(self, mocker, tmp_path: Path):
        # Instantiate Compiler
        workspace = tmp_path / 'working'
        workspace.mkdir()
        compiler = CCompiler('fred', ['--barney', '--wilma'], workspace)

        # Create artifact
        artifact = Artifact(Path(tmp_path / 'flintstone.c'), CSource, Analysed)

        # Monkeypatch the subprocess call out and run
        patched_run = mocker.patch('subprocess.run')
        artifacts_out = compiler.run([artifact])

        # Check that the subprocess call contained the command
        # that we would expect based on the above
        expected_command = [
            'fred', '--barney', '--wilma',
            str(tmp_path / 'flintstone.c'), '-o',
            str(workspace / 'flintstone.o')
        ]
        patched_run.assert_any_call(expected_command, check=True)

        assert len(artifacts_out) == 1
        assert artifacts_out[0].location == workspace / 'flintstone.o'
        assert artifacts_out[0].filetype is BinaryObject
        assert artifacts_out[0].state is Compiled
        assert artifacts_out[0].depends_on == []
        assert artifacts_out[0].defines == []
Example #8
    def test_run(self, mocker, tmp_path: Path):
        # Instantiate Preprocessor
        workspace = tmp_path / 'working'
        workspace.mkdir()
        preprocessor = CPreProcessor('foo', ['--bar', '--baz'], workspace)

        # Create artifact
        artifact = Artifact(Path(tmp_path / 'foo.c'), CSource, Seen)

        # Monkeypatch the subprocess call out and run
        patched_run = mocker.patch('subprocess.run')
        artifacts_out = preprocessor.run([artifact])

        # Check that the subprocess call contained the command
        # that we would expect based on the above
        expected_pp_command = [
            'foo', '--bar', '--baz',
            str(tmp_path / 'foo.c'),
            str(workspace / 'foo.fabcpp')
        ]
        patched_run.assert_any_call(expected_pp_command, check=True)

        expected_mv_command = [
            'mv',
            str(workspace / 'foo.fabcpp'),
            str(workspace / 'foo.c')
        ]
        patched_run.assert_any_call(expected_mv_command, check=True)

        assert len(artifacts_out) == 1
        assert artifacts_out[0].location == workspace / 'foo.c'
        assert artifacts_out[0].filetype is CSource
        assert artifacts_out[0].state is Raw
        assert artifacts_out[0].depends_on == []
        assert artifacts_out[0].defines == []
Example #9
    def run(self, artifacts: List[Artifact]) -> List[Artifact]:
        logger = logging.getLogger(__name__)

        if len(artifacts) == 1:
            artifact = artifacts[0]
        else:
            msg = ('C Preprocessor expects only one Artifact, '
                   f'but was given {len(artifacts)}')
            raise TaskException(msg)

        command = [self._preprocessor]
        command.extend(self._flags)
        command.append(str(artifact.location))

        # Use temporary output name (in case the given tool
        # can't operate in-place)
        output_file = (self._workspace /
                       artifact.location.with_suffix('.fabcpp').name)

        command.append(str(output_file))
        logger.debug('Running command: %s', ' '.join(command))
        subprocess.run(command, check=True)

        # Move the temporary output over the intended output file
        final_output = (self._workspace /
                        artifact.location.name)
        command = ["mv", str(output_file), str(final_output)]
        logger.debug('Running command: %s', ' '.join(command))
        subprocess.run(command, check=True)

        return [Artifact(final_output,
                         artifact.filetype,
                         Raw)]
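
As an aside, the rename performed by spawning `mv` could also be done in-process. A sketch of the equivalent step, assuming both paths sit in the same workspace directory (for cross-filesystem moves `shutil.move` would be safer); `_replace` is a hypothetical helper, not part of the project:

from pathlib import Path


def _replace(temporary: Path, final: Path) -> None:
    # Same effect as the "mv" call above: rename, overwriting any
    # existing file at the destination.
    temporary.replace(final)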
Example #10
    def test_constructor(self):
        test_path = Path('/test/path')
        artifact = Artifact(test_path, Unknown, New)

        assert artifact.location == test_path
        assert artifact.state is New
        assert artifact.filetype is Unknown
        assert artifact.depends_on == []
        assert artifact.defines == []
Example #11
 def test_hash(self, tmp_path: Path):
     test_path = Path(tmp_path / 'test.foo')
     test_path.write_text("Lorem ipsum dolor sit")
     expected_hash = 1463158782
     artifact = Artifact(test_path, Unknown, New)
     assert artifact.hash == expected_hash
     # Check that it is cached
     test_path.unlink()
     assert artifact.hash == expected_hash
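
The `Artifact` class itself is not included in these examples. The behaviour exercised by the tests pins it down fairly well: both hash values quoted here and in Example #13 (1463158782 and 1630603340) are the Adler-32 checksums of the respective file contents, and the hash is cached on first access. A minimal sketch consistent with those tests, offered as an assumption rather than the actual implementation:

import zlib
from pathlib import Path
from typing import List, Optional, Union


class Artifact:
    def __init__(self, location: Path, filetype, state):
        self.location = location
        self.filetype = filetype
        self.state = state
        self.depends_on: List[Union[str, Path]] = []
        self.defines: List[str] = []
        self._hash: Optional[int] = None

    def add_dependency(self, dependency: Union[str, Path]) -> None:
        self.depends_on.append(dependency)

    def add_definition(self, definition: str) -> None:
        self.defines.append(definition)

    @property
    def hash(self) -> int:
        # Cached on first access, so the file may be deleted afterwards
        # without affecting the value (as test_hash relies on).
        if self._hash is None:
            self._hash = zlib.adler32(self.location.read_bytes())
        return self._hash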
Example #12
    def test_run(self, mocker, tmp_path: Path):
        # Instantiate Linker
        workspace = Path(tmp_path)
        linker = Linker('foo',
                        ['--bar', '--baz'],
                        workspace,
                        'qux')

        # Create artifacts (object files for linking)
        file1 = '/path/to/file.1'
        file2 = '/path/to/file.2'
        artifacts = [Artifact(Path(file1),
                              Unknown,
                              New),
                     Artifact(Path(file2),
                              Unknown,
                              New)]

        # Monkeypatch the subprocess call out and run linker
        patched_run = mocker.patch('subprocess.run')
        artifacts_out = linker.run(artifacts)

        # Check that the subprocess call contained the command
        # that we would expect based on the above
        expected_command = ['foo',
                            '-o',
                            str(workspace / 'qux'),
                            file1,
                            file2,
                            '--bar',
                            '--baz']
        patched_run.assert_called_once_with(expected_command,
                                            check=True)
        assert len(artifacts_out) == 1
        assert artifacts_out[0].location == workspace / 'qux'
        assert artifacts_out[0].filetype is Executable
        assert artifacts_out[0].state is Linked
        assert artifacts_out[0].depends_on == []
        assert artifacts_out[0].defines == []
Example #13
    def test_process(self, tmp_path: Path):
        pattern = r'.*\.foo'
        pathmap = PathMap(pattern,
                          DummyFileType,
                          DummyState)

        taskmap: Mapping[Tuple[Type[FileType], Type[State]], Task] = {
            (DummyFileType, DummyState): DummyTask(),
        }
        engine = Engine(tmp_path,
                        "test_target",
                        [pathmap],
                        taskmap)

        assert engine.target == "test_target"

        test_path = tmp_path / "test.foo"
        test_path.write_text("This is the Engine test")
        artifact = Artifact(test_path,
                            Unknown,
                            New)

        discovery: Dict[str, DiscoveryState] = {}
        objects: List[Artifact] = []
        lock = DummyLock()

        new_artifact = engine.process(artifact,
                                      discovery,
                                      objects,
                                      lock)

        assert len(new_artifact) == 1
        assert new_artifact[0].location == artifact.location
        assert new_artifact[0].filetype is DummyFileType
        assert new_artifact[0].state is DummyState
        assert new_artifact[0]._hash == 1630603340
        assert discovery == {}
        assert objects == []

        new_artifact2 = engine.process(new_artifact[0],
                                       discovery,
                                       objects,
                                       lock)

        assert len(new_artifact2) == 1
        assert new_artifact2[0].location == tmp_path / "test.bar"
        assert new_artifact2[0].filetype is DummyFileType2
        assert new_artifact2[0].state is DummyState2
        assert new_artifact2[0]._hash is None
        assert discovery == {}
        assert objects == []
Example #14
    def run(self, artifacts: List[Artifact]) -> List[Artifact]:

        if len(artifacts) == 1:
            artifact = artifacts[0]
        else:
            msg = ('C Analyser expects only one Artifact, '
                   f'but was given {len(artifacts)}')
            raise TaskException(msg)

        reader = FileTextReader(artifact.location)

        state = CWorkingState(self.database)
        state.remove_c_file(reader.filename)

        new_artifact = Artifact(artifact.location, artifact.filetype, Analysed)

        index = clang.cindex.Index.create()
        translation_unit = index.parse(reader.filename, args=["-xc"])

        # Create include region line mappings
        self._locate_include_regions(translation_unit)

        # Now walk the actual nodes and find all relevant external symbols
        usr_includes = []
        current_def = None
        for node in translation_unit.cursor.walk_preorder():
            if node.kind == clang.cindex.CursorKind.FUNCTION_DECL:
                if (node.is_definition()
                        and node.linkage == clang.cindex.LinkageKind.EXTERNAL):
                    # This should catch function definitions which are exposed
                    # to the rest of the application
                    current_def = CSymbolID(node.spelling, artifact.location)
                    state.add_c_symbol(current_def)
                    new_artifact.add_definition(node.spelling)
                else:
                    # Any other declarations should be coming in via headers;
                    # we can use the injected pragmas to work out whether these
                    # come from system headers or user headers
                    if (self._check_for_include(
                            node.location.line) == "usr_include"):
                        usr_includes.append(node.spelling)

            elif (node.kind == clang.cindex.CursorKind.CALL_EXPR):
                # When encountering a function call we should be able to
                # cross-reference it with a definition seen earlier; and
                # if it came from a user supplied header then we will
                # consider it a dependency within the project
                if node.spelling in usr_includes and current_def is not None:
                    # TODO: Assumption that the most recent exposed
                    # definition encountered above is the one which
                    # should lodge this dependency - is that true?
                    state.add_c_dependency(current_def, node.spelling)
                    new_artifact.add_dependency(node.spelling)

        return [new_artifact]
Example #15
    def run(self, artifacts: List[Artifact]) -> List[Artifact]:
        if len(artifacts) == 1:
            artifact = artifacts[0]
        else:
            msg = ('Header Analyser expects only one Artifact, '
                   f'but was given {len(artifacts)}')
            raise TaskException(msg)

        new_artifact = Artifact(artifact.location, artifact.filetype,
                                HeadersAnalysed)

        reader = FileTextReader(artifact.location)
        for line in reader.line_by_line():
            include_match: Optional[Match] \
                = self._include_pattern.match(line)
            if include_match:
                include: str = include_match.group(1)
                if include.startswith(('"', "'")):
                    include = include.strip('"').strip("'")
                    new_artifact.add_dependency(Path(self._workspace /
                                                     include))

        return [new_artifact]
Example #16
 def test_mismatched_end_name(self, tmp_path: Path):
     """
     Ensure that the analyser raises an exception when a block's end name
     does not match its opening name.
     """
     test_file: Path = tmp_path / 'test.f90'
     test_file.write_text(
         dedent('''
                module wibble_mod
                type :: thing_type
                end type blasted_type
                end module wibble_mod
                '''))
     test_unit = FortranAnalyser(tmp_path)
     test_artifact = Artifact(test_file, FortranSource, Raw)
     with pytest.raises(TaskException):
         test_unit.run([test_artifact])
Example #17
    def test_analyser_symbols(self, caplog, tmp_path):
        """
        Tests that exposed symbols are identified, and that calls are picked
        up as dependencies provided they are declared in user headers.
        """
        caplog.set_level(logging.DEBUG)

        test_file: Path = tmp_path / 'test.c'
        test_file.write_text(
            dedent('''
                  #pragma FAB UsrIncludeStart
                  void foo();
                  #pragma FAB UsrIncludeEnd

                  #pragma FAB UsrIncludeStart
                  void bar();
                  #pragma FAB UsrIncludeEnd

                  #pragma FAB SysIncludeStart
                  void baz();
                  #pragma FAB SysIncludeEnd

                  void foo() {
                      bar();
                      baz();
                  }
                   '''))

        database: SqliteStateDatabase = SqliteStateDatabase(tmp_path)
        test_unit = CAnalyser(tmp_path)
        test_artifact = Artifact(test_file, CSource, Raw)
        output_artifacts = test_unit.run([test_artifact])

        # Confirm database is updated
        working_state = CWorkingState(database)
        assert list(working_state) \
            == [CInfo(CSymbolID('foo', test_file),
                      ['bar'])]

        # Confirm returned Artifact is updated
        assert len(output_artifacts) == 1
        assert output_artifacts[0].defines == ['foo']
        assert output_artifacts[0].depends_on == ['bar']
        assert output_artifacts[0].location == test_file
        assert output_artifacts[0].filetype is CSource
        assert output_artifacts[0].state is Analysed
Example #18
def test_queue(tmp_path: Path):

    dummy_engine = DummyEngine()

    q_manager = QueueManager(2, dummy_engine)
    q_manager.run()

    for i in range(1, 4):
        artifact = Artifact(tmp_path / f"file_{i}", Unknown, New)
        q_manager.add_to_queue(artifact)

    q_manager.check_queue_done()

    for i in range(1, 4):
        filename = tmp_path / f"file_{i}"
        assert filename.exists()

    q_manager.shutdown()
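
`DummyEngine` is not shown here. Assuming the queue workers call the engine's `process` method with the same signature as `Engine.process` in Example #23 and re-queue whatever it returns, a minimal test double consistent with this test might be:

class DummyEngine:
    """Test double: 'processing' an artifact just creates its file,
    which is what test_queue asserts afterwards."""

    def process(self, artifact, discovery, objects, lock):
        artifact.location.touch()
        # Returning nothing new keeps the queue from re-processing.
        return []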
Example #19
    def test_naked_use(self, tmp_path):
        """
        Ensures that an exception is raised if a "use" is found outside a
        program unit.
        """
        test_file: Path = tmp_path / 'test.f90'
        test_file.write_text(
            dedent('''
                   use beef_mod

                   module test_mod
                   end module test_mod
                   '''))

        test_unit = FortranAnalyser(tmp_path)
        test_artifact = Artifact(test_file, FortranSource, Raw)
        with pytest.raises(TaskException):
            test_unit.run([test_artifact])
Example #20
    def run(self, artifacts: List[Artifact]) -> List[Artifact]:

        if len(artifacts) < 1:
            msg = ('Linker expects at least one Artifact, '
                   f'but was given {len(artifacts)}')
            raise TaskException(msg)

        command = [self._linker]

        output_file = self._workspace / self._output_filename

        command.extend(['-o', str(output_file)])
        for artifact in artifacts:
            command.append(str(artifact.location))

        command.extend(self._flags)

        subprocess.run(command, check=True)

        return [Artifact(output_file, Executable, Linked)]
Example #21
    def run(self, artifacts: List[Artifact]) -> List[Artifact]:

        if len(artifacts) == 1:
            artifact = artifacts[0]
        else:
            msg = ('Fortran Preprocessor expects only one Artifact, '
                   f'but was given {len(artifacts)}')
            raise TaskException(msg)

        command = [self._preprocessor]
        command.extend(self._flags)
        command.append(str(artifact.location))

        output_file = (self._workspace /
                       artifact.location.with_suffix('.f90').name)
        command.append(str(output_file))

        subprocess.run(command, check=True)

        return [Artifact(output_file, artifact.filetype, Raw)]
Example #22
    def test_analyser_program_units(self, caplog, tmp_path):
        """
        Tests that program units and the "uses" they contain are correctly
        identified.
        """
        caplog.set_level(logging.DEBUG)

        test_file: Path = tmp_path / 'test.f90'
        test_file.write_text(
            dedent('''
                   program foo
                     use iso_fortran_env, only : output
                     use, intrinsic :: ios_c_binding
                     use beef_mod
                     implicit none
                   end program foo

                   module bar
                     use iso_fortran_env, only : output
                     use, intrinsic :: ios_c_binding
                     use cheese_mod, only : bits_n_bobs
                     implicit none
                   end module bar

                   function baz(first, second)
                     use iso_fortran_env, only : output
                     use, intrinsic :: ios_c_binding
                     use teapot_mod
                     implicit none
                   end function baz

                   subroutine qux()
                     use iso_fortran_env, only : output
                     use, intrinsic :: ios_c_binding
                     use wibble_mod
                     use wubble_mod, only: stuff_n_nonsense
                     implicit none
                   end subroutine qux
                   '''))

        database: SqliteStateDatabase = SqliteStateDatabase(tmp_path)
        test_unit = FortranAnalyser(tmp_path)
        test_artifact = Artifact(test_file, FortranSource, Raw)
        output_artifacts = test_unit.run([test_artifact])

        # Confirm database is updated
        working_state = FortranWorkingState(database)
        assert list(working_state) \
            == [FortranInfo(FortranUnitID('bar', tmp_path/'test.f90'),
                            ['cheese_mod']),
                FortranInfo(FortranUnitID('baz', tmp_path/'test.f90'),
                            ['teapot_mod']),
                FortranInfo(FortranUnitID('foo', tmp_path/'test.f90'),
                            ['beef_mod']),
                FortranInfo(FortranUnitID('qux', tmp_path/'test.f90'),
                            ['wibble_mod', 'wubble_mod'])]

        # Confirm returned Artifact is updated
        assert len(output_artifacts) == 1
        assert output_artifacts[0].defines == ['foo', 'bar', 'baz', 'qux']
        assert output_artifacts[0].depends_on == [
            'beef_mod', 'cheese_mod', 'teapot_mod', 'wibble_mod', 'wubble_mod'
        ]
        assert output_artifacts[0].location == test_file
        assert output_artifacts[0].filetype is FortranSource
        assert output_artifacts[0].state is Analysed
Example #23
    def process(self,
                artifact: Artifact,
                discovery: Dict[str, DiscoveryState],
                objects: List[Artifact],
                lock: LockT) -> List[Artifact]:

        new_artifacts: List[Artifact] = []
        new_discovery: Dict[str, DiscoveryState] = {}
        new_objects: List[Artifact] = []
        # Identify artifacts that are completely new
        if (artifact.state is New
                and artifact.filetype is Unknown):
            # Use the pathmap list to work out the
            # filetype and starting state
            new_artifact = None
            for pathmap in self._pathmaps:
                if artifact.location in pathmap:
                    new_artifact = Artifact(artifact.location,
                                            pathmap.filetype,
                                            pathmap.state)
            # Assuming we found a match and were able
            # to create the artifact, return it so that
            # it can be added to the queue
            if new_artifact is not None:
                # Also store its hash in the file database
                file_info = FileInfoDatabase(self._database)
                file_info.add_file_info(artifact.location,
                                        new_artifact.hash)
                new_artifacts.append(new_artifact)

        elif artifact.state is Analysed:

            # Work out whether this artifact needs to be
            # included in the build or not - if any of its
            # definitions are mentioned in the (shared)
            # discovery mapping, or if it is defining
            # the target of the build then it should be included

            # TODO: Looping through a list of what could
            # eventually contain every unit/symbol in the build has
            # the potential to become an issue for performance.
            # Longer term we probably want to drop using the shared
            # discovery array in favour of database lookups
            required = False
            for definition in artifact.defines:
                # Is this the target?
                if (definition == self.target
                        or definition in discovery):
                    required = True
                    break

            if required:
                # Update the discovery list to indicate that
                # the definitions from this Artifact are present
                # (but not yet compiled)
                for definition in artifact.defines:
                    if definition not in discovery:
                        new_discovery[definition] = DiscoveryState.SEEN

                # Now check whether the Artifact's dependencies
                # have already been seen and compiled
                compiled = [False]*len(artifact.depends_on)
                for idep, dependency in enumerate(artifact.depends_on):
                    # Only applies to str dependencies
                    if isinstance(dependency, Path):
                        continue
                    if dependency in discovery:
                        # Are the dependencies compiled?
                        if discovery[dependency] == DiscoveryState.COMPILED:
                            compiled[idep] = True
                    else:
                        # If the dependency isn't in the list at all yet
                        # then add an entry so the system knows we are
                        # expecting it later (for the above check)
                        new_discovery[dependency] = DiscoveryState.AWARE_OF

                # If the dependencies are satisfied (or there weren't
                # any) then this file can be compiled now
                if len(compiled) == 0 or all(compiled):
                    for definition in artifact.defines:
                        task = self._taskmap[(artifact.filetype,
                                              artifact.state)]
                        new_artifacts.extend(task.run([artifact]))
                        new_discovery[definition] = DiscoveryState.COMPILED
                else:
                    # If the dependencies weren't all satisfied then
                    # back on the queue for another pass later
                    new_artifacts.append(artifact)
            else:
                # If it wasn't required it could be later, so
                # put it back on the queue, unless the target
                # has been compiled, in which case it wasn't
                # needed at all!
                if (self._target not in discovery
                        or discovery[self._target] != DiscoveryState.COMPILED):
                    new_artifacts.append(artifact)

        elif artifact.state is Compiled:
            # Begin populating the list for linking
            new_objects.append(artifact)
            # But do not return a new artifact - this object
            # is "done" as far as the processing is concerned

            # But, if this is the file containing the target
            # that means everything must have been compiled
            # by this point; so we can do the linking step
            if self._target in artifact.defines:
                task = self._taskmap[(artifact.filetype,
                                      artifact.state)]
                new_artifacts.extend(task.run(objects + [artifact]))

        elif artifact.state is Linked:
            # Nothing to do at present with the final linked
            # executable, but included here for completeness
            pass
        else:
            # If the object specifies any paths in its dependencies
            # then these must exist before it can be processed
            # TODO: This needs more thorough logic and to come from
            # the database eventually
            ready = True
            for dependency in artifact.depends_on:
                if isinstance(dependency, Path):
                    if not dependency.exists():
                        ready = False

            if ready:
                # An artifact with a filetype and state set
                # will have an appropriate task that should
                # be used to run it (though unlike the old
                # implementation this is probably returning
                # the instance of the Task not the class)
                if ((artifact.filetype, artifact.state)
                        in self._taskmap):
                    task = self._taskmap[(artifact.filetype,
                                          artifact.state)]

                    new_artifacts.extend(task.run([artifact]))
            else:
                new_artifacts.append(artifact)

        # Update shared arrays
        lock.acquire()
        objects.extend(new_objects)
        for key, value in new_discovery.items():
            discovery[key] = value
        lock.release()

        return new_artifacts
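
The `DiscoveryState` values used above (AWARE_OF, SEEN, COMPILED) suggest a simple enumeration; a sketch covering only what this method needs, offered as an assumption:

from enum import Enum, auto


class DiscoveryState(Enum):
    AWARE_OF = auto()   # a dependency has been mentioned but not yet seen
    SEEN = auto()       # an artifact defining the symbol has been found
    COMPILED = auto()   # the defining artifact has been compiled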
Example #24
 def run(self, artifacts: List[Artifact]):
     new_artifact = Artifact(artifacts[0].location.with_suffix('.bar'),
                             DummyFileType2,
                             DummyState2)
     return [new_artifact]
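
The remaining test doubles used by the Engine test in Example #13 are not reproduced. Minimal stand-ins consistent with how that test uses them might look as follows (assuming `FileType`, `State` and the lock interface carry no mandatory methods beyond what is shown):

class DummyFileType(FileType):      # compared with "is", so bare subclasses
    pass


class DummyFileType2(FileType):
    pass


class DummyState(State):
    pass


class DummyState2(State):
    pass


class DummyLock:
    # Engine.process only ever calls acquire() and release().
    def acquire(self):
        pass

    def release(self):
        pass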
Example #25
    def run(self, artifacts: List[Artifact]) -> List[Artifact]:
        logger = logging.getLogger(__name__)

        if len(artifacts) == 1:
            artifact = artifacts[0]
        else:
            msg = ('Fortran Analyser expects only one Artifact, '
                   f'but was given {len(artifacts)}')
            raise TaskException(msg)

        reader = FileTextReader(artifact.location)

        new_artifact = Artifact(artifact.location, artifact.filetype, Analysed)

        state = FortranWorkingState(self.database)
        state.remove_fortran_file(reader.filename)

        normalised_source = FortranNormaliser(reader)
        scope: List[Tuple[str, str]] = []
        for line in normalised_source.line_by_line():
            logger.debug(scope)
            logger.debug('Considering: %s', line)

            if len(scope) == 0:
                unit_match: Optional[Match] \
                    = self._program_unit_pattern.match(line)
                if unit_match:
                    unit_type: str = unit_match.group(1).lower()
                    unit_name: str = unit_match.group(2).lower()
                    logger.debug('Found %s called "%s"', unit_type, unit_name)
                    unit_id = FortranUnitID(unit_name, reader.filename)
                    state.add_fortran_program_unit(unit_id)
                    new_artifact.add_definition(unit_name)
                    scope.append((unit_type, unit_name))
                    continue
            use_match: Optional[Match] \
                = self._use_pattern.match(line)
            if use_match:
                use_name: str = use_match.group(3).lower()
                if use_name in self._intrinsic_modules:
                    logger.debug('Ignoring intrinsic module "%s"', use_name)
                else:
                    if len(scope) == 0:
                        use_message \
                            = '"use" statement found outside program unit'
                        raise TaskException(use_message)
                    logger.debug('Found usage of "%s"', use_name)
                    unit_id = FortranUnitID(scope[0][1], reader.filename)
                    state.add_fortran_dependency(unit_id, use_name)
                    new_artifact.add_dependency(use_name)
                continue

            block_match: Optional[Match] = self._scoping_pattern.match(line)
            if block_match:
                # Beware: the group whose value we take is not the same as
                # the group whose presence we check.
                #
                block_name: str = block_match.group(1) \
                                  and block_match.group(2).lower()
                block_nature: str = block_match.group(3).lower()
                logger.debug('Found %s called "%s"', block_nature, block_name)
                scope.append((block_nature, block_name))
                continue

            proc_match: Optional[Match] \
                = self._procedure_pattern.match(line)
            if proc_match:
                proc_nature = proc_match.group(1).lower()
                proc_name = proc_match.group(2).lower()
                logger.debug('Found %s called "%s"', proc_nature, proc_name)
                # Note: we append a tuple, hence the double parentheses.
                scope.append((proc_nature, proc_name))
                continue

            iface_match: Optional[Match] = self._interface_pattern.match(line)
            if iface_match:
                iface_name = iface_match.group(1) \
                             and iface_match.group(1).lower()
                logger.debug('Found interface called "%s"', iface_name)
                scope.append(('interface', iface_name))
                continue

            type_match: Optional[Match] = self._type_pattern.match(line)
            if type_match:
                type_name = type_match.group(3).lower()
                logger.debug('Found type called "%s"', type_name)
                scope.append(('type', type_name))
                continue

            end_match: Optional[Match] = self._end_block_pattern.match(line)
            if end_match:
                end_nature: str = end_match.group(1) \
                    and end_match.group(1).lower()
                end_name: str = end_match.group(2) \
                    and end_match.group(2).lower()
                logger.debug('Found end of %s called %s', end_nature, end_name)
                exp: Tuple[str, str] = scope.pop()

                if end_nature is not None:
                    if end_nature != exp[0]:
                        end_message = 'Expected end of {exp} "{name}" ' \
                                      'but found {found}'
                        end_values = {
                            'exp': exp[0],
                            'name': exp[1],
                            'found': end_nature
                        }
                        raise TaskException(end_message.format(**end_values))
                if end_name is not None:
                    if end_name != exp[1]:
                        end_message = 'Expected end of {exp} "{name}" ' \
                                      'but found end of {found}'
                        end_values = {
                            'exp': exp[0],
                            'name': exp[1],
                            'found': end_name
                        }
                        raise TaskException(end_message.format(**end_values))

        return [new_artifact]
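
The `_use_pattern` is another attribute not shown in these examples. For the `group(3)` access above to yield the module name, a pattern along the following lines would do; this is a sketch only and assumes the normaliser has already reduced the source to one statement per line:

import re

# Hypothetical pattern: group(1) is the optional ", intrinsic" attribute,
# group(2) the optional "::", group(3) the used module name.
_use_pattern = re.compile(
    r'^\s*use\b\s*(,\s*intrinsic)?\s*(::)?\s*(\w+)',
    re.IGNORECASE)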
Example #26
 def test_add_string_dependency(self):
     test_path = Path('/test/path')
     artifact = Artifact(test_path, Unknown, New)
     artifact.add_dependency("foo")
     assert artifact.depends_on == ["foo"]
Example #27
    def test_analyser_scope(self, caplog, tmp_path):
        """
        Tests that the analyser is able to track scope correctly.
        """
        caplog.set_level(logging.DEBUG)

        test_file: Path = tmp_path / 'test.f90'
        test_file.write_text(
            dedent('''
                   program fred

                     implicit none

                     if (something) then
                       named: do i=1, 10
                       end do named
                     endif

                   contains

                     subroutine yabadabadoo()
                     end

                   end program

                   module barney

                     implicit none

                     type betty_type
                       integer :: property
                     contains
                       procedure inspect
                     end type

                     interface betty_type
                       procedure betty_constructor
                     end

                   contains

                     function inspect(this)
                       class(betty_type), intent(in) :: this
                       integer :: inspect
                       inspect = this%property
                     end function inspect

                   end module
                   '''))

        database: SqliteStateDatabase = SqliteStateDatabase(tmp_path)
        test_unit = FortranAnalyser(tmp_path)
        test_artifact = Artifact(test_file, FortranSource, Raw)
        output_artifacts = test_unit.run([test_artifact])

        # Confirm database is updated
        working_state = FortranWorkingState(database)
        assert list(working_state) \
            == [FortranInfo(FortranUnitID('barney', tmp_path/'test.f90'), []),
                FortranInfo(FortranUnitID('fred', tmp_path/'test.f90'), [])]

        # Confirm returned Artifact is updated
        assert len(output_artifacts) == 1
        assert output_artifacts[0].defines == ['fred', 'barney']
        assert output_artifacts[0].depends_on == []
        assert output_artifacts[0].location == test_file
        assert output_artifacts[0].filetype is FortranSource
        assert output_artifacts[0].state is Analysed
Example #28
 def test_add_path_dependency(self):
     test_path = Path('/test/path')
     artifact = Artifact(test_path, Unknown, New)
     dep = Path('/path/to/bar')
     artifact.add_dependency(dep)
     assert artifact.depends_on == [dep]
Example #29
 def test_add_definition(self):
     test_path = Path('/test/path')
     artifact = Artifact(test_path, Unknown, New)
     artifact.add_definition("bar")
     assert artifact.defines == ["bar"]
Example #30
    def run(self, artifacts: List[Artifact]) -> List[Artifact]:
        logger = logging.getLogger(__name__)

        if len(artifacts) == 1:
            artifact = artifacts[0]
        else:
            msg = ('Fortran Analyser expects only one Artifact, '
                   f'but was given {len(artifacts)}')
            raise TaskException(msg)

        reader = FileTextReader(artifact.location)

        new_artifact = Artifact(artifact.location, artifact.filetype, Analysed)

        state = FortranWorkingState(self.database)
        state.remove_fortran_file(reader.filename)
        logger.debug('Analysing: %s', reader.filename)

        # If this file defines any C symbol bindings it may also
        # end up with an entry in the C part of the database
        cstate = CWorkingState(self.database)
        cstate.remove_c_file(reader.filename)

        normalised_source = FortranNormaliser(reader)
        scope: List[Tuple[str, str]] = []
        for line in normalised_source.line_by_line():
            logger.debug(scope)
            logger.debug('Considering: %s', line)

            if len(scope) == 0:
                unit_match: Optional[Match] \
                    = self._program_unit_pattern.match(line)
                if unit_match is not None:
                    unit_type: str = unit_match.group(1).lower()
                    unit_name: str = unit_match.group(2).lower()
                    logger.debug('Found %s called "%s"', unit_type, unit_name)
                    unit_id = FortranUnitID(unit_name, reader.filename)
                    state.add_fortran_program_unit(unit_id)
                    new_artifact.add_definition(unit_name)
                    scope.append((unit_type, unit_name))
                    continue
            use_match: Optional[Match] \
                = self._use_pattern.match(line)
            if use_match is not None:
                use_name: str = use_match.group(3).lower()
                if use_name in self._intrinsic_modules:
                    logger.debug('Ignoring intrinsic module "%s"', use_name)
                else:
                    if len(scope) == 0:
                        use_message \
                            = '"use" statement found outside program unit'
                        raise TaskException(use_message)
                    logger.debug('Found usage of "%s"', use_name)
                    unit_id = FortranUnitID(scope[0][1], reader.filename)
                    state.add_fortran_dependency(unit_id, use_name)
                    new_artifact.add_dependency(use_name)
                continue

            block_match: Optional[Match] = self._scoping_pattern.match(line)
            if block_match is not None:
                # Beware: the group whose value we take is not the same as
                # the group whose presence we check.
                #
                block_name: str = block_match.group(1) \
                                  and block_match.group(2).lower()
                block_nature: str = block_match.group(3).lower()
                logger.debug('Found %s called "%s"', block_nature, block_name)
                scope.append((block_nature, block_name))
                continue

            proc_match: Optional[Match] \
                = self._procedure_pattern.match(line)
            if proc_match is not None:
                proc_nature = proc_match.group(1).lower()
                proc_name = proc_match.group(2).lower()
                logger.debug('Found %s called "%s"', proc_nature, proc_name)
                scope.append((proc_nature, proc_name))

                # Check for the procedure being symbol-bound to C
                cbind_match: Optional[Match] \
                    = self._cbind_pattern.match(line)
                if cbind_match is not None:
                    cbind_name = cbind_match.group(2)
                    # The name keyword on the bind statement is optional.
                    # If it doesn't exist, the procedure name is used
                    if cbind_name is None:
                        cbind_name = proc_name
                    cbind_name = cbind_name.lower().strip("'\"")
                    logger.debug('Bound to C symbol "%s"', cbind_name)
                    # A bind within an interface block means this is
                    # exposure of a C-defined function to Fortran,
                    # otherwise it is going the other way (allowing C
                    # code to call the Fortran procedure)
                    if any([stype == "interface" for stype, _ in scope]):
                        # TODO: This is sort of hijacking the mechanism used
                        # for Fortran module dependencies, only using the
                        # symbol name. Longer term we probably need a more
                        # elegant solution
                        logger.debug('In an interface block; so a dependency')
                        unit_id = FortranUnitID(scope[0][1], reader.filename)
                        state.add_fortran_dependency(unit_id, cbind_name)
                        new_artifact.add_dependency(cbind_name)
                    else:
                        # Add to the C database
                        logger.debug('Not an interface block; so a definition')
                        symbol_id = CSymbolID(cbind_name, reader.filename)
                        cstate.add_c_symbol(symbol_id)
                        new_artifact.add_definition(cbind_name)
                continue

            cbind_match = self._cbind_pattern.match(line)
            if cbind_match is not None:
                # This should be a line binding from C to a variable definition
                # (procedure binds are dealt with above)
                cbind_name = cbind_match.group(2)

                # The name keyword on the bind statement is optional.
                # If it doesn't exist, the Fortran variable name is used
                if cbind_name is None:
                    var_search = re.search(r'.*::\s*(\w+)', line)
                    if var_search:
                        cbind_name = var_search.group(1)
                    else:
                        cbind_message \
                            = 'failed to find variable name ' \
                              'on C bound variable'
                        raise TaskException(cbind_message)

                cbind_name = cbind_name.lower().strip("'\"")
                logger.debug('Found C bound variable called "%s"', cbind_name)

                # Add to the C database
                symbol_id = CSymbolID(cbind_name, reader.filename)
                cstate.add_c_symbol(symbol_id)
                new_artifact.add_definition(cbind_name)

            iface_match: Optional[Match] = self._interface_pattern.match(line)
            if iface_match is not None:
                iface_name = iface_match.group(1) \
                             and iface_match.group(1).lower()
                logger.debug('Found interface called "%s"', iface_name)
                scope.append(('interface', iface_name))
                continue

            type_match: Optional[Match] = self._type_pattern.match(line)
            if type_match is not None:
                type_name = type_match.group(3).lower()
                logger.debug('Found type called "%s"', type_name)
                scope.append(('type', type_name))
                continue

            end_match: Optional[Match] = self._end_block_pattern.match(line)
            if end_match is not None:
                end_nature: str = end_match.group(1) \
                    and end_match.group(1).lower()
                end_name: str = end_match.group(2) \
                    and end_match.group(2).lower()
                logger.debug('Found end of %s called %s', end_nature, end_name)
                exp: Tuple[str, str] = scope.pop()

                if end_nature is not None:
                    if end_nature != exp[0]:
                        end_message = 'Expected end of {exp} "{name}" ' \
                                      'but found {found}'
                        end_values = {
                            'exp': exp[0],
                            'name': exp[1],
                            'found': end_nature
                        }
                        raise TaskException(end_message.format(**end_values))
                if end_name is not None:
                    if end_name != exp[1]:
                        end_message = 'Expected end of {exp} "{name}" ' \
                                      'but found end of {found}'
                        end_values = {
                            'exp': exp[0],
                            'name': exp[1],
                            'found': end_name
                        }
                        raise TaskException(end_message.format(**end_values))

        return [new_artifact]
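
Finally, the `_cbind_pattern` is also left undefined here. Given that `group(2)` above holds the optional bound name, quotes included (hence the later strip of quote characters), a hedged sketch might be:

import re

# Hypothetical pattern: group(1) is the optional ", name=..." clause,
# group(2) the bound symbol name, quotes included when present.
_cbind_pattern = re.compile(
    r'.*bind\s*\(\s*c\s*(,\s*name\s*=\s*(\S+?))?\s*\)',
    re.IGNORECASE)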