Example #1
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        first_src = '\n'.join(testcase.input)
        files_dict = dict(testcase.files)
        second_src = files_dict['tmp/next.py']
        options = parse_options(first_src, testcase, 1)

        messages1, files1 = self.build(first_src, options)
        messages2, files2 = self.build(second_src, options)

        a = []
        if messages1:
            a.extend(messages1)
        if messages2:
            a.append('== next ==')
            a.extend(messages2)

        assert files1 is not None and files2 is not None, ('cases where CompileError'
                                                           ' occurred should not be run')
        prefix = '__main__'
        snapshot1 = snapshot_symbol_table(prefix, files1['__main__'].names)
        snapshot2 = snapshot_symbol_table(prefix, files2['__main__'].names)
        diff = compare_symbol_table_snapshots(prefix, snapshot1, snapshot2)
        for trigger in sorted(diff):
            a.append(trigger)

        assert_string_arrays_equal(
            testcase.output, a,
            'Invalid output ({}, line {})'.format(testcase.file,
                                                  testcase.line))
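
A minimal, self-contained sketch of the pattern this test harness relies on: build two versions of a program, snapshot the symbol table of each, and diff the snapshots. The helpers below are hypothetical stand-ins for mypy's snapshot_symbol_table and compare_symbol_table_snapshots, using plain signature strings instead of real symbol table entries.

from typing import Dict, Set

def snapshot(prefix: str, names: Dict[str, str]) -> Dict[str, str]:
    # Fully qualify each name so a diff can be reported per target.
    return {'%s.%s' % (prefix, name): sig for name, sig in names.items()}

def diff_snapshots(snap1: Dict[str, str], snap2: Dict[str, str]) -> Set[str]:
    # A name changed if it was added, removed, or its signature differs.
    return {name for name in snap1.keys() | snap2.keys()
            if snap1.get(name) != snap2.get(name)}

first = snapshot('__main__', {'f': 'def (x: int) -> int', 'g': 'def () -> None'})
second = snapshot('__main__', {'f': 'def (x: str) -> int', 'g': 'def () -> None'})
print(sorted(diff_snapshots(first, second)))  # ['__main__.f']
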
Example #2
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        first_src = '\n'.join(testcase.input)
        files_dict = dict(testcase.files)
        second_src = files_dict['tmp/next.py']

        messages1, files1 = self.build(first_src)
        messages2, files2 = self.build(second_src)

        a = []
        if messages1:
            a.extend(messages1)
        if messages2:
            a.append('== next ==')
            a.extend(messages2)

        assert files1 is not None and files2 is not None, (
            'cases where CompileError'
            ' occurred should not be run')
        prefix = '__main__'
        snapshot1 = snapshot_symbol_table(prefix, files1['__main__'].names)
        snapshot2 = snapshot_symbol_table(prefix, files2['__main__'].names)
        diff = compare_symbol_table_snapshots(prefix, snapshot1, snapshot2)
        for trigger in sorted(diff):
            a.append(trigger)

        assert_string_arrays_equal(
            testcase.output, a,
            'Invalid output ({}, line {})'.format(testcase.file,
                                                  testcase.line))
Example #3
    def update_single(
        self, module: str, path: str
    ) -> Tuple[List[str], List[Tuple[str, str]], Tuple[str, str], bool]:
        """Update a single modified module.

        If the module contains imports of previously unseen modules, only process one of
        the new modules and return the remaining work to be done.

        Returns:
            Tuple with these items:

            - Error messages
            - Remaining modules to process as (module id, path) tuples
            - Module which was actually processed as (id, path) tuple
            - Whether there was a blocking error in the module
        """
        self.manager.log_fine_grained('--- update single %r ---' % module)

        # TODO: If new module brings in other modules, we parse some files multiple times.
        manager = self.manager
        previous_modules = self.previous_modules
        graph = self.graph

        # Record a symbol table snapshot of the old version of the changed module.
        old_snapshots = {}  # type: Dict[str, Dict[str, SnapshotItem]]
        if module in manager.modules:
            snapshot = snapshot_symbol_table(module,
                                             manager.modules[module].names)
            old_snapshots[module] = snapshot

        manager.errors.reset()
        result = update_single_isolated(module, path, manager,
                                        previous_modules, graph)
        if isinstance(result, BlockedUpdate):
            # Blocking error -- just give up
            module, path, remaining, errors = result
            self.previous_modules = get_module_to_path_map(manager)
            return errors, remaining, (module, path), True
        assert isinstance(result, NormalUpdate)  # Work around #4124
        module, path, remaining, tree = result

        # TODO: What to do with stale dependencies?
        triggered = calculate_active_triggers(manager, old_snapshots,
                                              {module: tree})
        if is_verbose(self.manager):
            filtered = [
                trigger for trigger in triggered if not trigger.endswith('__>')
            ]
            self.manager.log_fine_grained('triggered: %r' % sorted(filtered))
        self.triggered.extend(triggered | self.previous_targets_with_errors)
        collect_dependencies({module: tree}, self.deps, graph)
        remaining += propagate_changes_using_dependencies(
            manager, graph, self.deps, triggered, {module},
            self.previous_targets_with_errors)

        # Preserve state needed for the next update.
        self.previous_targets_with_errors = manager.errors.targets()
        self.previous_modules = get_module_to_path_map(manager)

        return manager.errors.new_messages(), remaining, (module, path), False
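
The BlockedUpdate/NormalUpdate handling above is a tagged-union dispatch: the isinstance check both selects the branch and narrows the type of the unpacked fields. A sketch with simplified, hypothetical result types (mypy's real ones carry more fields):

from typing import List, NamedTuple, Optional, Tuple, Union

class BlockedUpdate(NamedTuple):
    module: str
    errors: List[str]

class NormalUpdate(NamedTuple):
    module: str
    tree: Optional[str]  # stands in for the parsed MypyFile

UpdateResult = Union[BlockedUpdate, NormalUpdate]

def handle(result: UpdateResult) -> Tuple[str, bool]:
    if isinstance(result, BlockedUpdate):
        # Blocking error -- give up on this module for now.
        return result.module, True
    # isinstance narrowed the union, so this is a NormalUpdate.
    return result.module, False

print(handle(NormalUpdate('m', 'tree')))    # ('m', False)
print(handle(BlockedUpdate('m', ['err'])))  # ('m', True)
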
Example #4
def calculate_active_triggers(manager: BuildManager,
                              old_snapshots: Dict[str, Dict[str, SnapshotItem]],
                              new_modules: Dict[str, Optional[MypyFile]]) -> Set[str]:
    """Determine activated triggers by comparing old and new symbol tables.

    For example, if only the signature of function m.f is different in the new
    symbol table, return {'<m.f>'}.
    """
    names = set()  # type: Set[str]
    for id in new_modules:
        snapshot1 = old_snapshots.get(id)
        if snapshot1 is None:
            names.add(id)
            snapshot1 = {}
        new = new_modules[id]
        if new is None:
            snapshot2 = snapshot_symbol_table(id, SymbolTable())
            names.add(id)
        else:
            snapshot2 = snapshot_symbol_table(id, new.names)
        diff = compare_symbol_table_snapshots(id, snapshot1, snapshot2)
        package_nesting_level = id.count('.')
        for item in diff.copy():
            if (item.count('.') <= package_nesting_level + 1
                    and item.split('.')[-1] not in ('__builtins__',
                                                    '__file__',
                                                    '__name__',
                                                    '__package__',
                                                    '__doc__')):
                # Activate catch-all wildcard trigger for top-level module changes (used for
                # "from m import *"). This also gets triggered by changes to module-private
                # entries, but as these unneeded dependencies only result in extra processing,
                # it's a minor problem.
                #
                # TODO: Some __* names cause mistriggers. Fix the underlying issue instead of
                #     special casing them here.
                diff.add(id + WILDCARD_TAG)
            if item.count('.') > package_nesting_level + 1:
                # These are for changes within classes, used by protocols.
                diff.add(item.rsplit('.', 1)[0] + WILDCARD_TAG)

        names |= diff
    return {make_trigger(name) for name in names}
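
The nesting-level arithmetic above is the subtle part: for module 'pkg.mod', package_nesting_level is 1, so an item with at most 2 dots ('pkg.mod.f') is a top-level definition, while anything deeper ('pkg.mod.Cls.attr') is a change inside a class. A toy sketch of just that classification; WILDCARD_TAG is assumed to be the plain suffix '.*', matching mypy's convention:

WILDCARD_TAG = '.*'

def wildcard_target(module_id: str, item: str) -> str:
    # One dot more than the module id means a top-level definition;
    # deeper items are members of a class.
    package_nesting_level = module_id.count('.')
    if item.count('.') <= package_nesting_level + 1:
        # Top-level change: fire the module-wide wildcard
        # (used for "from m import *").
        return module_id + WILDCARD_TAG
    # Change within a class: fire the enclosing class's wildcard.
    return item.rsplit('.', 1)[0] + WILDCARD_TAG

print(wildcard_target('pkg.mod', 'pkg.mod.f'))         # pkg.mod.*
print(wildcard_target('pkg.mod', 'pkg.mod.Cls.attr'))  # pkg.mod.Cls.*
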
Example #5
def calculate_active_triggers(manager: BuildManager,
                              old_snapshots: Dict[str, Dict[str, SnapshotItem]],
                              new_modules: Dict[str, Optional[MypyFile]]) -> Set[str]:
    """Determine activated triggers by comparing old and new symbol tables.

    For example, if only the signature of function m.f is different in the new
    symbol table, return {'<m.f>'}.
    """
    names = set()  # type: Set[str]
    for id in new_modules:
        snapshot1 = old_snapshots[id]
        new = new_modules[id]
        if new is None:
            snapshot2 = snapshot_symbol_table(id, SymbolTable())
            names.add(id)
        else:
            snapshot2 = snapshot_symbol_table(id, new.names)
        names |= compare_symbol_table_snapshots(id, snapshot1, snapshot2)
    return {make_trigger(name) for name in names}
Example #6
    def update(self, changed_modules: List[str]) -> List[str]:
        """Update previous build result by processing changed modules.

        Also propagate changes to other modules as needed, but only process
        those parts of other modules that are affected by the changes. Retain
        the existing ASTs and symbol tables of unaffected modules.

        TODO: What about blocking errors?

        Args:
            changed_modules: Modules changed since the previous update/build (assume
                this is correct; not validated here)

        Returns:
            A list of errors.
        """
        if DEBUG:
            print('==== update ====')
        manager = self.manager
        graph = self.graph

        # Record symbol table snapshots of old versions of the changed modules.
        old_snapshots = {}
        for id in changed_modules:
            if id in manager.modules:
                snapshot = snapshot_symbol_table(id, manager.modules[id].names)
                old_snapshots[id] = snapshot

        manager.errors.reset()
        new_modules = build_incremental_step(manager, changed_modules, graph)
        # TODO: What to do with stale dependencies?
        triggered = calculate_active_triggers(manager, old_snapshots,
                                              new_modules)
        if DEBUG:
            print('triggered:', sorted(triggered))
        update_dependencies(new_modules, self.deps, graph, self.options)
        propagate_changes_using_dependencies(manager, graph, self.deps,
                                             triggered, set(changed_modules),
                                             self.previous_targets_with_errors,
                                             graph)
        self.previous_targets_with_errors = manager.errors.targets()
        return manager.errors.messages()
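
The method above follows one fixed cycle: snapshot the old symbol tables, rebuild the changed modules, diff old against new to get triggers, then propagate along dependencies. A schematic, self-contained model of that cycle (none of these helpers are mypy's real APIs; a "symbol table" here is just a name-to-signature dict):

from typing import Callable, Dict, List, Set

SymTable = Dict[str, str]

def update_cycle(modules: Dict[str, SymTable],
                 changed: List[str],
                 rebuild: Callable[[str], SymTable]) -> Set[str]:
    # 1. Snapshot the old versions of the changed modules.
    old = {m: dict(modules[m]) for m in changed if m in modules}
    # 2. Rebuild the changed modules in place.
    for m in changed:
        modules[m] = rebuild(m)
    # 3. Diff old vs. new to compute the triggered names.
    triggered = set()
    for m in changed:
        before, after = old.get(m, {}), modules[m]
        triggered |= {'<%s.%s>' % (m, name)
                      for name in before.keys() | after.keys()
                      if before.get(name) != after.get(name)}
    # 4. Propagation would now reprocess everything depending on `triggered`.
    return triggered

mods = {'m': {'f': 'int -> int'}}
print(update_cycle(mods, ['m'], lambda m: {'f': 'str -> int'}))  # {'<m.f>'}
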
Example #7
def reprocess_nodes(manager: BuildManager, graph: Dict[str, State],
                    module_id: str, nodeset: Set[DeferredNode],
                    deps: Dict[str, Set[str]]) -> Set[str]:
    """Reprocess a set of nodes within a single module.

    Return fired triggers.
    """
    if module_id not in graph:
        manager.log_fine_grained(
            '%s not in graph (blocking errors or deleted?)' % module_id)
        return set()

    file_node = manager.modules[module_id]
    old_symbols = find_symbol_tables_recursive(file_node.fullname(),
                                               file_node.names)
    old_symbols = {name: names.copy() for name, names in old_symbols.items()}
    old_symbols_snapshot = snapshot_symbol_table(file_node.fullname(),
                                                 file_node.names)

    def key(node: DeferredNode) -> int:
        # Unlike modules which are sorted by name within SCC,
        # nodes within the same module are sorted by line number, because
        # this is how they are processed in normal mode.
        return node.node.line

    nodes = sorted(nodeset, key=key)

    # TODO: ignore_all argument to set_file_ignored_lines
    manager.errors.set_file_ignored_lines(file_node.path,
                                          file_node.ignored_lines)

    # Strip semantic analysis information.
    for deferred in nodes:
        strip_target(deferred.node)
    semantic_analyzer = manager.semantic_analyzer

    patches = []  # type: List[Tuple[int, Callable[[], None]]]

    # Second pass of semantic analysis. We don't redo the first pass, because it only
    # does local things that won't go stale.
    for deferred in nodes:
        with semantic_analyzer.file_context(
                file_node=file_node,
                fnam=file_node.path,
                options=manager.options,
                active_type=deferred.active_typeinfo):
            manager.semantic_analyzer.refresh_partial(deferred.node, patches)

    # Third pass of semantic analysis.
    for deferred in nodes:
        with semantic_analyzer.file_context(
                file_node=file_node,
                fnam=file_node.path,
                options=manager.options,
                active_type=deferred.active_typeinfo):
            manager.semantic_analyzer_pass3.refresh_partial(
                deferred.node, patches)

    apply_semantic_analyzer_patches(patches)

    # Merge symbol tables to preserve identities of AST nodes. The file node will remain
    # the same, but other nodes may have been recreated with different identities, such as
    # NamedTuples defined using assignment statements.
    new_symbols = find_symbol_tables_recursive(file_node.fullname(),
                                               file_node.names)
    for name in old_symbols:
        if name in new_symbols:
            merge_asts(file_node, old_symbols[name], file_node,
                       new_symbols[name])

    # Type check.
    checker = graph[module_id].type_checker()
    checker.reset()
    # We seem to need additional passes in fine-grained incremental mode.
    checker.pass_num = 0
    checker.last_pass = 3
    more = checker.check_second_pass(nodes)
    while more:
        more = False
        if graph[module_id].type_checker().check_second_pass():
            more = True

    new_symbols_snapshot = snapshot_symbol_table(file_node.fullname(),
                                                 file_node.names)
    # Check if any attribute types were changed and need to be propagated further.
    changed = compare_symbol_table_snapshots(file_node.fullname(),
                                             old_symbols_snapshot,
                                             new_symbols_snapshot)
    new_triggered = {make_trigger(name) for name in changed}

    # Dependencies may have changed.
    update_deps(module_id, nodes, graph, deps, manager.options)

    # Report missing imports.
    verify_dependencies(graph[module_id], manager)

    return new_triggered
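
The merge step above exists because dependents hold references to the old AST nodes: after reanalysis creates fresh objects, the new state is copied into the old objects so those references stay valid. A toy model of identity-preserving merging (not mypy's real merge_asts logic):

from typing import Dict

class Node:
    def __init__(self, name: str, typ: str) -> None:
        self.name = name
        self.typ = typ

def merge(old: Dict[str, Node], new: Dict[str, Node]) -> Dict[str, Node]:
    merged = {}
    for name, node in new.items():
        if name in old:
            # Mutate the old object in place so external references
            # observe the update; identity is preserved.
            old[name].typ = node.typ
            merged[name] = old[name]
        else:
            merged[name] = node  # a genuinely new name gets a new object
    return merged

before = {'f': Node('f', 'int')}
external_ref = before['f']
table = merge(before, {'f': Node('f', 'str')})
print(table['f'] is external_ref, external_ref.typ)  # True str
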
Example #8
def reprocess_nodes(manager: BuildManager,
                    graph: Dict[str, State],
                    module_id: str,
                    nodeset: Set[FineGrainedDeferredNode],
                    deps: Dict[str, Set[str]],
                    processed_targets: List[str]) -> Set[str]:
    """Reprocess a set of nodes within a single module.

    Return fired triggers.
    """
    if module_id not in graph:
        manager.log_fine_grained('%s not in graph (blocking errors or deleted?)' %
                                 module_id)
        return set()

    file_node = manager.modules[module_id]
    old_symbols = find_symbol_tables_recursive(file_node.fullname, file_node.names)
    old_symbols = {name: names.copy() for name, names in old_symbols.items()}
    old_symbols_snapshot = snapshot_symbol_table(file_node.fullname, file_node.names)

    def key(node: FineGrainedDeferredNode) -> int:
        # Unlike modules which are sorted by name within SCC,
        # nodes within the same module are sorted by line number, because
        # this is how they are processed in normal mode.
        return node.node.line

    nodes = sorted(nodeset, key=key)

    options = graph[module_id].options
    manager.errors.set_file_ignored_lines(
        file_node.path, file_node.ignored_lines, options.ignore_errors)

    targets = set()
    for node in nodes:
        target = target_from_node(module_id, node.node)
        if target is not None:
            targets.add(target)
    manager.errors.clear_errors_in_targets(file_node.path, targets)

    # If one of the nodes is the module itself, emit any errors that
    # happened before semantic analysis.
    for target in targets:
        if target == module_id:
            for info in graph[module_id].early_errors:
                manager.errors.add_error_info(info)

    # Strip semantic analysis information.
    saved_attrs = {}  # type: SavedAttributes
    for deferred in nodes:
        processed_targets.append(deferred.node.fullname)
        strip_target(deferred.node, saved_attrs)
    semantic_analysis_for_targets(graph[module_id], nodes, graph, saved_attrs)
    # Merge symbol tables to preserve identities of AST nodes. The file node will remain
    # the same, but other nodes may have been recreated with different identities, such as
    # NamedTuples defined using assignment statements.
    new_symbols = find_symbol_tables_recursive(file_node.fullname, file_node.names)
    for name in old_symbols:
        if name in new_symbols:
            merge_asts(file_node, old_symbols[name], file_node, new_symbols[name])

    # Type check.
    checker = graph[module_id].type_checker()
    checker.reset()
    # We seem to need additional passes in fine-grained incremental mode.
    checker.pass_num = 0
    checker.last_pass = 3
    more = checker.check_second_pass(nodes)
    while more:
        more = False
        if graph[module_id].type_checker().check_second_pass():
            more = True

    if manager.options.export_types:
        manager.all_types.update(graph[module_id].type_map())

    new_symbols_snapshot = snapshot_symbol_table(file_node.fullname, file_node.names)
    # Check if any attribute types were changed and need to be propagated further.
    changed = compare_symbol_table_snapshots(file_node.fullname,
                                             old_symbols_snapshot,
                                             new_symbols_snapshot)
    new_triggered = {make_trigger(name) for name in changed}

    # Dependencies may have changed.
    update_deps(module_id, nodes, graph, deps, options)

    # Report missing imports.
    graph[module_id].verify_dependencies()

    graph[module_id].free_state()

    return new_triggered
Example #9
    def update_module(self,
                      module: str,
                      path: str,
                      force_removed: bool) -> Tuple[List[Tuple[str, str]],
                                                    Tuple[str, str],
                                                    Optional[List[str]]]:
        """Update a single modified module.

        If the module contains imports of previously unseen modules, only process one of
        the new modules and return the remaining work to be done.

        Args:
            module: Id of the module
            path: File system path of the module
            force_removed: If True, consider module removed from the build even if path
                exists (used for removing an existing file from the build)

        Returns:
            Tuple with these items:

            - Remaining modules to process as (module id, path) tuples
            - Module which was actually processed as (id, path) tuple
            - If there was a blocking error, the error messages from it
        """
        self.manager.log_fine_grained('--- update single %r ---' % module)
        self.updated_modules.append(module)

        manager = self.manager
        previous_modules = self.previous_modules
        graph = self.graph

        ensure_deps_loaded(module, self.deps, graph)

        # If this is an already existing module, make sure that we have
        # its tree loaded so that we can snapshot it for comparison.
        ensure_trees_loaded(manager, graph, [module])

        t0 = time.time()
        # Record a symbol table snapshot of the old version of the changed module.
        old_snapshots = {}  # type: Dict[str, Dict[str, SnapshotItem]]
        if module in manager.modules:
            snapshot = snapshot_symbol_table(module, manager.modules[module].names)
            old_snapshots[module] = snapshot

        manager.errors.reset()
        self.processed_targets.append(module)
        result = update_module_isolated(module, path, manager, previous_modules, graph,
                                        force_removed)
        if isinstance(result, BlockedUpdate):
            # Blocking error -- just give up
            module, path, remaining, errors = result
            self.previous_modules = get_module_to_path_map(graph)
            return remaining, (module, path), errors
        assert isinstance(result, NormalUpdate)  # Work around #4124
        module, path, remaining, tree = result

        # TODO: What to do with stale dependencies?
        t1 = time.time()
        triggered = calculate_active_triggers(manager, old_snapshots, {module: tree})
        if is_verbose(self.manager):
            filtered = [trigger for trigger in triggered
                        if not trigger.endswith('__>')]
            self.manager.log_fine_grained('triggered: %r' % sorted(filtered))
        self.triggered.extend(triggered | self.previous_targets_with_errors)
        if module in graph:
            graph[module].update_fine_grained_deps(self.deps)
            graph[module].free_state()
        remaining += propagate_changes_using_dependencies(
            manager, graph, self.deps, triggered,
            {module},
            targets_with_errors=set(), processed_targets=self.processed_targets)
        t2 = time.time()
        manager.add_stats(
            update_isolated_time=t1 - t0,
            propagate_time=t2 - t1)

        # Preserve state needed for the next update.
        self.previous_targets_with_errors.update(manager.errors.targets())
        self.previous_modules = get_module_to_path_map(graph)

        return remaining, (module, path), None
Example #10
    def update_single(
        self, module: str, path: str
    ) -> Tuple[List[str], List[Tuple[str, str]], Tuple[str, str], bool]:
        """Update a single modified module.

        If the module contains imports of previously unseen modules, only process one of
        the new modules and return the remaining work to be done.

        Returns:
            Tuple with these items:

            - Error messages
            - Remaining modules to process as (module id, path) tuples
            - Module which was actually processed as (id, path) tuple
            - Whether there was a blocking error in the module
        """
        if DEBUG:
            print('--- update single %r ---' % module)

        # TODO: If new module brings in other modules, we parse some files multiple times.
        manager = self.manager
        previous_modules = self.previous_modules

        # Record a symbol table snapshot of the old version of the changed module.
        old_snapshots = {}  # type: Dict[str, Dict[str, SnapshotItem]]
        if module in manager.modules:
            snapshot = snapshot_symbol_table(module,
                                             manager.modules[module].names)
            old_snapshots[module] = snapshot

        manager.errors.reset()
        result = update_single_isolated(module, path, manager,
                                        previous_modules)
        if isinstance(result, BlockedUpdate):
            # Blocking error -- just give up
            module, path, remaining = result
            self.previous_modules = get_module_to_path_map(manager)
            return manager.errors.messages(), remaining, (module, path), True
        assert isinstance(result, NormalUpdate)  # Work around #4124
        module, path, remaining, tree, graph = result

        # TODO: What to do with stale dependencies?
        triggered = calculate_active_triggers(manager, old_snapshots,
                                              {module: tree})
        if DEBUG:
            print('triggered:', sorted(triggered))
        update_dependencies({module: tree}, self.deps, graph, self.options)
        propagate_changes_using_dependencies(manager, graph, self.deps,
                                             triggered, {module},
                                             self.previous_targets_with_errors,
                                             graph)

        # Preserve state needed for the next update.
        self.previous_targets_with_errors = manager.errors.targets()
        # If deleted, module won't be in the graph.
        if module in graph:
            # Generate metadata so that we can reuse the AST in the next run.
            graph[module].write_cache()
        for id, state in graph.items():
            # Look up missing ASTs from saved cache.
            if state.tree is None and id in manager.saved_cache:
                meta, tree, type_map = manager.saved_cache[id]
                state.tree = tree
        mark_all_meta_as_memory_only(graph, manager)
        manager.saved_cache = preserve_full_cache(graph, manager)
        self.previous_modules = get_module_to_path_map(manager)
        self.type_maps = extract_type_maps(graph)

        return manager.errors.messages(), remaining, (module, path), False
Example #11
    def update(self, changed_modules: List[Tuple[str, str]]) -> List[str]:
        """Update previous build result by processing changed modules.

        Also propagate changes to other modules as needed, but only process
        those parts of other modules that are affected by the changes. Retain
        the existing ASTs and symbol tables of unaffected modules.

        Create new graph with new State objects, but reuse original BuildManager.

        Args:
            changed_modules: Modules changed since the previous update/build; each is
                a (module id, path) tuple. Includes modified, added and deleted modules.
                Assume this is correct; it's not validated here.

        Returns:
            A list of errors.
        """
        changed_ids = [id for id, _ in changed_modules]
        if DEBUG:
            print('==== update %s ====' % changed_ids)
        if self.blocking_errors:
            # TODO: Relax this requirement
            assert self.blocking_errors == changed_ids
        manager = self.manager
        graph = self.graph

        # Record symbol table snapshots of old versions of the changed modules.
        old_snapshots = {}
        for id, _ in changed_modules:
            if id in manager.modules:
                snapshot = snapshot_symbol_table(id, manager.modules[id].names)
                old_snapshots[id] = snapshot
            else:
                old_snapshots[id] = {}

        manager.errors.reset()
        try:
            new_modules, graph = build_incremental_step(
                manager, changed_modules, graph)
        except CompileError as err:
            self.blocking_errors = changed_ids
            return err.messages
        self.blocking_errors = []

        # TODO: What to do with stale dependencies?
        triggered = calculate_active_triggers(manager, old_snapshots,
                                              new_modules)
        if DEBUG:
            print('triggered:', sorted(triggered))
        update_dependencies(new_modules, self.deps, graph, self.options)
        propagate_changes_using_dependencies(manager, graph, self.deps,
                                             triggered, set(changed_ids),
                                             self.previous_targets_with_errors,
                                             graph)

        # Preserve state needed for the next update.
        self.previous_targets_with_errors = manager.errors.targets()
        for id, _ in changed_modules:
            # If deleted, module won't be in the graph.
            if id in graph:
                # Generate metadata so that we can reuse the AST in the next run.
                graph[id].write_cache()
        for id, state in graph.items():
            # Look up missing ASTs from saved cache.
            if state.tree is None and id in manager.saved_cache:
                meta, tree, type_map = manager.saved_cache[id]
                state.tree = tree
        manager.saved_cache = preserve_full_cache(graph, manager)
        self.graph = graph

        return manager.errors.messages()
Example #12
def reprocess_nodes(manager: BuildManager,
                    graph: Dict[str, State],
                    module_id: str,
                    nodeset: Set[DeferredNode],
                    deps: Dict[str, Set[str]]) -> Set[str]:
    """Reprocess a set of nodes within a single module.

    Return fired triggers.
    """
    if module_id not in graph:
        manager.log_fine_grained('%s not in graph (blocking errors or deleted?)' %
                                 module_id)
        return set()

    file_node = manager.modules[module_id]
    old_symbols = find_symbol_tables_recursive(file_node.fullname(), file_node.names)
    old_symbols = {name: names.copy() for name, names in old_symbols.items()}
    old_symbols_snapshot = snapshot_symbol_table(file_node.fullname(), file_node.names)

    def key(node: DeferredNode) -> int:
        # Unlike modules which are sorted by name within SCC,
        # nodes within the same module are sorted by line number, because
        # this is how they are processed in normal mode.
        return node.node.line

    nodes = sorted(nodeset, key=key)

    # TODO: ignore_all argument to set_file_ignored_lines
    manager.errors.set_file_ignored_lines(file_node.path, file_node.ignored_lines)

    targets = set()
    for node in nodes:
        target = target_from_node(module_id, node.node)
        if target is not None:
            targets.add(target)
    manager.errors.clear_errors_in_targets(file_node.path, targets)

    # Strip semantic analysis information.
    for deferred in nodes:
        strip_target(deferred.node)
    semantic_analyzer = manager.semantic_analyzer

    patches = []  # type: List[Tuple[int, Callable[[], None]]]

    # Second pass of semantic analysis. We don't redo the first pass, because it only
    # does local things that won't go stale.
    for deferred in nodes:
        with semantic_analyzer.file_context(
                file_node=file_node,
                fnam=file_node.path,
                options=manager.options,
                active_type=deferred.active_typeinfo):
            manager.semantic_analyzer.refresh_partial(deferred.node, patches)

    # Third pass of semantic analysis.
    for deferred in nodes:
        with semantic_analyzer.file_context(
                file_node=file_node,
                fnam=file_node.path,
                options=manager.options,
                active_type=deferred.active_typeinfo,
                scope=manager.semantic_analyzer_pass3.scope):
            manager.semantic_analyzer_pass3.refresh_partial(deferred.node, patches)

    with semantic_analyzer.file_context(
            file_node=file_node,
            fnam=file_node.path,
            options=manager.options,
            active_type=None):
        apply_semantic_analyzer_patches(patches)

    # Merge symbol tables to preserve identities of AST nodes. The file node will remain
    # the same, but other nodes may have been recreated with different identities, such as
    # NamedTuples defined using assignment statements.
    new_symbols = find_symbol_tables_recursive(file_node.fullname(), file_node.names)
    for name in old_symbols:
        if name in new_symbols:
            merge_asts(file_node, old_symbols[name], file_node, new_symbols[name])

    # Type check.
    checker = graph[module_id].type_checker()
    checker.reset()
    # We seem to need additional passes in fine-grained incremental mode.
    checker.pass_num = 0
    checker.last_pass = 3
    more = checker.check_second_pass(nodes)
    while more:
        more = False
        if graph[module_id].type_checker().check_second_pass():
            more = True

    new_symbols_snapshot = snapshot_symbol_table(file_node.fullname(), file_node.names)
    # Check if any attribute types were changed and need to be propagated further.
    changed = compare_symbol_table_snapshots(file_node.fullname(),
                                             old_symbols_snapshot,
                                             new_symbols_snapshot)
    new_triggered = {make_trigger(name) for name in changed}

    # Dependencies may have changed.
    update_deps(module_id, nodes, graph, deps, manager.options)

    # Report missing imports.
    verify_dependencies(graph[module_id], manager)

    return new_triggered
Example #13
    def update_module(self,
                      module: str,
                      path: str,
                      force_removed: bool) -> Tuple[List[Tuple[str, str]],
                                                    Tuple[str, str],
                                                    Optional[List[str]]]:
        """Update a single modified module.

        If the module contains imports of previously unseen modules, only process one of
        the new modules and return the remaining work to be done.

        Args:
            module: Id of the module
            path: File system path of the module
            force_removed: If True, consider module removed from the build even if path
                exists (used for removing an existing file from the build)

        Returns:
            Tuple with these items:

            - Remaining modules to process as (module id, path) tuples
            - Module which was actually processed as (id, path) tuple
            - If there was a blocking error, the error messages from it
        """
        self.manager.log_fine_grained('--- update single %r ---' % module)
        self.updated_modules.append(module)

        manager = self.manager
        previous_modules = self.previous_modules
        graph = self.graph

        # Record a symbol table snapshot of the old version of the changed module.
        old_snapshots = {}  # type: Dict[str, Dict[str, SnapshotItem]]
        if module in manager.modules:
            snapshot = snapshot_symbol_table(module, manager.modules[module].names)
            old_snapshots[module] = snapshot

        manager.errors.reset()
        result = update_module_isolated(module, path, manager, previous_modules, graph,
                                        force_removed)
        if isinstance(result, BlockedUpdate):
            # Blocking error -- just give up
            module, path, remaining, errors = result
            self.previous_modules = get_module_to_path_map(manager)
            return remaining, (module, path), errors
        assert isinstance(result, NormalUpdate)  # Work around #4124
        module, path, remaining, tree = result

        # TODO: What to do with stale dependencies?
        triggered = calculate_active_triggers(manager, old_snapshots, {module: tree})
        if is_verbose(self.manager):
            filtered = [trigger for trigger in triggered
                        if not trigger.endswith('__>')]
            self.manager.log_fine_grained('triggered: %r' % sorted(filtered))
        self.triggered.extend(triggered | self.previous_targets_with_errors)
        collect_dependencies({module: tree}, self.deps, graph)
        remaining += propagate_changes_using_dependencies(
            manager, graph, self.deps, triggered,
            {module},
            targets_with_errors=set())

        # Preserve state needed for the next update.
        self.previous_targets_with_errors.update(manager.errors.targets())
        self.previous_modules = get_module_to_path_map(manager)

        return remaining, (module, path), None
Example #14
def reprocess_nodes(manager: BuildManager,
                    graph: Dict[str, State],
                    module_id: str,
                    nodeset: Set[FineGrainedDeferredNode],
                    deps: Dict[str, Set[str]],
                    processed_targets: List[str]) -> Set[str]:
    """Reprocess a set of nodes within a single module.

    Return fired triggers.
    """
    if module_id not in graph:
    manager.log_fine_grained('%s not in graph (blocking errors or deleted?)' %
                             module_id)
        return set()

    file_node = manager.modules[module_id]
    old_symbols = find_symbol_tables_recursive(file_node.fullname(), file_node.names)
    old_symbols = {name: names.copy() for name, names in old_symbols.items()}
    old_symbols_snapshot = snapshot_symbol_table(file_node.fullname(), file_node.names)

    def key(node: FineGrainedDeferredNode) -> int:
        # Unlike modules which are sorted by name within SCC,
        # nodes within the same module are sorted by line number, because
        # this is how they are processed in normal mode.
        return node.node.line

    nodes = sorted(nodeset, key=key)

    options = graph[module_id].options
    manager.errors.set_file_ignored_lines(
        file_node.path, file_node.ignored_lines, options.ignore_errors)

    targets = set()
    for node in nodes:
        target = target_from_node(module_id, node.node)
        if target is not None:
            targets.add(target)
    manager.errors.clear_errors_in_targets(file_node.path, targets)

    # Strip semantic analysis information.
    patches = []  # type: List[Callable[[], None]]
    for deferred in nodes:
        processed_targets.append(deferred.node.fullname())
        if not manager.options.new_semantic_analyzer:
            strip_target(deferred.node)
        else:
            patches = strip_target_new(deferred.node)
    if not options.new_semantic_analyzer:
        re_analyze_nodes(file_node, nodes, manager, options)
    else:
        process_selected_targets(graph[module_id], nodes, graph, patches)
    # Merge symbol tables to preserve identities of AST nodes. The file node will remain
    # the same, but other nodes may have been recreated with different identities, such as
    # NamedTuples defined using assignment statements.
    new_symbols = find_symbol_tables_recursive(file_node.fullname(), file_node.names)
    for name in old_symbols:
        if name in new_symbols:
            merge_asts(file_node, old_symbols[name], file_node, new_symbols[name])

    # Type check.
    checker = graph[module_id].type_checker()
    checker.reset()
    # We seem to need additional passes in fine-grained incremental mode.
    checker.pass_num = 0
    checker.last_pass = 3
    more = checker.check_second_pass(nodes)
    while more:
        more = False
        if graph[module_id].type_checker().check_second_pass():
            more = True

    if manager.options.export_types:
        manager.all_types.update(graph[module_id].type_map())

    new_symbols_snapshot = snapshot_symbol_table(file_node.fullname(), file_node.names)
    # Check if any attribute types were changed and need to be propagated further.
    changed = compare_symbol_table_snapshots(file_node.fullname(),
                                             old_symbols_snapshot,
                                             new_symbols_snapshot)
    new_triggered = {make_trigger(name) for name in changed}

    # Dependencies may have changed.
    update_deps(module_id, nodes, graph, deps, options)

    # Report missing imports.
    graph[module_id].verify_dependencies()

    return new_triggered
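
The type-checking loop that appears in each reprocess_nodes variant runs passes until a fixed point: check_second_pass returns true as long as it deferred work that a later pass might resolve. A generic sketch of that convergence loop (the workload is invented for illustration):

from typing import Callable

def check_to_fixed_point(check_pass: Callable[[], bool], limit: int = 10) -> None:
    # check_pass returns True if it deferred work needing another pass,
    # mirroring check_second_pass above.
    for _ in range(limit):
        if not check_pass():
            return
    raise RuntimeError('type checking did not converge')

pending = ['f', 'g']

def check_pass() -> bool:
    # Toy workload: resolve one deferred node per pass.
    if pending:
        pending.pop()
    return bool(pending)

check_to_fixed_point(check_pass)
print(pending)  # []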