Code example #1
File: update.py  Project: Michael0x2a/mypy
def find_targets_recursive(
        manager: BuildManager,
        graph: Graph,
        triggers: Set[str],
        deps: Dict[str, Set[str]],
        up_to_date_modules: Set[str]) -> Tuple[Dict[str, Set[FineGrainedDeferredNode]],
                                               Set[str], Set[TypeInfo]]:
    """Find names of all targets that need to reprocessed, given some triggers.

    Returns: A tuple containing a:
     * Dictionary from module id to a set of stale targets.
     * A set of module ids for unparsed modules with stale targets.
    """
    result = {}  # type: Dict[str, Set[FineGrainedDeferredNode]]
    worklist = triggers
    processed = set()  # type: Set[str]
    stale_protos = set()  # type: Set[TypeInfo]
    unloaded_files = set()  # type: Set[str]

    # Find AST nodes corresponding to each target.
    #
    # TODO: Don't rely on a set, since the items are in an unpredictable order.
    while worklist:
        processed |= worklist
        current = worklist
        worklist = set()
        for target in current:
            if target.startswith('<'):
                module_id = module_prefix(graph, trigger_to_target(target))
                if module_id:
                    ensure_deps_loaded(module_id, deps, graph)

                worklist |= deps.get(target, set()) - processed
            else:
                module_id = module_prefix(graph, target)
                if module_id is None:
                    # Deleted module.
                    continue
                if module_id in up_to_date_modules:
                    # Already processed.
                    continue
                if (module_id not in manager.modules
                        or manager.modules[module_id].is_cache_skeleton):
                    # We haven't actually parsed and checked the module, so we don't have
                    # access to the actual nodes.
                    # Add it to the queue of files that need to be processed fully.
                    unloaded_files.add(module_id)
                    continue

                if module_id not in result:
                    result[module_id] = set()
                manager.log_fine_grained('process: %s' % target)
                deferred, stale_proto = lookup_target(manager, target)
                if stale_proto:
                    stale_protos.add(stale_proto)
                result[module_id].update(deferred)

    return result, unloaded_files, stale_protos
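At its core, find_targets_recursive is a breadth-first worklist expansion over the trigger-to-dependents map: triggers (names in angle brackets) enqueue their dependents, and plain names are collected as targets to reprocess. Below is a self-contained sketch of just that propagation step, with hypothetical trigger and target names and none of the BuildManager bookkeeping:

from typing import Dict, Set

def propagate_triggers(triggers: Set[str], deps: Dict[str, Set[str]]) -> Set[str]:
    """Expand triggers transitively and return every concrete target reached."""
    processed = set()  # type: Set[str]
    reached = set()  # type: Set[str]
    worklist = set(triggers)
    while worklist:
        processed |= worklist
        current, worklist = worklist, set()
        for target in current:
            if target.startswith('<'):
                # A trigger: queue its dependents that we haven't visited yet.
                worklist |= deps.get(target, set()) - processed
            else:
                # A concrete target (e.g. 'mod.g') that needs reprocessing.
                reached.add(target)
    return reached

# Hypothetical map: changing mod.f affects mod.g, whose change affects pkg.h.
deps = {'<mod.f>': {'mod.g', '<mod.g>'}, '<mod.g>': {'pkg.h'}}
print(propagate_triggers({'<mod.f>'}, deps))  # {'mod.g', 'pkg.h'}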
Code example #2
File: update.py  Project: chadrik/mypy
def ensure_trees_loaded(manager: BuildManager, graph: Dict[str, State],
                        initial: Sequence[str]) -> None:
    """Ensure that the modules in initial and their deps have loaded trees."""
    to_process = find_unloaded_deps(manager, graph, initial)
    if to_process:
        if is_verbose(manager):
            manager.log_fine_grained("Calling process_fresh_modules on set of size {} ({})".format(
                len(to_process), sorted(to_process)))
        process_fresh_modules(graph, to_process, manager)
Code example #3
File: update.py  Project: chadrik/mypy
def propagate_changes_using_dependencies(
        manager: BuildManager,
        graph: Dict[str, State],
        deps: Dict[str, Set[str]],
        triggered: Set[str],
        up_to_date_modules: Set[str],
        targets_with_errors: Set[str]) -> List[Tuple[str, str]]:
    """Transitively rechecks targets based on triggers and the dependency map.

    Returns a list of (module id, path) tuples representing modules that contain
    a target that needs to be reprocessed but that has not been parsed yet."""

    num_iter = 0
    remaining_modules = []  # type: List[Tuple[str, str]]

    # Propagate changes until nothing visible has changed during the last
    # iteration.
    while triggered or targets_with_errors:
        num_iter += 1
        if num_iter > MAX_ITER:
            raise RuntimeError('Max number of iterations (%d) reached (endless loop?)' % MAX_ITER)

        todo, unloaded, stale_protos = find_targets_recursive(manager, graph,
                                                              triggered, deps, up_to_date_modules)
        # TODO: we sort to make it deterministic, but this is *incredibly* ad hoc
        remaining_modules.extend((id, graph[id].xpath) for id in sorted(unloaded))
        # Also process targets that used to have errors, as otherwise some
        # errors might be lost.
        for target in targets_with_errors:
            id = module_prefix(graph, target)
            if id is not None and id not in up_to_date_modules:
                if id not in todo:
                    todo[id] = set()
                manager.log_fine_grained('process target with error: %s' % target)
                more_nodes, _ = lookup_target(manager, target)
                todo[id].update(more_nodes)
        triggered = set()
        # First invalidate subtype caches in all stale protocols.
        # We need to do this to avoid false negatives if the protocol itself is
        # unchanged, but was marked stale because its sub- (or super-) type changed.
        for info in stale_protos:
            TypeState.reset_subtype_caches_for(info)
        # Then fully reprocess all targets.
        # TODO: Preserve order (set is not optimal)
        for id, nodes in sorted(todo.items(), key=lambda x: x[0]):
            assert id not in up_to_date_modules
            triggered |= reprocess_nodes(manager, graph, id, nodes, deps)
        # Changes elsewhere may require us to reprocess modules that were
        # previously considered up to date. For example, there may be a
        # dependency loop that loops back to an originally processed module.
        up_to_date_modules = set()
        targets_with_errors = set()
        if is_verbose(manager):
            manager.log_fine_grained('triggered: %r' % list(triggered))

    return remaining_modules
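The surrounding loop runs to a fixed point: reprocessing targets can fire new triggers, which seed the next iteration, and MAX_ITER guards against runaway propagation. A stripped-down sketch of that loop shape, with a hypothetical process() callback standing in for the find-and-reprocess step:

from typing import Callable, Set

MAX_ITER = 1000  # illustrative bound; mypy defines its own constant

def run_to_fixed_point(triggered: Set[str],
                       process: Callable[[Set[str]], Set[str]]) -> None:
    """Keep processing triggers until an iteration fires no new ones."""
    num_iter = 0
    while triggered:
        num_iter += 1
        if num_iter > MAX_ITER:
            raise RuntimeError('Max number of iterations (%d) reached' % MAX_ITER)
        # process() rechecks everything the triggers reach and returns
        # the triggers fired by whatever changed as a result.
        triggered = process(triggered)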
Code example #4
File: update.py  Project: sixolet/mypy
def verify_dependencies(state: State, manager: BuildManager) -> None:
    """Report errors for import targets in module that don't exist."""
    # Strip out indirect dependencies. See comment in build.load_graph().
    dependencies = [dep for dep in state.dependencies if state.priorities.get(dep) != PRI_INDIRECT]
    for dep in dependencies + state.suppressed:  # TODO: ancestors?
        if dep not in manager.modules and not manager.options.ignore_missing_imports:
            assert state.tree
            line = state.dep_line_map.get(dep, 1)
            assert state.path
            manager.module_not_found(state.path, state.id, line, dep)
Code example #5
File: update.py  Project: sixolet/mypy
def propagate_changes_using_dependencies(
        manager: BuildManager,
        graph: Dict[str, State],
        deps: Dict[str, Set[str]],
        triggered: Set[str],
        up_to_date_modules: Set[str],
        targets_with_errors: Set[str]) -> List[Tuple[str, str]]:
    """Transitively rechecks targets based on triggers and the dependency map.

    Returns a list of (module id, path) tuples representing modules that contain
    a target that needs to be reprocessed but that has not been parsed yet."""

    num_iter = 0
    remaining_modules = []

    # Propagate changes until nothing visible has changed during the last
    # iteration.
    while triggered or targets_with_errors:
        num_iter += 1
        if num_iter > MAX_ITER:
            raise RuntimeError('Max number of iterations (%d) reached (endless loop?)' % MAX_ITER)

        todo = find_targets_recursive(manager, triggered, deps, up_to_date_modules)
        # Also process targets that used to have errors, as otherwise some
        # errors might be lost.
        for target in targets_with_errors:
            id = module_prefix(manager.modules, target)
            if id is not None and id not in up_to_date_modules:
                if id not in todo:
                    todo[id] = set()
                manager.log_fine_grained('process target with error: %s' % target)
                todo[id].update(lookup_target(manager, target))
        triggered = set()
        # TODO: Preserve order (set is not optimal)
        for id, nodes in sorted(todo.items(), key=lambda x: x[0]):
            assert id not in up_to_date_modules
            if manager.modules[id].is_cache_skeleton:
                # We have only loaded the cache for this file, not the actual file,
                # so we can't access the nodes to reprocess.
                # Add it to the queue of files that need to be processed fully.
                remaining_modules.append((id, manager.modules[id].path))
            else:
                triggered |= reprocess_nodes(manager, graph, id, nodes, deps)
        # Changes elsewhere may require us to reprocess modules that were
        # previously considered up to date. For example, there may be a
        # dependency loop that loops back to an originally processed module.
        up_to_date_modules = set()
        targets_with_errors = set()
        if is_verbose(manager):
            manager.log_fine_grained('triggered: %r' % list(triggered))

    return remaining_modules
Code example #6
File: update.py  Project: sixolet/mypy
def delete_module(module_id: str,
                  graph: Graph,
                  manager: BuildManager) -> None:
    manager.log_fine_grained('delete module %r' % module_id)
    # TODO: Remove deps for the module (this only affects memory use, not correctness)
    if module_id in graph:
        del graph[module_id]
    if module_id in manager.modules:
        del manager.modules[module_id]
    components = module_id.split('.')
    if len(components) > 1:
        # Delete reference to module in parent module.
        parent_id = '.'.join(components[:-1])
        # If parent module is ignored, it won't be included in the modules dictionary.
        if parent_id in manager.modules:
            parent = manager.modules[parent_id]
            if components[-1] in parent.names:
                del parent.names[components[-1]]
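The cleanup at the end reflects how dotted module ids map to names: deleting pkg.sub must also remove the name sub from pkg's symbol table, or the parent would keep a dangling reference. A minimal dict-based illustration of the same bookkeeping (plain dicts standing in for manager.modules and MypyFile.names):

modules = {'pkg': {'sub': '<ref to pkg.sub>'}, 'pkg.sub': {}}

module_id = 'pkg.sub'
del modules[module_id]
components = module_id.split('.')
if len(components) > 1:
    parent_id = '.'.join(components[:-1])
    if parent_id in modules:  # the parent is absent if it was ignored
        modules[parent_id].pop(components[-1], None)

print(modules)  # {'pkg': {}}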
Code example #7
File: update.py  Project: Michael0x2a/mypy
def get_all_dependencies(manager: BuildManager, graph: Dict[str, State]) -> Dict[str, Set[str]]:
    """Return the fine-grained dependency map for an entire build."""
    # Deps for each module were computed during build() or loaded from the cache.
    deps = manager.load_fine_grained_deps(FAKE_ROOT_MODULE)  # type: Dict[str, Set[str]]
    for id in graph:
        if graph[id].tree is not None:
            merge_dependencies(graph[id].compute_fine_grained_deps(), deps)
    TypeState.add_all_protocol_deps(deps)
    return deps
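merge_dependencies is not shown on this page; judging from its use here, it folds one Dict[str, Set[str]] dependency map into another. A sketch under the assumption that it is a plain in-place set-union merge:

from typing import Dict, Set

def merge_dependencies(new_deps: Dict[str, Set[str]],
                       deps: Dict[str, Set[str]]) -> None:
    # Union each trigger's dependents from new_deps into deps, in place.
    for trigger, targets in new_deps.items():
        deps.setdefault(trigger, set()).update(targets)

deps = {'<a.f>': {'b.g'}}
merge_dependencies({'<a.f>': {'c.h'}, '<a.x>': {'d.y'}}, deps)
print(deps)  # {'<a.f>': {'b.g', 'c.h'}, '<a.x>': {'d.y'}}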
Code example #8
File: update.py  Project: rkday/mypy
def delete_module(module_id: str, graph: Graph, manager: BuildManager) -> None:
    manager.log_fine_grained('delete module %r' % module_id)
    # TODO: Deletion of a package
    # TODO: Remove deps for the module (this only affects memory use, not correctness)
    if module_id in graph:
        del graph[module_id]
    if module_id in manager.modules:
        del manager.modules[module_id]
    if module_id in manager.saved_cache:
        del manager.saved_cache[module_id]
    components = module_id.split('.')
    if len(components) > 1:
        # Delete reference to module in parent module.
        parent_id = '.'.join(components[:-1])
        # If parent module is ignored, it won't be included in the modules dictionary.
        if parent_id in manager.modules:
            parent = manager.modules[parent_id]
            if components[-1] in parent.names:
                del parent.names[components[-1]]
Code example #9
def get_all_dependencies(manager: BuildManager,
                         graph: Dict[str, State]) -> Dict[str, Set[str]]:
    """Return the fine-grained dependency map for an entire build."""
    # Deps for each module were computed during build() or loaded from the cache.
    deps = manager.load_fine_grained_deps(
        FAKE_ROOT_MODULE)  # type: Dict[str, Set[str]]
    for id in graph:
        if graph[id].tree is not None:
            merge_dependencies(graph[id].compute_fine_grained_deps(), deps)
    TypeState.add_all_protocol_deps(deps)
    return deps
Code example #10
File: testgraph.py  Project: wlmgithub/mypy
 def _make_manager(self) -> BuildManager:
     manager = BuildManager(
         data_dir='',
         lib_path=[],
         ignore_prefix='',
         source_set=BuildSourceSet([]),
         reports=Reports('', {}),
         options=Options(),
         version_id=__version__,
     )
     return manager
Code example #11
File: testgraph.py  Project: khlumzeemee/Transcrypt
 def _make_manager(self):
     manager = BuildManager(
         data_dir='',
         lib_path=[],
         ignore_prefix='',
         source_set=None,
         reports=None,
         options=Options(),
         version_id=__version__,
     )
     return manager
Code example #12
def delete_module(module_id: str, path: str, graph: Graph,
                  manager: BuildManager) -> None:
    manager.log_fine_grained('delete module %r' % module_id)
    # TODO: Remove deps for the module (this only affects memory use, not correctness)
    if module_id in graph:
        del graph[module_id]
    if module_id in manager.modules:
        del manager.modules[module_id]
    components = module_id.split('.')
    if len(components) > 1:
        # Delete reference to module in parent module.
        parent_id = '.'.join(components[:-1])
        # If parent module is ignored, it won't be included in the modules dictionary.
        if parent_id in manager.modules:
            parent = manager.modules[parent_id]
            if components[-1] in parent.names:
                del parent.names[components[-1]]
    # If the module is removed from the build but the file still exists, mark
    # it as missing so that subsequent imports of it are still picked up.
    if manager.fscache.isfile(path):
        manager.missing_modules.add(module_id)
Code example #13
def find_targets_recursive(
        manager: BuildManager,
        triggers: Set[str],
        deps: Dict[str, Set[str]],
        modules: Dict[str, MypyFile],
        up_to_date_modules: Set[str]) -> Dict[str, Set[DeferredNode]]:
    """Find names of all targets that need to reprocessed, given some triggers.

    Returns: Dictionary from module id to a set of stale targets.
    """
    result = {}  # type: Dict[str, Set[DeferredNode]]
    worklist = triggers
    processed = set()  # type: Set[str]

    # Find AST nodes corresponding to each target.
    #
    # TODO: Don't rely on a set, since the items are in an unpredictable order.
    while worklist:
        processed |= worklist
        current = worklist
        worklist = set()
        for target in current:
            if target.startswith('<'):
                worklist |= deps.get(target, set()) - processed
            else:
                module_id = module_prefix(modules, target)
                if module_id is None:
                    # Deleted module.
                    continue
                if module_id in up_to_date_modules:
                    # Already processed.
                    continue
                if module_id not in result:
                    result[module_id] = set()
                manager.log_fine_grained('process %s' % target)
                deferred = lookup_target(modules, target)
                result[module_id].update(deferred)

    return result
Code example #14
File: update.py  Project: sixolet/mypy
def find_targets_recursive(
        manager: BuildManager,
        triggers: Set[str],
        deps: Dict[str, Set[str]],
        up_to_date_modules: Set[str]) -> Dict[str, Set[DeferredNode]]:
    """Find names of all targets that need to reprocessed, given some triggers.

    Returns: Dictionary from module id to a set of stale targets.
    """
    result = {}  # type: Dict[str, Set[DeferredNode]]
    worklist = triggers
    processed = set()  # type: Set[str]

    # Find AST nodes corresponding to each target.
    #
    # TODO: Don't rely on a set, since the items are in an unpredictable order.
    while worklist:
        processed |= worklist
        current = worklist
        worklist = set()
        for target in current:
            if target.startswith('<'):
                worklist |= deps.get(target, set()) - processed
            else:
                module_id = module_prefix(manager.modules, target)
                if module_id is None:
                    # Deleted module.
                    continue
                if module_id in up_to_date_modules:
                    # Already processed.
                    continue
                if module_id not in result:
                    result[module_id] = set()
                manager.log_fine_grained('process: %s' % target)
                deferred = lookup_target(manager, target)
                result[module_id].update(deferred)

    return result
Code example #15
File: update.py  Project: chadrik/mypy
def delete_module(module_id: str,
                  path: str,
                  graph: Graph,
                  manager: BuildManager) -> None:
    manager.log_fine_grained('delete module %r' % module_id)
    # TODO: Remove deps for the module (this only affects memory use, not correctness)
    if module_id in graph:
        del graph[module_id]
    if module_id in manager.modules:
        del manager.modules[module_id]
    components = module_id.split('.')
    if len(components) > 1:
        # Delete reference to module in parent module.
        parent_id = '.'.join(components[:-1])
        # If parent module is ignored, it won't be included in the modules dictionary.
        if parent_id in manager.modules:
            parent = manager.modules[parent_id]
            if components[-1] in parent.names:
                del parent.names[components[-1]]
    # If the module is removed from the build but the file still exists, mark
    # it as missing so that subsequent imports of it are still picked up.
    if manager.fscache.isfile(path):
        manager.missing_modules.add(module_id)
Code example #16
File: testgraph.py  Project: zanellia/mypy
 def _make_manager(self) -> BuildManager:
     errors = Errors()
     options = Options()
     manager = BuildManager(
         data_dir='',
         lib_path=[],
         ignore_prefix='',
         source_set=BuildSourceSet([]),
         reports=Reports('', {}),
         options=options,
         version_id=__version__,
         plugin=Plugin(options),
         errors=errors,
     )
     return manager
Code example #17
    def __init__(self, manager: BuildManager, graph: Dict[str, State]) -> None:
        """Initialize fine-grained build based on a batch build.

        Args:
            manager: State of the build (mutated by this class)
            graph: Additional state of the build
        """
        self.manager = manager
        self.options = manager.options
        self.graph = graph
        self.deps = get_all_dependencies(manager, graph, self.options)
        self.previous_targets_with_errors = manager.errors.targets()
        # Modules that had blocking errors in the previous run.
        # TODO: Handle blocking errors in the initial build
        self.blocking_errors = []  # type: List[str]
        manager.saved_cache = preserve_full_cache(graph, manager)
Code example #18
File: update.py  Project: nehaljwani/mypy
def replace_modules_with_new_variants(
        manager: BuildManager,
        graph: Dict[str, State],
        old_modules: Dict[str, MypyFile],
        new_modules: Dict[str, MypyFile]) -> None:
    """Replace modules with newly built versions.

    Retain the identities of externally visible AST nodes in the
    old ASTs so that references to the affected modules from other
    modules will still be valid (unless something was deleted or
    replaced with an incompatible definition, in which case there
    will be dangling references that will be handled by
    propagate_changes_using_dependencies).
    """
    for id in new_modules:
        merge_asts(old_modules[id], old_modules[id].names, new_modules[id],
                   new_modules[id].names)
        manager.modules[id] = old_modules[id]
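The reason for merging rather than swapping is object identity: other modules hold references to nodes in the old AST, so merge_asts mutates the old nodes to carry the new contents instead of replacing them. A toy illustration of the identity-preserving idea with a plain object (not mypy's actual merge logic):

class Node:
    def __init__(self, body: str) -> None:
        self.body = body

old = Node('old body')
external_ref = old  # some other module still points at the old node
new = Node('new body')

# Copy the new state into the old object rather than rebinding names,
# so every existing reference observes the update.
old.__dict__.update(new.__dict__)
assert external_ref is old and external_ref.body == 'new body'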
Code example #19
 def _make_manager(self) -> BuildManager:
     errors = Errors()
     options = Options()
     fscache = FileSystemCache()
     manager = BuildManager(
         data_dir='',
         lib_path=[],
         ignore_prefix='',
         source_set=BuildSourceSet([]),
         reports=Reports('', {}),
         options=options,
         version_id=__version__,
         plugin=Plugin(options),
         errors=errors,
         flush_errors=lambda msgs, serious: None,
         fscache=fscache,
     )
     return manager
Code example #20
    def __init__(self, manager: BuildManager, graph: Graph) -> None:
        """Initialize fine-grained build based on a batch build.

        Args:
            manager: State of the build (mutated by this class)
            graph: Additional state of the build (only read to initialize state)
        """
        self.manager = manager
        self.options = manager.options
        self.previous_modules = get_module_to_path_map(manager)
        self.deps = get_all_dependencies(manager, graph, self.options)
        self.previous_targets_with_errors = manager.errors.targets()
        # Module, if any, that had blocking errors in the last run as (id, path) tuple.
        # TODO: Handle blocking errors in the initial build
        self.blocking_error = None  # type: Optional[Tuple[str, str]]
        # Modules that we haven't processed yet but that are known to be stale.
        self.stale = []  # type: List[Tuple[str, str]]
        mark_all_meta_as_memory_only(graph, manager)
        manager.saved_cache = preserve_full_cache(graph, manager)
        self.type_maps = extract_type_maps(graph)
Code example #21
File: testgraph.py  Project: truepositiontruefix/mypy
 def test_sorted_components(self) -> None:
     manager = BuildManager(data_dir='',
                            lib_path=[],
                            target=TYPE_CHECK,
                            pyversion=(3, 5),
                            flags=[],
                            ignore_prefix='',
                            custom_typing_module='',
                            source_set=None,
                            reports=None)
     graph = {
         'a': State('a', None, 'import b, c', manager),
         'b': State('b', None, 'import c', manager),
         'c': State('c', None, 'import b, d', manager),
         'd': State('d', None, 'pass', manager)
     }
     res = sorted_components(graph)
     assert_equal(
         res, [frozenset({'d'}),
               frozenset({'c', 'b'}),
               frozenset({'a'})])
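The assertion encodes that sorted_components returns the strongly connected components of the import graph with dependencies first: d imports nothing, b and c form a cycle, and a imports into the cycle. A self-contained Tarjan-style sketch over a plain adjacency dict produces the same grouping and order (it is not mypy's actual implementation):

from typing import Dict, FrozenSet, List, Set

def sccs(graph: Dict[str, List[str]]) -> List[FrozenSet[str]]:
    """Tarjan's algorithm; emits each SCC after everything it depends on."""
    index = {}  # type: Dict[str, int]
    low = {}  # type: Dict[str, int]
    stack = []  # type: List[str]
    on_stack = set()  # type: Set[str]
    out = []  # type: List[FrozenSet[str]]

    def visit(v: str) -> None:
        index[v] = low[v] = len(index)
        stack.append(v)
        on_stack.add(v)
        for w in graph.get(v, []):
            if w not in index:
                visit(w)
                low[v] = min(low[v], low[w])
            elif w in on_stack:
                low[v] = min(low[v], index[w])
        if low[v] == index[v]:  # v is the root of an SCC
            comp = set()
            while True:
                w = stack.pop()
                on_stack.discard(w)
                comp.add(w)
                if w == v:
                    break
            out.append(frozenset(comp))

    for v in graph:
        if v not in index:
            visit(v)
    return out

g = {'a': ['b', 'c'], 'b': ['c'], 'c': ['b', 'd'], 'd': []}
print(sccs(g))  # [frozenset({'d'}), frozenset({'b', 'c'}), frozenset({'a'})]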
Code example #22
 def _make_manager(self) -> BuildManager:
     errors = Errors()
     options = Options()
     fscache = FileSystemCache()
     search_paths = SearchPaths((), (), (), ())
     manager = BuildManager(
         data_dir='',
         search_paths=search_paths,
         ignore_prefix='',
         source_set=BuildSourceSet([]),
         reports=Reports('', {}),
         options=options,
         version_id=__version__,
         plugin=Plugin(options),
         plugins_snapshot={},
         errors=errors,
         flush_errors=lambda msgs, serious: None,
         fscache=fscache,
         stdout=sys.stdout,
         stderr=sys.stderr,
     )
     return manager
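Read together, these _make_manager fixtures trace how BuildManager's constructor grew across mypy versions: lib_path became search_paths, and plugin, plugins_snapshot, errors, flush_errors, fscache, and explicit stdio handles were added over time. A hypothetical test using such a fixture could be as small as:

 def test_fresh_manager_is_empty(self) -> None:
     # Hypothetical check: a freshly built manager has no modules loaded.
     manager = self._make_manager()
     assert manager.modules == {}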
Code example #23
File: update.py  Project: sixolet/mypy
def replace_modules_with_new_variants(
        manager: BuildManager,
        graph: Dict[str, State],
        old_modules: Dict[str, MypyFile],
        new_modules: Dict[str, Optional[MypyFile]]) -> None:
    """Replace modules with newly builds versions.

    Retain the identities of externally visible AST nodes in the
    old ASTs so that references to the affected modules from other
    modules will still be valid (unless something was deleted or
    replaced with an incompatible definition, in which case there
    will be dangling references that will be handled by
    propagate_changes_using_dependencies).
    """
    for id in new_modules:
        new_module = new_modules[id]
        if id in old_modules and new_module is not None:
            preserved_module = old_modules[id]
            merge_asts(preserved_module, old_modules[id].names,
                       new_module, new_module.names)
            manager.modules[id] = preserved_module
            graph[id].tree = preserved_module
Code example #24
File: update.py  Project: rkday/mypy
def reprocess_nodes(manager: BuildManager, graph: Dict[str, State],
                    module_id: str, nodeset: Set[DeferredNode],
                    deps: Dict[str, Set[str]]) -> Set[str]:
    """Reprocess a set of nodes within a single module.

    Return fired triggers.
    """
    if module_id not in graph:
        manager.log_fine_grained(
            '%s not in graph (blocking errors or deleted?)' % module_id)
        return set()

    file_node = manager.modules[module_id]
    old_symbols = find_symbol_tables_recursive(file_node.fullname(),
                                               file_node.names)
    old_symbols = {name: names.copy() for name, names in old_symbols.items()}
    old_symbols_snapshot = snapshot_symbol_table(file_node.fullname(),
                                                 file_node.names)

    def key(node: DeferredNode) -> int:
        # Unlike modules which are sorted by name within SCC,
        # nodes within the same module are sorted by line number, because
        # this is how they are processed in normal mode.
        return node.node.line

    nodes = sorted(nodeset, key=key)

    # TODO: ignore_all argument to set_file_ignored_lines
    manager.errors.set_file_ignored_lines(file_node.path,
                                          file_node.ignored_lines)

    # Strip semantic analysis information.
    for deferred in nodes:
        strip_target(deferred.node)
    semantic_analyzer = manager.semantic_analyzer

    patches = []  # type: List[Tuple[int, Callable[[], None]]]

    # Second pass of semantic analysis. We don't redo the first pass, because it only
    # does local things that won't go stale.
    for deferred in nodes:
        with semantic_analyzer.file_context(
                file_node=file_node,
                fnam=file_node.path,
                options=manager.options,
                active_type=deferred.active_typeinfo):
            manager.semantic_analyzer.refresh_partial(deferred.node, patches)

    # Third pass of semantic analysis.
    for deferred in nodes:
        with semantic_analyzer.file_context(
                file_node=file_node,
                fnam=file_node.path,
                options=manager.options,
                active_type=deferred.active_typeinfo):
            manager.semantic_analyzer_pass3.refresh_partial(
                deferred.node, patches)

    apply_semantic_analyzer_patches(patches)

    # Merge symbol tables to preserve identities of AST nodes. The file node will remain
    # the same, but other nodes may have been recreated with different identities, such as
    # NamedTuples defined using assignment statements.
    new_symbols = find_symbol_tables_recursive(file_node.fullname(),
                                               file_node.names)
    for name in old_symbols:
        if name in new_symbols:
            merge_asts(file_node, old_symbols[name], file_node,
                       new_symbols[name])

    # Type check.
    checker = graph[module_id].type_checker()
    checker.reset()
    # We seem to need additional passes in fine-grained incremental mode.
    checker.pass_num = 0
    checker.last_pass = 3
    more = checker.check_second_pass(nodes)
    while more:
        more = False
        if graph[module_id].type_checker().check_second_pass():
            more = True

    new_symbols_snapshot = snapshot_symbol_table(file_node.fullname(),
                                                 file_node.names)
    # Check if any attribute types were changed and need to be propagated further.
    changed = compare_symbol_table_snapshots(file_node.fullname(),
                                             old_symbols_snapshot,
                                             new_symbols_snapshot)
    new_triggered = {make_trigger(name) for name in changed}

    # Dependencies may have changed.
    update_deps(module_id, nodes, graph, deps, manager.options)

    # Report missing imports.
    verify_dependencies(graph[module_id], manager)

    return new_triggered
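The change detection at the end is snapshot-based: the module's symbol tables are snapshotted before and after reprocessing, and each name whose snapshot differs fires a trigger (mypy's make_trigger wraps the name in angle brackets). A dict-based sketch of that comparison step, with made-up snapshot values:

from typing import Dict, Set

def make_trigger(name: str) -> str:
    return '<%s>' % name

def changed_names(old: Dict[str, object], new: Dict[str, object]) -> Set[str]:
    """Names added, removed, or whose snapshot value differs."""
    return {name for name in old.keys() | new.keys()
            if old.get(name) != new.get(name)}

old_snapshot = {'mod.f': ('func', 'int'), 'mod.C': ('class', ('x',))}
new_snapshot = {'mod.f': ('func', 'str'), 'mod.C': ('class', ('x',))}
print({make_trigger(n) for n in changed_names(old_snapshot, new_snapshot)})
# {'<mod.f>'}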
Code example #25
File: update.py  Project: rkday/mypy
def update_single_isolated(module: str, path: str, manager: BuildManager,
                           previous_modules: Dict[str, str],
                           graph: Graph) -> UpdateResult:
    """Build a new version of one changed module only.

    Don't propagate changes to elsewhere in the program. Raise CompileError on
    encountering a blocking error.

    Args:
        module: Changed module (modified, created or deleted)
        path: Path of the changed module
        manager: Build manager
        graph: Build graph

    Returns a named tuple describing the result (see above for details).
    """
    if module in manager.modules:
        assert_equivalent_paths(path, manager.modules[module].path)
    else:
        manager.log_fine_grained('new module %r' % module)

    old_modules = dict(manager.modules)
    sources = get_sources(previous_modules, [(module, path)])

    if module in manager.missing_modules:
        manager.missing_modules.remove(module)

    try:
        if module in graph:
            del graph[module]
        load_graph(sources, manager, graph)
    except CompileError as err:
        # Parse error somewhere in the program -- a blocker
        assert err.module_with_blocker
        if err.module_with_blocker != module:
            # Blocker is in a fresh module. Delete the state of the original target module
            # since it will be stale.
            #
            # TODO: It would be more efficient to store the original target module
            path = manager.modules[module].path
            del manager.modules[module]
            remaining_modules = [(module, path)]
        else:
            remaining_modules = []
        return BlockedUpdate(err.module_with_blocker, path, remaining_modules,
                             err.messages)

    if not os.path.isfile(path):
        delete_module(module, graph, manager)
        return NormalUpdate(module, path, [], None)

    # Find any other modules brought in by imports.
    changed_modules = get_all_changed_modules(module, path, previous_modules,
                                              graph)
    # If there are multiple modules to process, only process one of them and return
    # the remaining ones to the caller.
    if len(changed_modules) > 1:
        # As an optimization, look for a module that imports no other changed modules.
        module, path = find_relative_leaf_module(changed_modules, graph)
        changed_modules.remove((module, path))
        remaining_modules = changed_modules
        # The remaining modules haven't been processed yet so drop them.
        for id, _ in remaining_modules:
            if id in old_modules:
                manager.modules[id] = old_modules[id]
            else:
                del manager.modules[id]
            del graph[id]
        manager.log_fine_grained('--> %r (newly imported)' % module)
    else:
        remaining_modules = []

    state = graph[module]

    # Process the changed file.
    state.parse_file()
    # TODO: state.fix_suppressed_dependencies()?
    try:
        state.semantic_analysis()
    except CompileError as err:
        # There was a blocking error, so module AST is incomplete. Restore old modules.
        manager.modules.clear()
        manager.modules.update(old_modules)
        del graph[module]
        return BlockedUpdate(module, path, remaining_modules, err.messages)
    state.semantic_analysis_pass_three()
    state.semantic_analysis_apply_patches()

    # Merge old and new ASTs.
    assert state.tree is not None, "file must be at least parsed"
    new_modules = {module: state.tree}  # type: Dict[str, Optional[MypyFile]]
    replace_modules_with_new_variants(manager, graph, old_modules, new_modules)

    # Perform type checking.
    state.type_checker().reset()
    state.type_check_first_pass()
    state.type_check_second_pass()
    state.compute_fine_grained_deps()
    state.finish_passes()
    # TODO: state.write_cache()?
    # TODO: state.mark_as_rechecked()?

    graph[module] = state

    return NormalUpdate(module, path, remaining_modules, state.tree)
Code example #26
def reprocess_nodes(manager: BuildManager,
                    graph: Dict[str, State],
                    module_id: str,
                    nodeset: Set[FineGrainedDeferredNode],
                    deps: Dict[str, Set[str]],
                    processed_targets: List[str]) -> Set[str]:
    """Reprocess a set of nodes within a single module.

    Return fired triggers.
    """
    if module_id not in graph:
        manager.log_fine_grained('%s not in graph (blocking errors or deleted?)' %
                                 module_id)
        return set()

    file_node = manager.modules[module_id]
    old_symbols = find_symbol_tables_recursive(file_node.fullname, file_node.names)
    old_symbols = {name: names.copy() for name, names in old_symbols.items()}
    old_symbols_snapshot = snapshot_symbol_table(file_node.fullname, file_node.names)

    def key(node: FineGrainedDeferredNode) -> int:
        # Unlike modules which are sorted by name within SCC,
        # nodes within the same module are sorted by line number, because
        # this is how they are processed in normal mode.
        return node.node.line

    nodes = sorted(nodeset, key=key)

    options = graph[module_id].options
    manager.errors.set_file_ignored_lines(
        file_node.path, file_node.ignored_lines, options.ignore_errors)

    targets = set()
    for node in nodes:
        target = target_from_node(module_id, node.node)
        if target is not None:
            targets.add(target)
    manager.errors.clear_errors_in_targets(file_node.path, targets)

    # If one of the nodes is the module itself, emit any errors that
    # happened before semantic analysis.
    for target in targets:
        if target == module_id:
            for info in graph[module_id].early_errors:
                manager.errors.add_error_info(info)

    # Strip semantic analysis information.
    saved_attrs = {}  # type: SavedAttributes
    for deferred in nodes:
        processed_targets.append(deferred.node.fullname)
        strip_target(deferred.node, saved_attrs)
    semantic_analysis_for_targets(graph[module_id], nodes, graph, saved_attrs)
    # Merge symbol tables to preserve identities of AST nodes. The file node will remain
    # the same, but other nodes may have been recreated with different identities, such as
    # NamedTuples defined using assignment statements.
    new_symbols = find_symbol_tables_recursive(file_node.fullname, file_node.names)
    for name in old_symbols:
        if name in new_symbols:
            merge_asts(file_node, old_symbols[name], file_node, new_symbols[name])

    # Type check.
    checker = graph[module_id].type_checker()
    checker.reset()
    # We seem to need additional passes in fine-grained incremental mode.
    checker.pass_num = 0
    checker.last_pass = 3
    more = checker.check_second_pass(nodes)
    while more:
        more = False
        if graph[module_id].type_checker().check_second_pass():
            more = True

    if manager.options.export_types:
        manager.all_types.update(graph[module_id].type_map())

    new_symbols_snapshot = snapshot_symbol_table(file_node.fullname, file_node.names)
    # Check if any attribute types were changed and need to be propagated further.
    changed = compare_symbol_table_snapshots(file_node.fullname,
                                             old_symbols_snapshot,
                                             new_symbols_snapshot)
    new_triggered = {make_trigger(name) for name in changed}

    # Dependencies may have changed.
    update_deps(module_id, nodes, graph, deps, options)

    # Report missing imports.
    graph[module_id].verify_dependencies()

    graph[module_id].free_state()

    return new_triggered
Code example #27
def propagate_changes_using_dependencies(
        manager: BuildManager,
        graph: Dict[str, State],
        deps: Dict[str, Set[str]],
        triggered: Set[str],
        up_to_date_modules: Set[str],
        targets_with_errors: Set[str],
        processed_targets: List[str]) -> List[Tuple[str, str]]:
    """Transitively rechecks targets based on triggers and the dependency map.

    Returns a list of (module id, path) tuples representing modules that contain
    a target that needs to be reprocessed but that has not been parsed yet.

    Processed targets should be appended to processed_targets (used in tests only,
    to test the order of processing targets).
    """

    num_iter = 0
    remaining_modules = []  # type: List[Tuple[str, str]]

    # Propagate changes until nothing visible has changed during the last
    # iteration.
    while triggered or targets_with_errors:
        num_iter += 1
        if num_iter > MAX_ITER:
            raise RuntimeError('Max number of iterations (%d) reached (endless loop?)' % MAX_ITER)

        todo, unloaded, stale_protos = find_targets_recursive(manager, graph,
                                                              triggered, deps, up_to_date_modules)
        # TODO: we sort to make it deterministic, but this is *incredibly* ad hoc
        remaining_modules.extend((id, graph[id].xpath) for id in sorted(unloaded))
        # Also process targets that used to have errors, as otherwise some
        # errors might be lost.
        for target in targets_with_errors:
            id = module_prefix(graph, target)
            if id is not None and id not in up_to_date_modules:
                if id not in todo:
                    todo[id] = set()
                manager.log_fine_grained('process target with error: %s' % target)
                more_nodes, _ = lookup_target(manager, target)
                todo[id].update(more_nodes)
        triggered = set()
        # First invalidate subtype caches in all stale protocols.
        # We need to do this to avoid false negatives if the protocol itself is
        # unchanged, but was marked stale because its sub- (or super-) type changed.
        for info in stale_protos:
            TypeState.reset_subtype_caches_for(info)
        # Then fully reprocess all targets.
        # TODO: Preserve order (set is not optimal)
        for id, nodes in sorted(todo.items(), key=lambda x: x[0]):
            assert id not in up_to_date_modules
            triggered |= reprocess_nodes(manager, graph, id, nodes, deps, processed_targets)
        # Changes elsewhere may require us to reprocess modules that were
        # previously considered up to date. For example, there may be a
        # dependency loop that loops back to an originally processed module.
        up_to_date_modules = set()
        targets_with_errors = set()
        if is_verbose(manager):
            manager.log_fine_grained('triggered: %r' % list(triggered))

    return remaining_modules
Code example #28
def update_module_isolated(module: str,
                           path: str,
                           manager: BuildManager,
                           previous_modules: Dict[str, str],
                           graph: Graph,
                           force_removed: bool) -> UpdateResult:
    """Build a new version of one changed module only.

    Don't propagate changes to elsewhere in the program. Raise CompileError on
    encountering a blocking error.

    Args:
        module: Changed module (modified, created or deleted)
        path: Path of the changed module
        manager: Build manager
        graph: Build graph
        force_removed: If True, consider the module removed from the build even if the
            file exists

    Returns a named tuple describing the result (see above for details).
    """
    if module not in graph:
        manager.log_fine_grained('new module %r' % module)

    if not manager.fscache.isfile(path) or force_removed:
        delete_module(module, path, graph, manager)
        return NormalUpdate(module, path, [], None)

    sources = get_sources(manager.fscache, previous_modules, [(module, path)])

    if module in manager.missing_modules:
        manager.missing_modules.remove(module)

    orig_module = module
    orig_state = graph.get(module)
    orig_tree = manager.modules.get(module)

    def restore(ids: List[str]) -> None:
        # For each of the modules in ids, restore that id's old
        # manager.modules and graph entries. (Except for the original
        # module, this means deleting them.)
        for id in ids:
            if id == orig_module and orig_tree:
                manager.modules[id] = orig_tree
            elif id in manager.modules:
                del manager.modules[id]
            if id == orig_module and orig_state:
                graph[id] = orig_state
            elif id in graph:
                del graph[id]

    new_modules = []  # type: List[State]
    try:
        if module in graph:
            del graph[module]
        load_graph(sources, manager, graph, new_modules)
    except CompileError as err:
        # Parse error somewhere in the program -- a blocker
        assert err.module_with_blocker
        restore([module] + [st.id for st in new_modules])
        return BlockedUpdate(err.module_with_blocker, path, [], err.messages)

    # Reparsing the file may have brought in dependencies that we
    # didn't have before. Make sure that they are loaded to restore
    # the invariant that a module having a loaded tree implies that
    # its dependencies do as well.
    ensure_trees_loaded(manager, graph, graph[module].dependencies)

    # Find any other modules brought in by imports.
    changed_modules = [(st.id, st.xpath) for st in new_modules]

    # If there are multiple modules to process, only process one of them and return
    # the remaining ones to the caller.
    if len(changed_modules) > 1:
        # As an optimization, look for a module that imports no other changed modules.
        module, path = find_relative_leaf_module(changed_modules, graph)
        changed_modules.remove((module, path))
        remaining_modules = changed_modules
        # The remaining modules haven't been processed yet so drop them.
        restore([id for id, _ in remaining_modules])
        manager.log_fine_grained('--> %r (newly imported)' % module)
    else:
        remaining_modules = []

    state = graph[module]

    # Process the changed file.
    state.parse_file()
    assert state.tree is not None, "file must be at least parsed"
    t0 = time.time()
    # TODO: state.fix_suppressed_dependencies()?
    try:
        semantic_analysis_for_scc(graph, [state.id], manager.errors)
    except CompileError as err:
        # There was a blocking error, so module AST is incomplete. Restore old modules.
        restore([module])
        return BlockedUpdate(module, path, remaining_modules, err.messages)

    # Merge old and new ASTs.
    new_modules_dict = {module: state.tree}  # type: Dict[str, Optional[MypyFile]]
    replace_modules_with_new_variants(manager, graph, {orig_module: orig_tree}, new_modules_dict)

    t1 = time.time()
    # Perform type checking.
    state.type_checker().reset()
    state.type_check_first_pass()
    state.type_check_second_pass()
    t2 = time.time()
    state.finish_passes()
    t3 = time.time()
    manager.add_stats(
        semanal_time=t1 - t0,
        typecheck_time=t2 - t1,
        finish_passes_time=t3 - t2)

    graph[module] = state

    return NormalUpdate(module, path, remaining_modules, state.tree)
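The restore() closure above is a targeted rollback: when a blocking error aborts the update, only the ids touched by the failed load are reverted, restoring the original module's entry and deleting the rest. The same pattern works for any dict-backed state; a minimal sketch with hypothetical keys:

from typing import Callable, Dict, List, Optional

def make_restore(state: Dict[str, str],
                 orig_key: str) -> Callable[[List[str]], None]:
    orig_value = state.get(orig_key)  # type: Optional[str]

    def restore(ids: List[str]) -> None:
        # Put the original entry back; drop everything else that was added.
        for id in ids:
            if id == orig_key and orig_value is not None:
                state[id] = orig_value
            else:
                state.pop(id, None)
    return restore

state = {'mod': 'old tree'}
restore = make_restore(state, 'mod')
state.update({'mod': 'broken tree', 'newdep': 'partial tree'})
restore(['mod', 'newdep'])
print(state)  # {'mod': 'old tree'}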
Code example #29
def fix_fg_dependencies(manager: BuildManager, deps: Dict[str, Set[str]]) -> None:
    """Populate the dependencies with stuff that build may have missed"""
    # This means the root module and typestate
    merge_dependencies(manager.load_fine_grained_deps(FAKE_ROOT_MODULE), deps)
Code example #30
File: update.py  Project: Michael0x2a/mypy
def reprocess_nodes(manager: BuildManager,
                    graph: Dict[str, State],
                    module_id: str,
                    nodeset: Set[FineGrainedDeferredNode],
                    deps: Dict[str, Set[str]],
                    processed_targets: List[str]) -> Set[str]:
    """Reprocess a set of nodes within a single module.

    Return fired triggers.
    """
    if module_id not in graph:
        manager.log_fine_grained('%s not in graph (blocking errors or deleted?)' %
                                 module_id)
        return set()

    file_node = manager.modules[module_id]
    old_symbols = find_symbol_tables_recursive(file_node.fullname(), file_node.names)
    old_symbols = {name: names.copy() for name, names in old_symbols.items()}
    old_symbols_snapshot = snapshot_symbol_table(file_node.fullname(), file_node.names)

    def key(node: FineGrainedDeferredNode) -> int:
        # Unlike modules which are sorted by name within SCC,
        # nodes within the same module are sorted by line number, because
        # this is how they are processed in normal mode.
        return node.node.line

    nodes = sorted(nodeset, key=key)

    options = graph[module_id].options
    manager.errors.set_file_ignored_lines(
        file_node.path, file_node.ignored_lines, options.ignore_errors)

    targets = set()
    for node in nodes:
        target = target_from_node(module_id, node.node)
        if target is not None:
            targets.add(target)
    manager.errors.clear_errors_in_targets(file_node.path, targets)

    # Strip semantic analysis information.
    patches = []  # type: List[Callable[[], None]]
    for deferred in nodes:
        processed_targets.append(deferred.node.fullname())
        if not manager.options.new_semantic_analyzer:
            strip_target(deferred.node)
        else:
            patches = strip_target_new(deferred.node)
    if not options.new_semantic_analyzer:
        re_analyze_nodes(file_node, nodes, manager, options)
    else:
        process_selected_targets(graph[module_id], nodes, graph, patches)
    # Merge symbol tables to preserve identities of AST nodes. The file node will remain
    # the same, but other nodes may have been recreated with different identities, such as
    # NamedTuples defined using assignment statements.
    new_symbols = find_symbol_tables_recursive(file_node.fullname(), file_node.names)
    for name in old_symbols:
        if name in new_symbols:
            merge_asts(file_node, old_symbols[name], file_node, new_symbols[name])

    # Type check.
    checker = graph[module_id].type_checker()
    checker.reset()
    # We seem to need additional passes in fine-grained incremental mode.
    checker.pass_num = 0
    checker.last_pass = 3
    more = checker.check_second_pass(nodes)
    while more:
        more = False
        if graph[module_id].type_checker().check_second_pass():
            more = True

    if manager.options.export_types:
        manager.all_types.update(graph[module_id].type_map())

    new_symbols_snapshot = snapshot_symbol_table(file_node.fullname(), file_node.names)
    # Check if any attribute types were changed and need to be propagated further.
    changed = compare_symbol_table_snapshots(file_node.fullname(),
                                             old_symbols_snapshot,
                                             new_symbols_snapshot)
    new_triggered = {make_trigger(name) for name in changed}

    # Dependencies may have changed.
    update_deps(module_id, nodes, graph, deps, options)

    # Report missing imports.
    graph[module_id].verify_dependencies()

    return new_triggered
Code example #31
def build_incremental_step(
        manager: BuildManager,
        changed_modules: List[Tuple[str, str]],
        graph: Dict[str, State]) -> Tuple[Dict[str, Optional[MypyFile]], Graph]:
    """Build new versions of changed modules only.

    Raise CompileError on encountering a blocking error.

    Return the new ASTs for the changed modules and the entire build graph.
    """
    # TODO: Handle multiple changed modules per step
    assert len(changed_modules) == 1
    id, path = changed_modules[0]
    if id in manager.modules:
        path1 = os.path.normpath(path)
        path2 = os.path.normpath(manager.modules[id].path)
        assert path1 == path2, '%s != %s' % (path1, path2)

    old_modules = dict(manager.modules)

    sources = get_sources(graph, changed_modules)
    changed_set = {id for id, _ in changed_modules}

    invalidate_stale_cache_entries(manager.saved_cache, changed_modules)

    if not os.path.isfile(path):
        graph = delete_module(id, graph, manager)
        return {id: None}, graph

    old_graph = graph
    manager.missing_modules = set()
    graph = load_graph(sources, manager)

    # Find any other modules brought in by imports.
    for st in graph.values():
        if st.id not in old_graph and st.id not in changed_set:
            changed_set.add(st.id)
            assert st.path
            changed_modules.append((st.id, st.path))
    # TODO: Handle multiple changed modules per step
    assert len(changed_modules) == 1, changed_modules

    state = graph[id]

    # Parse file and run first pass of semantic analysis.
    state.parse_file()

    # TODO: state.fix_suppressed_dependencies()?

    # Run remaining passes of semantic analysis.
    try:
        state.semantic_analysis()
    except CompileError as err:
        # TODO: What if there are multiple changed modules?
        # There was a blocking error, so module AST is incomplete. Restore old modules.
        manager.modules.clear()
        manager.modules.update(old_modules)
        raise err
    state.semantic_analysis_pass_three()
    state.semantic_analysis_apply_patches()

    # Merge old and new ASTs.
    assert state.tree is not None, "file must be at least parsed"
    new_modules = {id: state.tree}  # type: Dict[str, Optional[MypyFile]]
    replace_modules_with_new_variants(manager, graph, old_modules, new_modules)

    # Perform type checking.
    state.type_check_first_pass()
    # TODO: state.type_check_second_pass()?
    state.finish_passes()
    # TODO: state.write_cache()?
    # TODO: state.mark_as_rechecked()?
    # TODO: Store new State in graph, as it has updated dependencies etc.

    graph[id] = state

    return new_modules, graph
Code example #32
File: update.py  Project: sixolet/mypy
def reprocess_nodes(manager: BuildManager,
                    graph: Dict[str, State],
                    module_id: str,
                    nodeset: Set[DeferredNode],
                    deps: Dict[str, Set[str]]) -> Set[str]:
    """Reprocess a set of nodes within a single module.

    Return fired triggers.
    """
    if module_id not in graph:
        manager.log_fine_grained('%s not in graph (blocking errors or deleted?)' %
                                 module_id)
        return set()

    file_node = manager.modules[module_id]
    old_symbols = find_symbol_tables_recursive(file_node.fullname(), file_node.names)
    old_symbols = {name: names.copy() for name, names in old_symbols.items()}
    old_symbols_snapshot = snapshot_symbol_table(file_node.fullname(), file_node.names)

    def key(node: DeferredNode) -> int:
        # Unlike modules which are sorted by name within SCC,
        # nodes within the same module are sorted by line number, because
        # this is how they are processed in normal mode.
        return node.node.line

    nodes = sorted(nodeset, key=key)

    # TODO: ignore_all argument to set_file_ignored_lines
    manager.errors.set_file_ignored_lines(file_node.path, file_node.ignored_lines)

    targets = set()
    for node in nodes:
        target = target_from_node(module_id, node.node)
        if target is not None:
            targets.add(target)
    manager.errors.clear_errors_in_targets(file_node.path, targets)

    # Strip semantic analysis information.
    for deferred in nodes:
        strip_target(deferred.node)
    semantic_analyzer = manager.semantic_analyzer

    patches = []  # type: List[Tuple[int, Callable[[], None]]]

    # Second pass of semantic analysis. We don't redo the first pass, because it only
    # does local things that won't go stale.
    for deferred in nodes:
        with semantic_analyzer.file_context(
                file_node=file_node,
                fnam=file_node.path,
                options=manager.options,
                active_type=deferred.active_typeinfo):
            manager.semantic_analyzer.refresh_partial(deferred.node, patches)

    # Third pass of semantic analysis.
    for deferred in nodes:
        with semantic_analyzer.file_context(
                file_node=file_node,
                fnam=file_node.path,
                options=manager.options,
                active_type=deferred.active_typeinfo,
                scope=manager.semantic_analyzer_pass3.scope):
            manager.semantic_analyzer_pass3.refresh_partial(deferred.node, patches)

    with semantic_analyzer.file_context(
            file_node=file_node,
            fnam=file_node.path,
            options=manager.options,
            active_type=None):
        apply_semantic_analyzer_patches(patches)

    # Merge symbol tables to preserve identities of AST nodes. The file node will remain
    # the same, but other nodes may have been recreated with different identities, such as
    # NamedTuples defined using assignment statements.
    new_symbols = find_symbol_tables_recursive(file_node.fullname(), file_node.names)
    for name in old_symbols:
        if name in new_symbols:
            merge_asts(file_node, old_symbols[name], file_node, new_symbols[name])

    # Type check.
    checker = graph[module_id].type_checker()
    checker.reset()
    # We seem to need additional passes in fine-grained incremental mode.
    checker.pass_num = 0
    checker.last_pass = 3
    more = checker.check_second_pass(nodes)
    while more:
        more = False
        if graph[module_id].type_checker().check_second_pass():
            more = True

    new_symbols_snapshot = snapshot_symbol_table(file_node.fullname(), file_node.names)
    # Check if any attribute types were changed and need to be propagated further.
    changed = compare_symbol_table_snapshots(file_node.fullname(),
                                             old_symbols_snapshot,
                                             new_symbols_snapshot)
    new_triggered = {make_trigger(name) for name in changed}

    # Dependencies may have changed.
    update_deps(module_id, nodes, graph, deps, manager.options)

    # Report missing imports.
    verify_dependencies(graph[module_id], manager)

    return new_triggered
Code example #33
File: update.py  Project: chadrik/mypy
def update_module_isolated(module: str,
                           path: str,
                           manager: BuildManager,
                           previous_modules: Dict[str, str],
                           graph: Graph,
                           force_removed: bool) -> UpdateResult:
    """Build a new version of one changed module only.

    Don't propagate changes to elsewhere in the program. Raise CompileError on
    encountering a blocking error.

    Args:
        module: Changed module (modified, created or deleted)
        path: Path of the changed module
        manager: Build manager
        graph: Build graph
        force_removed: If True, consider the module removed from the build even if the
            file exists

    Returns a named tuple describing the result (see above for details).
    """
    if module not in graph:
        manager.log_fine_grained('new module %r' % module)

    if not manager.fscache.isfile(path) or force_removed:
        delete_module(module, path, graph, manager)
        return NormalUpdate(module, path, [], None)

    sources = get_sources(manager.fscache, previous_modules, [(module, path)])

    if module in manager.missing_modules:
        manager.missing_modules.remove(module)

    orig_module = module
    orig_state = graph.get(module)
    orig_tree = manager.modules.get(module)

    def restore(ids: List[str]) -> None:
        # For each of the modules in ids, restore that id's old
        # manager.modules and graph entries. (Except for the original
        # module, this means deleting them.)
        for id in ids:
            if id == orig_module and orig_tree:
                manager.modules[id] = orig_tree
            elif id in manager.modules:
                del manager.modules[id]
            if id == orig_module and orig_state:
                graph[id] = orig_state
            elif id in graph:
                del graph[id]

    new_modules = []  # type: List[State]
    try:
        if module in graph:
            del graph[module]
        load_graph(sources, manager, graph, new_modules)
    except CompileError as err:
        # Parse error somewhere in the program -- a blocker
        assert err.module_with_blocker
        restore([module] + [st.id for st in new_modules])
        return BlockedUpdate(err.module_with_blocker, path, [], err.messages)

    # Reparsing the file may have brought in dependencies that we
    # didn't have before. Make sure that they are loaded to restore
    # the invariant that a module having a loaded tree implies that
    # its dependencies do as well.
    ensure_trees_loaded(manager, graph, graph[module].dependencies)

    # Find any other modules brought in by imports.
    changed_modules = [(st.id, st.xpath) for st in new_modules]

    # If there are multiple modules to process, only process one of them and return
    # the remaining ones to the caller.
    if len(changed_modules) > 1:
        # As an optimization, look for a module that imports no other changed modules.
        module, path = find_relative_leaf_module(changed_modules, graph)
        changed_modules.remove((module, path))
        remaining_modules = changed_modules
        # The remaining modules haven't been processed yet, so drop them.
        restore([id for id, _ in remaining_modules])
        manager.log_fine_grained('--> %r (newly imported)' % module)
    else:
        remaining_modules = []

    state = graph[module]

    # Process the changed file.
    state.parse_file()
    assert state.tree is not None, "file must be at least parsed"
    t0 = time.time()
    # TODO: state.fix_suppressed_dependencies()?
    if module == 'typing':
        # We need to manually add typing aliases to builtins, like we
        # do in process_stale_scc. Because this can't be done until
        # builtins is also loaded, there isn't an obvious way to
        # refactor this.
        manager.semantic_analyzer.add_builtin_aliases(state.tree)
    try:
        state.semantic_analysis()
    except CompileError as err:
        # There was a blocking error, so module AST is incomplete. Restore old modules.
        restore([module])
        return BlockedUpdate(module, path, remaining_modules, err.messages)
    state.semantic_analysis_pass_three()
    state.semantic_analysis_apply_patches()

    # Merge old and new ASTs.
    new_modules_dict = {module: state.tree}  # type: Dict[str, Optional[MypyFile]]
    replace_modules_with_new_variants(manager, graph, {orig_module: orig_tree}, new_modules_dict)

    t1 = time.time()
    # Perform type checking.
    state.type_checker().reset()
    state.type_check_first_pass()
    state.type_check_second_pass()
    t2 = time.time()
    state.compute_fine_grained_deps()
    t3 = time.time()
    state.finish_passes()
    t4 = time.time()
    manager.add_stats(
        semanal_time=t1 - t0,
        typecheck_time=t2 - t1,
        deps_time=t3 - t2,
        finish_passes_time=t4 - t3)

    graph[module] = state

    return NormalUpdate(module, path, remaining_modules, state.tree)
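
The NormalUpdate and BlockedUpdate values returned above are named tuples
defined near the top of update.py (not shown in these excerpts). Below is a
rough reconstruction from how they are used here; the exact field names are an
assumption and may differ between mypy versions:

from typing import List, NamedTuple, Optional, Tuple, Union

from mypy.nodes import MypyFile

NormalUpdate = NamedTuple('NormalUpdate', [
    ('module', str),                       # id of the changed module
    ('path', str),                         # path of the changed module
    ('remaining', List[Tuple[str, str]]),  # changed modules not yet processed
    ('tree', Optional[MypyFile]),          # new AST, or None if module deleted
])

BlockedUpdate = NamedTuple('BlockedUpdate', [
    ('module', str),                       # module containing the blocking error
    ('path', str),
    ('remaining', List[Tuple[str, str]]),
    ('messages', List[str]),               # errors reported by the blocker
])

UpdateResult = Union[NormalUpdate, BlockedUpdate]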
Code example #34
File: update.py Project: sixolet/mypy
def update_module_isolated(module: str,
                           path: str,
                           manager: BuildManager,
                           previous_modules: Dict[str, str],
                           graph: Graph,
                           force_removed: bool) -> UpdateResult:
    """Build a new version of one changed module only.

    Don't propagate changes elsewhere in the program. Raise CompileError on
    encountering a blocking error.

    Args:
        module: Changed module (modified, created or deleted)
        path: Path of the changed module
        manager: Build manager
        graph: Build graph
        force_removed: If True, consider the module removed from the build even if the
            file exists

    Returns a named tuple describing the result (see above for details).
    """
    if module in manager.modules:
        assert_equivalent_paths(path, manager.modules[module].path)
    else:
        manager.log_fine_grained('new module %r' % module)

    if not manager.fscache.isfile(path) or force_removed:
        delete_module(module, graph, manager)
        return NormalUpdate(module, path, [], None)

    old_modules = dict(manager.modules)
    sources = get_sources(manager.fscache, previous_modules, [(module, path)])

    if module in manager.missing_modules:
        manager.missing_modules.remove(module)

    try:
        if module in graph:
            del graph[module]
        load_graph(sources, manager, graph)
    except CompileError as err:
        # Parse error somewhere in the program -- a blocker
        assert err.module_with_blocker
        if err.module_with_blocker != module:
            # Blocker is in a fresh module. Delete the state of the original target module
            # since it will be stale.
            #
            # TODO: It would be more efficient to store the original target module
            path = manager.modules[module].path
            del manager.modules[module]
            remaining_modules = [(module, path)]
        else:
            remaining_modules = []
        return BlockedUpdate(err.module_with_blocker, path, remaining_modules, err.messages)

    # Find any other modules brought in by imports.
    changed_modules = get_all_changed_modules(module, path, previous_modules, graph)
    # If there are multiple modules to process, only process one of them and return
    # the remaining ones to the caller.
    if len(changed_modules) > 1:
        # As an optimization, look for a module that imports no other changed modules.
        module, path = find_relative_leaf_module(changed_modules, graph)
        changed_modules.remove((module, path))
        remaining_modules = changed_modules
        # The remaining modules haven't been processed yet, so drop them.
        for id, _ in remaining_modules:
            if id in old_modules:
                manager.modules[id] = old_modules[id]
            else:
                del manager.modules[id]
            del graph[id]
        manager.log_fine_grained('--> %r (newly imported)' % module)
    else:
        remaining_modules = []

    state = graph[module]

    # Process the changed file.
    state.parse_file()
    # TODO: state.fix_suppressed_dependencies()?
    try:
        state.semantic_analysis()
    except CompileError as err:
        # There was a blocking error, so module AST is incomplete. Restore old modules.
        manager.modules.clear()
        manager.modules.update(old_modules)
        del graph[module]
        return BlockedUpdate(module, path, remaining_modules, err.messages)
    state.semantic_analysis_pass_three()
    state.semantic_analysis_apply_patches()

    # Merge old and new ASTs.
    assert state.tree is not None, "file must be at least parsed"
    new_modules = {module: state.tree}  # type: Dict[str, Optional[MypyFile]]
    replace_modules_with_new_variants(manager, graph, old_modules, new_modules)

    # Perform type checking.
    state.type_checker().reset()
    state.type_check_first_pass()
    state.type_check_second_pass()
    state.compute_fine_grained_deps()
    state.finish_passes()

    graph[module] = state

    return NormalUpdate(module, path, remaining_modules, state.tree)
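
Both variants hand remaining_modules back to their caller instead of recursing.
A minimal sketch of such a driver loop follows, hypothetical and simplified
relative to mypy's actual fine-grained update machinery (the function name
update_changed and its exact signature are assumptions for illustration):

from typing import Dict, List, Tuple

def update_changed(manager: BuildManager,
                   graph: Graph,
                   previous_modules: Dict[str, str],
                   changed: List[Tuple[str, str]]) -> List[str]:
    # Hypothetical driver: process one changed module at a time and re-queue
    # any modules that update_module_isolated discovered but did not process.
    while changed:
        module, path = changed.pop(0)
        result = update_module_isolated(module, path, manager,
                                        previous_modules, graph,
                                        force_removed=False)
        if isinstance(result, BlockedUpdate):
            # A blocking error aborts the whole update; surface its messages.
            return result.messages
        # Newly imported modules go to the front so they are handled before
        # the rest of the original change set.
        changed = result.remaining + changed
    return []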