Example #1
def find_targets_recursive(
        manager: BuildManager,
        graph: Graph,
        triggers: Set[str],
        deps: Dict[str, Set[str]],
        up_to_date_modules: Set[str]) -> Tuple[Dict[str, Set[FineGrainedDeferredNode]],
                                               Set[str], Set[TypeInfo]]:
    """Find names of all targets that need to reprocessed, given some triggers.

    Returns: A tuple containing a:
     * Dictionary from module id to a set of stale targets.
     * A set of module ids for unparsed modules with stale targets.
    """
    result = {}  # type: Dict[str, Set[FineGrainedDeferredNode]]
    worklist = triggers
    processed = set()  # type: Set[str]
    stale_protos = set()  # type: Set[TypeInfo]
    unloaded_files = set()  # type: Set[str]

    # Find AST nodes corresponding to each target.
    #
    # TODO: Don't rely on a set, since the items are in an unpredictable order.
    while worklist:
        processed |= worklist
        current = worklist
        worklist = set()
        for target in current:
            if target.startswith('<'):
                module_id = module_prefix(graph, trigger_to_target(target))
                if module_id:
                    ensure_deps_loaded(module_id, deps, graph)

                worklist |= deps.get(target, set()) - processed
            else:
                module_id = module_prefix(graph, target)
                if module_id is None:
                    # Deleted module.
                    continue
                if module_id in up_to_date_modules:
                    # Already processed.
                    continue
                if (module_id not in manager.modules
                        or manager.modules[module_id].is_cache_skeleton):
                    # We haven't actually parsed and checked the module, so we don't have
                    # access to the actual nodes.
                    # Add it to the queue of files that need to be processed fully.
                    unloaded_files.add(module_id)
                    continue

                if module_id not in result:
                    result[module_id] = set()
                manager.log_fine_grained('process: %s' % target)
                deferred, stale_proto = lookup_target(manager, target)
                if stale_proto:
                    stale_protos.add(stale_proto)
                result[module_id].update(deferred)

    return result, unloaded_files, stale_protos
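
The worklist loop above computes a transitive closure over the fine-grained
dependency map: names in angle brackets are triggers, and each trigger fans out
to the targets that depend on it. Below is a minimal self-contained sketch of
just that propagation step, using a toy deps map rather than mypy's real
structures:

from typing import Dict, Set

def propagate(triggers: Set[str], deps: Dict[str, Set[str]]) -> Set[str]:
    """Toy version of the worklist traversal in find_targets_recursive."""
    processed = set()  # type: Set[str]
    stale = set()  # type: Set[str]
    worklist = set(triggers)
    while worklist:
        processed |= worklist
        current, worklist = worklist, set()
        for target in current:
            if target.startswith('<'):
                # A trigger: fan out to everything that depends on it.
                worklist |= deps.get(target, set()) - processed
            else:
                # A concrete target that needs reprocessing.
                stale.add(target)
    return stale

# A change to m.f fires <m.f>; p.h is reached transitively via <m.C>.
deps = {'<m.f>': {'n.g', '<m.C>'}, '<m.C>': {'p.h'}}
assert propagate({'<m.f>'}, deps) == {'n.g', 'p.h'}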
Example #2
def propagate_changes_using_dependencies(
        manager: BuildManager,
        graph: Dict[str, State],
        deps: Dict[str, Set[str]],
        triggered: Set[str],
        up_to_date_modules: Set[str],
        targets_with_errors: Set[str]) -> List[Tuple[str, str]]:
    """Transitively rechecks targets based on triggers and the dependency map.

    Returns a list of (module id, path) tuples representing modules that contain
    a target that needs to be reprocessed but has not been parsed yet."""

    num_iter = 0
    remaining_modules = []  # type: List[Tuple[str, str]]

    # Propagate changes until nothing visible has changed during the last
    # iteration.
    while triggered or targets_with_errors:
        num_iter += 1
        if num_iter > MAX_ITER:
            raise RuntimeError(
                'Max number of iterations (%d) reached (endless loop?)' %
                MAX_ITER)

        todo, unloaded, stale_protos = find_targets_recursive(
            manager, graph, triggered, deps, up_to_date_modules)
        # TODO: we sort to make it deterministic, but this is *incredibly* ad hoc
        remaining_modules.extend(
            (id, graph[id].xpath) for id in sorted(unloaded))
        # Also process targets that used to have errors, as otherwise some
        # errors might be lost.
        for target in targets_with_errors:
            id = module_prefix(graph, target)
            if id is not None and id not in up_to_date_modules:
                if id not in todo:
                    todo[id] = set()
                manager.log_fine_grained('process target with error: %s' %
                                         target)
                more_nodes, _ = lookup_target(manager, target)
                todo[id].update(more_nodes)
        triggered = set()
        # First invalidate subtype caches in all stale protocols.
        # We need to do this to avoid false negatives if the protocol itself is
        # unchanged, but was marked stale because its sub- (or super-) type changed.
        for info in stale_protos:
            TypeState.reset_subtype_caches_for(info)
        # Then fully reprocess all targets.
        # TODO: Preserve order (set is not optimal)
        for id, nodes in sorted(todo.items(), key=lambda x: x[0]):
            assert id not in up_to_date_modules
            triggered |= reprocess_nodes(manager, graph, id, nodes, deps)
        # Changes elsewhere may require us to reprocess modules that were
        # previously considered up to date. For example, there may be a
        # dependency loop that loops back to an originally processed module.
        up_to_date_modules = set()
        targets_with_errors = set()
        if is_verbose(manager):
            manager.log_fine_grained('triggered: %r' % list(triggered))

    return remaining_modules
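
The surrounding loop is a fixed-point iteration: reprocessing a target can
change its public interface, which fires fresh triggers, so the function loops
until an iteration fires nothing, with MAX_ITER as a safety bound. A hedged
sketch of just that control flow (recheck is a hypothetical stand-in for the
find_targets_recursive + reprocess_nodes round trip):

from typing import Callable, Set

MAX_ITER = 1000  # illustrative bound; mypy defines its own MAX_ITER

def run_to_fixed_point(triggered: Set[str],
                       recheck: Callable[[Set[str]], Set[str]]) -> int:
    """Loop until rechecking fires no new triggers; return iteration count."""
    num_iter = 0
    while triggered:
        num_iter += 1
        if num_iter > MAX_ITER:
            raise RuntimeError('Max number of iterations (%d) reached' % MAX_ITER)
        # Each round may invalidate interfaces and fire new triggers.
        triggered = recheck(triggered)
    return num_iter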
Example #3
def find_targets_recursive(
        manager: BuildManager,
        graph: Graph,
        triggers: Set[str],
        deps: Dict[str, Set[str]],
        up_to_date_modules: Set[str]) -> Tuple[Dict[str, Set[FineGrainedDeferredNode]],
                                               Set[str], Set[TypeInfo]]:
    """Find names of all targets that need to reprocessed, given some triggers.

    Returns: A tuple containing a:
     * Dictionary from module id to a set of stale targets.
     * A set of module ids for unparsed modules with stale targets.
    """
    result = {}  # type: Dict[str, Set[FineGrainedDeferredNode]]
    worklist = triggers
    processed = set()  # type: Set[str]
    stale_protos = set()  # type: Set[TypeInfo]
    unloaded_files = set()  # type: Set[str]

    # Find AST nodes corresponding to each target.
    #
    # TODO: Don't rely on a set, since the items are in an unpredictable order.
    while worklist:
        processed |= worklist
        current = worklist
        worklist = set()
        for target in current:
            if target.startswith('<'):
                module_id = module_prefix(graph, trigger_to_target(target))
                if module_id:
                    ensure_deps_loaded(module_id, deps, graph)

                worklist |= deps.get(target, set()) - processed
            else:
                module_id = module_prefix(graph, target)
                if module_id is None:
                    # Deleted module.
                    continue
                if module_id in up_to_date_modules:
                    # Already processed.
                    continue
                if (module_id not in manager.modules
                        or manager.modules[module_id].is_cache_skeleton):
                    # We haven't actually parsed and checked the module, so we don't have
                    # access to the actual nodes.
                    # Add it to the queue of files that need to be processed fully.
                    unloaded_files.add(module_id)
                    continue

                if module_id not in result:
                    result[module_id] = set()
                manager.log_fine_grained('process: %s' % target)
                deferred, stale_proto = lookup_target(manager, target)
                if stale_proto:
                    stale_protos.add(stale_proto)
                result[module_id].update(deferred)

    return result, unloaded_files, stale_protos
Example #4
def ensure_trees_loaded(manager: BuildManager, graph: Dict[str, State],
                        initial: Sequence[str]) -> None:
    """Ensure that the modules in initial and their deps have loaded trees."""
    to_process = find_unloaded_deps(manager, graph, initial)
    if to_process:
        if is_verbose(manager):
            manager.log_fine_grained("Calling process_fresh_modules on set of size {} ({})".format(
                len(to_process), sorted(to_process)))
        process_fresh_modules(graph, to_process, manager)
Example #5
def ensure_trees_loaded(manager: BuildManager, graph: Dict[str, State],
                        initial: Sequence[str]) -> None:
    """Ensure that the modules in initial and their deps have loaded trees."""
    to_process = find_unloaded_deps(manager, graph, initial)
    if to_process:
        if is_verbose(manager):
            manager.log_fine_grained("Calling process_fresh_modules on set of size {} ({})".format(
                len(to_process), sorted(to_process)))
        process_fresh_modules(graph, to_process, manager)
Example #6
def propagate_changes_using_dependencies(
        manager: BuildManager,
        graph: Dict[str, State],
        deps: Dict[str, Set[str]],
        triggered: Set[str],
        up_to_date_modules: Set[str],
        targets_with_errors: Set[str]) -> List[Tuple[str, str]]:
    """Transitively rechecks targets based on triggers and the dependency map.

    Returns a list of (module id, path) tuples representing modules that contain
    a target that needs to be reprocessed but has not been parsed yet."""

    num_iter = 0
    remaining_modules = []  # type: List[Tuple[str, str]]

    # Propagate changes until nothing visible has changed during the last
    # iteration.
    while triggered or targets_with_errors:
        num_iter += 1
        if num_iter > MAX_ITER:
            raise RuntimeError('Max number of iterations (%d) reached (endless loop?)' % MAX_ITER)

        todo, unloaded, stale_protos = find_targets_recursive(manager, graph,
                                                              triggered, deps, up_to_date_modules)
        # TODO: we sort to make it deterministic, but this is *incredibly* ad hoc
        remaining_modules.extend((id, graph[id].xpath) for id in sorted(unloaded))
        # Also process targets that used to have errors, as otherwise some
        # errors might be lost.
        for target in targets_with_errors:
            id = module_prefix(graph, target)
            if id is not None and id not in up_to_date_modules:
                if id not in todo:
                    todo[id] = set()
                manager.log_fine_grained('process target with error: %s' % target)
                more_nodes, _ = lookup_target(manager, target)
                todo[id].update(more_nodes)
        triggered = set()
        # First invalidate subtype caches in all stale protocols.
        # We need to do this to avoid false negatives if the protocol itself is
        # unchanged, but was marked stale because its sub- (or super-) type changed.
        for info in stale_protos:
            TypeState.reset_subtype_caches_for(info)
        # Then fully reprocess all targets.
        # TODO: Preserve order (set is not optimal)
        for id, nodes in sorted(todo.items(), key=lambda x: x[0]):
            assert id not in up_to_date_modules
            triggered |= reprocess_nodes(manager, graph, id, nodes, deps)
        # Changes elsewhere may require us to reprocess modules that were
        # previously considered up to date. For example, there may be a
        # dependency loop that loops back to an originally processed module.
        up_to_date_modules = set()
        targets_with_errors = set()
        if is_verbose(manager):
            manager.log_fine_grained('triggered: %r' % list(triggered))

    return remaining_modules
Example #7
def propagate_changes_using_dependencies(
        manager: BuildManager,
        graph: Dict[str, State],
        deps: Dict[str, Set[str]],
        triggered: Set[str],
        up_to_date_modules: Set[str],
        targets_with_errors: Set[str]) -> List[Tuple[str, str]]:
    """Transitively rechecks targets based on triggers and the dependency map.

    Returns a list of (module id, path) tuples representing modules that contain
    a target that needs to be reprocessed but has not been parsed yet."""

    # TODO: Multiple type checking passes
    num_iter = 0
    remaining_modules = []

    # Propagate changes until nothing visible has changed during the last
    # iteration.
    while triggered or targets_with_errors:
        num_iter += 1
        if num_iter > MAX_ITER:
            raise RuntimeError(
                'Max number of iterations (%d) reached (endless loop?)' %
                MAX_ITER)

        todo = find_targets_recursive(manager, triggered, deps,
                                      up_to_date_modules)
        # Also process targets that used to have errors, as otherwise some
        # errors might be lost.
        for target in targets_with_errors:
            id = module_prefix(manager.modules, target)
            if id is not None and id not in up_to_date_modules:
                if id not in todo:
                    todo[id] = set()
                manager.log_fine_grained('process target with error: %s' %
                                         target)
                todo[id].update(lookup_target(manager, target))
        triggered = set()
        # TODO: Preserve order (set is not optimal)
        for id, nodes in sorted(todo.items(), key=lambda x: x[0]):
            assert id not in up_to_date_modules
            if manager.modules[id].is_cache_skeleton:
                # We have only loaded the cache for this file, not the actual file,
                # so we can't access the nodes to reprocess.
                # Add it to the queue of files that need to be processed fully.
                remaining_modules.append((id, manager.modules[id].path))
            else:
                triggered |= reprocess_nodes(manager, graph, id, nodes, deps)
        # Changes elsewhere may require us to reprocess modules that were
        # previously considered up to date. For example, there may be a
        # dependency loop that loops back to an originally processed module.
        up_to_date_modules = set()
        targets_with_errors = set()
        if is_verbose(manager):
            manager.log_fine_grained('triggered: %r' % list(triggered))

    return remaining_modules
Example #8
def propagate_changes_using_dependencies(
        manager: BuildManager,
        graph: Dict[str, State],
        deps: Dict[str, Set[str]],
        triggered: Set[str],
        up_to_date_modules: Set[str],
        targets_with_errors: Set[str]) -> List[Tuple[str, str]]:
    """Transitively rechecks targets based on triggers and the dependency map.

    Returns a list of (module id, path) tuples representing modules that contain
    a target that needs to be reprocessed but has not been parsed yet."""

    num_iter = 0
    remaining_modules = []

    # Propagate changes until nothing visible has changed during the last
    # iteration.
    while triggered or targets_with_errors:
        num_iter += 1
        if num_iter > MAX_ITER:
            raise RuntimeError('Max number of iterations (%d) reached (endless loop?)' % MAX_ITER)

        todo = find_targets_recursive(manager, triggered, deps, up_to_date_modules)
        # Also process targets that used to have errors, as otherwise some
        # errors might be lost.
        for target in targets_with_errors:
            id = module_prefix(manager.modules, target)
            if id is not None and id not in up_to_date_modules:
                if id not in todo:
                    todo[id] = set()
                manager.log_fine_grained('process target with error: %s' % target)
                todo[id].update(lookup_target(manager, target))
        triggered = set()
        # TODO: Preserve order (set is not optimal)
        for id, nodes in sorted(todo.items(), key=lambda x: x[0]):
            assert id not in up_to_date_modules
            if manager.modules[id].is_cache_skeleton:
                # We have only loaded the cache for this file, not the actual file,
                # so we can't access the nodes to reprocess.
                # Add it to the queue of files that need to be processed fully.
                remaining_modules.append((id, manager.modules[id].path))
            else:
                triggered |= reprocess_nodes(manager, graph, id, nodes, deps)
        # Changes elsewhere may require us to reprocess modules that were
        # previously considered up to date. For example, there may be a
        # dependency loop that loops back to an originally processed module.
        up_to_date_modules = set()
        targets_with_errors = set()
        if is_verbose(manager):
            manager.log_fine_grained('triggered: %r' % list(triggered))

    return remaining_modules
Example #9
def delete_module(module_id: str, graph: Graph, manager: BuildManager) -> None:
    manager.log_fine_grained('delete module %r' % module_id)
    # TODO: Remove deps for the module (this only affects memory use, not correctness)
    if module_id in graph:
        del graph[module_id]
    if module_id in manager.modules:
        del manager.modules[module_id]
    components = module_id.split('.')
    if len(components) > 1:
        # Delete reference to module in parent module.
        parent_id = '.'.join(components[:-1])
        # If parent module is ignored, it won't be included in the modules dictionary.
        if parent_id in manager.modules:
            parent = manager.modules[parent_id]
            if components[-1] in parent.names:
                del parent.names[components[-1]]
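
The parent-module bookkeeping above hinges on how a dotted module id splits
into components. A tiny illustration (plain strings, no mypy state):

# How delete_module finds the parent package of a dotted module id:
module_id = 'pkg.sub.mod'
components = module_id.split('.')       # ['pkg', 'sub', 'mod']
parent_id = '.'.join(components[:-1])   # 'pkg.sub'
# The function then drops the 'mod' entry from pkg.sub's symbol table, so
# stale references to the deleted module cannot be reached through the
# parent package.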
Example #10
def delete_module(module_id: str,
                  graph: Graph,
                  manager: BuildManager) -> None:
    manager.log_fine_grained('delete module %r' % module_id)
    # TODO: Remove deps for the module (this only affects memory use, not correctness)
    if module_id in graph:
        del graph[module_id]
    if module_id in manager.modules:
        del manager.modules[module_id]
    components = module_id.split('.')
    if len(components) > 1:
        # Delete reference to module in parent module.
        parent_id = '.'.join(components[:-1])
        # If parent module is ignored, it won't be included in the modules dictionary.
        if parent_id in manager.modules:
            parent = manager.modules[parent_id]
            if components[-1] in parent.names:
                del parent.names[components[-1]]
Example #11
def delete_module(module_id: str, path: str, graph: Graph,
                  manager: BuildManager) -> None:
    manager.log_fine_grained('delete module %r' % module_id)
    # TODO: Remove deps for the module (this only affects memory use, not correctness)
    if module_id in graph:
        del graph[module_id]
    if module_id in manager.modules:
        del manager.modules[module_id]
    components = module_id.split('.')
    if len(components) > 1:
        # Delete reference to module in parent module.
        parent_id = '.'.join(components[:-1])
        # If parent module is ignored, it won't be included in the modules dictionary.
        if parent_id in manager.modules:
            parent = manager.modules[parent_id]
            if components[-1] in parent.names:
                del parent.names[components[-1]]
    # If the module is removed from the build but still exists on disk, then
    # we mark it as missing so that it will still get picked up by imports.
    if manager.fscache.isfile(path):
        manager.missing_modules.add(module_id)
Example #12
def delete_module(module_id: str, graph: Dict[str, State],
                  manager: BuildManager) -> Dict[str, State]:
    manager.log_fine_grained('delete module %r' % module_id)
    # TODO: Deletion of a package
    # TODO: Remove deps for the module (this only affects memory use, not correctness)
    assert module_id not in graph
    new_graph = graph.copy()
    if module_id in manager.modules:
        del manager.modules[module_id]
    if module_id in manager.saved_cache:
        del manager.saved_cache[module_id]
    components = module_id.split('.')
    if len(components) > 1:
        # Delete reference to module in parent module.
        parent_id = '.'.join(components[:-1])
        # If parent module is ignored, it won't be included in the modules dictionary.
        if parent_id in manager.modules:
            parent = manager.modules[parent_id]
            if components[-1] in parent.names:
                del parent.names[components[-1]]
    return new_graph
Example #13
def find_targets_recursive(
        manager: BuildManager,
        triggers: Set[str],
        deps: Dict[str, Set[str]],
        modules: Dict[str, MypyFile],
        up_to_date_modules: Set[str]) -> Dict[str, Set[DeferredNode]]:
    """Find names of all targets that need to reprocessed, given some triggers.

    Returns: Dictionary from module id to a set of stale targets.
    """
    result = {}  # type: Dict[str, Set[DeferredNode]]
    worklist = triggers
    processed = set()  # type: Set[str]

    # Find AST nodes corresponding to each target.
    #
    # TODO: Don't rely on a set, since the items are in an unpredictable order.
    while worklist:
        processed |= worklist
        current = worklist
        worklist = set()
        for target in current:
            if target.startswith('<'):
                worklist |= deps.get(target, set()) - processed
            else:
                module_id = module_prefix(modules, target)
                if module_id is None:
                    # Deleted module.
                    continue
                if module_id in up_to_date_modules:
                    # Already processed.
                    continue
                if module_id not in result:
                    result[module_id] = set()
                manager.log_fine_grained('process %s' % target)
                deferred = lookup_target(modules, target)
                result[module_id].update(deferred)

    return result
Example #14
def find_targets_recursive(
        manager: BuildManager,
        triggers: Set[str],
        deps: Dict[str, Set[str]],
        up_to_date_modules: Set[str]) -> Dict[str, Set[DeferredNode]]:
    """Find names of all targets that need to reprocessed, given some triggers.

    Returns: Dictionary from module id to a set of stale targets.
    """
    result = {}  # type: Dict[str, Set[DeferredNode]]
    worklist = triggers
    processed = set()  # type: Set[str]

    # Find AST nodes corresponding to each target.
    #
    # TODO: Don't rely on a set, since the items are in an unpredictable order.
    while worklist:
        processed |= worklist
        current = worklist
        worklist = set()
        for target in current:
            if target.startswith('<'):
                worklist |= deps.get(target, set()) - processed
            else:
                module_id = module_prefix(manager.modules, target)
                if module_id is None:
                    # Deleted module.
                    continue
                if module_id in up_to_date_modules:
                    # Already processed.
                    continue
                if module_id not in result:
                    result[module_id] = set()
                manager.log_fine_grained('process: %s' % target)
                deferred = lookup_target(manager, target)
                result[module_id].update(deferred)

    return result
Example #15
def delete_module(module_id: str,
                  path: str,
                  graph: Graph,
                  manager: BuildManager) -> None:
    manager.log_fine_grained('delete module %r' % module_id)
    # TODO: Remove deps for the module (this only affects memory use, not correctness)
    if module_id in graph:
        del graph[module_id]
    if module_id in manager.modules:
        del manager.modules[module_id]
    components = module_id.split('.')
    if len(components) > 1:
        # Delete reference to module in parent module.
        parent_id = '.'.join(components[:-1])
        # If parent module is ignored, it won't be included in the modules dictionary.
        if parent_id in manager.modules:
            parent = manager.modules[parent_id]
            if components[-1] in parent.names:
                del parent.names[components[-1]]
    # If the module is removed from the build but still exists on disk, then
    # we mark it as missing so that it will still get picked up by imports.
    if manager.fscache.isfile(path):
        manager.missing_modules.add(module_id)
Example #16
def reprocess_nodes(manager: BuildManager, graph: Dict[str, State],
                    module_id: str, nodeset: Set[DeferredNode],
                    deps: Dict[str, Set[str]]) -> Set[str]:
    """Reprocess a set of nodes within a single module.

    Return fired triggers.
    """
    if module_id not in graph:
        manager.log_fine_grained(
            '%s not in graph (blocking errors or deleted?)' % module_id)
        return set()

    file_node = manager.modules[module_id]
    old_symbols = find_symbol_tables_recursive(file_node.fullname(),
                                               file_node.names)
    old_symbols = {name: names.copy() for name, names in old_symbols.items()}
    old_symbols_snapshot = snapshot_symbol_table(file_node.fullname(),
                                                 file_node.names)

    def key(node: DeferredNode) -> int:
        # Unlike modules which are sorted by name within SCC,
        # nodes within the same module are sorted by line number, because
        # this is how they are processed in normal mode.
        return node.node.line

    nodes = sorted(nodeset, key=key)

    # TODO: ignore_all argument to set_file_ignored_lines
    manager.errors.set_file_ignored_lines(file_node.path,
                                          file_node.ignored_lines)

    # Strip semantic analysis information.
    for deferred in nodes:
        strip_target(deferred.node)
    semantic_analyzer = manager.semantic_analyzer

    patches = []  # type: List[Tuple[int, Callable[[], None]]]

    # Second pass of semantic analysis. We don't redo the first pass, because it only
    # does local things that won't go stale.
    for deferred in nodes:
        with semantic_analyzer.file_context(
                file_node=file_node,
                fnam=file_node.path,
                options=manager.options,
                active_type=deferred.active_typeinfo):
            manager.semantic_analyzer.refresh_partial(deferred.node, patches)

    # Third pass of semantic analysis.
    for deferred in nodes:
        with semantic_analyzer.file_context(
                file_node=file_node,
                fnam=file_node.path,
                options=manager.options,
                active_type=deferred.active_typeinfo):
            manager.semantic_analyzer_pass3.refresh_partial(
                deferred.node, patches)

    apply_semantic_analyzer_patches(patches)

    # Merge symbol tables to preserve identities of AST nodes. The file node will remain
    # the same, but other nodes may have been recreated with different identities, such as
    # NamedTuples defined using assignment statements.
    new_symbols = find_symbol_tables_recursive(file_node.fullname(),
                                               file_node.names)
    for name in old_symbols:
        if name in new_symbols:
            merge_asts(file_node, old_symbols[name], file_node,
                       new_symbols[name])

    # Type check.
    checker = graph[module_id].type_checker()
    checker.reset()
    # We seem to need additional passes in fine-grained incremental mode.
    checker.pass_num = 0
    checker.last_pass = 3
    more = checker.check_second_pass(nodes)
    while more:
        more = False
        if graph[module_id].type_checker().check_second_pass():
            more = True

    new_symbols_snapshot = snapshot_symbol_table(file_node.fullname(),
                                                 file_node.names)
    # Check if any attribute types were changed and need to be propagated further.
    changed = compare_symbol_table_snapshots(file_node.fullname(),
                                             old_symbols_snapshot,
                                             new_symbols_snapshot)
    new_triggered = {make_trigger(name) for name in changed}

    # Dependencies may have changed.
    update_deps(module_id, nodes, graph, deps, manager.options)

    # Report missing imports.
    verify_dependencies(graph[module_id], manager)

    return new_triggered
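
The change detection at the end of reprocess_nodes works by snapshotting the
symbol table before and after reprocessing, then firing a trigger for every
name whose snapshot differs. A reduced sketch over toy dictionaries
(snapshot_symbol_table and make_trigger are mypy's; the plain dict comparison
here only illustrates the idea):

from typing import Dict, Set

def diff_snapshots(old: Dict[str, str], new: Dict[str, str]) -> Set[str]:
    """Fire a trigger for every name whose snapshot changed."""
    changed = {name for name in old.keys() | new.keys()
               if old.get(name) != new.get(name)}
    return {'<%s>' % name for name in changed}  # mimics make_trigger

# f changed type, h is new, g is untouched:
assert diff_snapshots({'f': 'int', 'g': 'str'},
                      {'f': 'bool', 'g': 'str', 'h': 'int'}) == {'<f>', '<h>'}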
Example #17
def update_single_isolated(module: str, path: str, manager: BuildManager,
                           previous_modules: Dict[str, str],
                           graph: Graph) -> UpdateResult:
    """Build a new version of one changed module only.

    Don't propagate changes elsewhere in the program. Raise CompileError on
    encountering a blocking error.

    Args:
        module: Changed module (modified, created or deleted)
        path: Path of the changed module
        manager: Build manager
        graph: Build graph

    Returns a named tuple describing the result (see above for details).
    """
    if module in manager.modules:
        assert_equivalent_paths(path, manager.modules[module].path)
    else:
        manager.log_fine_grained('new module %r' % module)

    old_modules = dict(manager.modules)
    sources = get_sources(previous_modules, [(module, path)])

    if module in manager.missing_modules:
        manager.missing_modules.remove(module)

    try:
        if module in graph:
            del graph[module]
        load_graph(sources, manager, graph)
    except CompileError as err:
        # Parse error somewhere in the program -- a blocker
        assert err.module_with_blocker
        if err.module_with_blocker != module:
            # Blocker is in a fresh module. Delete the state of the original target module
            # since it will be stale.
            #
            # TODO: It would be more efficient to store the original target module
            path = manager.modules[module].path
            del manager.modules[module]
            remaining_modules = [(module, path)]
        else:
            remaining_modules = []
        return BlockedUpdate(err.module_with_blocker, path, remaining_modules,
                             err.messages)

    if not os.path.isfile(path):
        delete_module(module, graph, manager)
        return NormalUpdate(module, path, [], None)

    # Find any other modules brought in by imports.
    changed_modules = get_all_changed_modules(module, path, previous_modules,
                                              graph)
    # If there are multiple modules to process, only process one of them and return
    # the remaining ones to the caller.
    if len(changed_modules) > 1:
        # As an optimization, look for a module that imports no other changed modules.
        module, path = find_relative_leaf_module(changed_modules, graph)
        changed_modules.remove((module, path))
        remaining_modules = changed_modules
        # The remaining modules haven't been processed yet so drop them.
        for id, _ in remaining_modules:
            if id in old_modules:
                manager.modules[id] = old_modules[id]
            else:
                del manager.modules[id]
            del graph[id]
        manager.log_fine_grained('--> %r (newly imported)' % module)
    else:
        remaining_modules = []

    state = graph[module]

    # Process the changed file.
    state.parse_file()
    # TODO: state.fix_suppressed_dependencies()?
    try:
        state.semantic_analysis()
    except CompileError as err:
        # There was a blocking error, so module AST is incomplete. Restore old modules.
        manager.modules.clear()
        manager.modules.update(old_modules)
        del graph[module]
        return BlockedUpdate(module, path, remaining_modules, err.messages)
    state.semantic_analysis_pass_three()
    state.semantic_analysis_apply_patches()

    # Merge old and new ASTs.
    assert state.tree is not None, "file must be at least parsed"
    new_modules = {module: state.tree}  # type: Dict[str, Optional[MypyFile]]
    replace_modules_with_new_variants(manager, graph, old_modules, new_modules)

    # Perform type checking.
    state.type_checker().reset()
    state.type_check_first_pass()
    state.type_check_second_pass()
    state.compute_fine_grained_deps()
    state.finish_passes()
    # TODO: state.write_cache()?
    # TODO: state.mark_as_rechecked()?

    graph[module] = state

    return NormalUpdate(module, path, remaining_modules, state.tree)
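
When several modules change at once, the code above processes only one and
hands the rest back to the caller, preferring a "relative leaf": a changed
module that imports no other changed module, so processing it first cannot be
invalidated by the rest. A hedged sketch of that selection (imports_of is a
hypothetical callable; mypy's find_relative_leaf_module reads the build graph
instead):

from typing import Callable, List, Set, Tuple

def find_relative_leaf(changed: List[Tuple[str, str]],
                       imports_of: Callable[[str], Set[str]]) -> Tuple[str, str]:
    """Pick a changed module that imports no other changed module."""
    changed_ids = {id for id, _ in changed}
    for id, path in changed:
        if not imports_of(id) & (changed_ids - {id}):
            return id, path
    return changed[0]  # import cycle among changed modules: pick arbitrarily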
Example #18
def reprocess_nodes(manager: BuildManager,
                    graph: Dict[str, State],
                    module_id: str,
                    nodeset: Set[FineGrainedDeferredNode],
                    deps: Dict[str, Set[str]],
                    processed_targets: List[str]) -> Set[str]:
    """Reprocess a set of nodes within a single module.

    Return fired triggers.
    """
    if module_id not in graph:
        manager.log_fine_grained('%s not in graph (blocking errors or deleted?)' %
                                 module_id)
        return set()

    file_node = manager.modules[module_id]
    old_symbols = find_symbol_tables_recursive(file_node.fullname, file_node.names)
    old_symbols = {name: names.copy() for name, names in old_symbols.items()}
    old_symbols_snapshot = snapshot_symbol_table(file_node.fullname, file_node.names)

    def key(node: FineGrainedDeferredNode) -> int:
        # Unlike modules which are sorted by name within SCC,
        # nodes within the same module are sorted by line number, because
        # this is how they are processed in normal mode.
        return node.node.line

    nodes = sorted(nodeset, key=key)

    options = graph[module_id].options
    manager.errors.set_file_ignored_lines(
        file_node.path, file_node.ignored_lines, options.ignore_errors)

    targets = set()
    for node in nodes:
        target = target_from_node(module_id, node.node)
        if target is not None:
            targets.add(target)
    manager.errors.clear_errors_in_targets(file_node.path, targets)

    # If one of the nodes is the module itself, emit any errors that
    # happened before semantic analysis.
    for target in targets:
        if target == module_id:
            for info in graph[module_id].early_errors:
                manager.errors.add_error_info(info)

    # Strip semantic analysis information.
    saved_attrs = {}  # type: SavedAttributes
    for deferred in nodes:
        processed_targets.append(deferred.node.fullname)
        strip_target(deferred.node, saved_attrs)
    semantic_analysis_for_targets(graph[module_id], nodes, graph, saved_attrs)
    # Merge symbol tables to preserve identities of AST nodes. The file node will remain
    # the same, but other nodes may have been recreated with different identities, such as
    # NamedTuples defined using assignment statements.
    new_symbols = find_symbol_tables_recursive(file_node.fullname, file_node.names)
    for name in old_symbols:
        if name in new_symbols:
            merge_asts(file_node, old_symbols[name], file_node, new_symbols[name])

    # Type check.
    checker = graph[module_id].type_checker()
    checker.reset()
    # We seem to need additional passes in fine-grained incremental mode.
    checker.pass_num = 0
    checker.last_pass = 3
    more = checker.check_second_pass(nodes)
    while more:
        more = False
        if graph[module_id].type_checker().check_second_pass():
            more = True

    if manager.options.export_types:
        manager.all_types.update(graph[module_id].type_map())

    new_symbols_snapshot = snapshot_symbol_table(file_node.fullname, file_node.names)
    # Check if any attribute types were changed and need to be propagated further.
    changed = compare_symbol_table_snapshots(file_node.fullname,
                                             old_symbols_snapshot,
                                             new_symbols_snapshot)
    new_triggered = {make_trigger(name) for name in changed}

    # Dependencies may have changed.
    update_deps(module_id, nodes, graph, deps, options)

    # Report missing imports.
    graph[module_id].verify_dependencies()

    graph[module_id].free_state()

    return new_triggered
Example #19
def update_module_isolated(module: str,
                           path: str,
                           manager: BuildManager,
                           previous_modules: Dict[str, str],
                           graph: Graph,
                           force_removed: bool) -> UpdateResult:
    """Build a new version of one changed module only.

    Don't propagate changes elsewhere in the program. Raise CompileError on
    encountering a blocking error.

    Args:
        module: Changed module (modified, created or deleted)
        path: Path of the changed module
        manager: Build manager
        graph: Build graph
        force_removed: If True, consider the module removed from the build even if the
            file exists

    Returns a named tuple describing the result (see above for details).
    """
    if module not in graph:
        manager.log_fine_grained('new module %r' % module)

    if not manager.fscache.isfile(path) or force_removed:
        delete_module(module, path, graph, manager)
        return NormalUpdate(module, path, [], None)

    sources = get_sources(manager.fscache, previous_modules, [(module, path)])

    if module in manager.missing_modules:
        manager.missing_modules.remove(module)

    orig_module = module
    orig_state = graph.get(module)
    orig_tree = manager.modules.get(module)

    def restore(ids: List[str]) -> None:
        # For each of the modules in ids, restore that id's old
        # manager.modules and graphs entries. (Except for the original
        # module, this means deleting them.)
        for id in ids:
            if id == orig_module and orig_tree:
                manager.modules[id] = orig_tree
            elif id in manager.modules:
                del manager.modules[id]
            if id == orig_module and orig_state:
                graph[id] = orig_state
            elif id in graph:
                del graph[id]

    new_modules = []  # type: List[State]
    try:
        if module in graph:
            del graph[module]
        load_graph(sources, manager, graph, new_modules)
    except CompileError as err:
        # Parse error somewhere in the program -- a blocker
        assert err.module_with_blocker
        restore([module] + [st.id for st in new_modules])
        return BlockedUpdate(err.module_with_blocker, path, [], err.messages)

    # Reparsing the file may have brought in dependencies that we
    # didn't have before. Make sure that they are loaded to restore
    # the invariant that a module having a loaded tree implies that
    # its dependencies do as well.
    ensure_trees_loaded(manager, graph, graph[module].dependencies)

    # Find any other modules brought in by imports.
    changed_modules = [(st.id, st.xpath) for st in new_modules]

    # If there are multiple modules to process, only process one of them and return
    # the remaining ones to the caller.
    if len(changed_modules) > 1:
        # As an optimization, look for a module that imports no other changed modules.
        module, path = find_relative_leaf_module(changed_modules, graph)
        changed_modules.remove((module, path))
        remaining_modules = changed_modules
        # The remaining modules haven't been processed yet so drop them.
        restore([id for id, _ in remaining_modules])
        manager.log_fine_grained('--> %r (newly imported)' % module)
    else:
        remaining_modules = []

    state = graph[module]

    # Process the changed file.
    state.parse_file()
    assert state.tree is not None, "file must be at least parsed"
    t0 = time.time()
    # TODO: state.fix_suppressed_dependencies()?
    if module == 'typing':
        # We need to manually add typing aliases to builtins, like we
        # do in process_stale_scc. Because this can't be done until
        # builtins is also loaded, there isn't an obvious way to
        # refactor this.
        manager.semantic_analyzer.add_builtin_aliases(state.tree)
    try:
        state.semantic_analysis()
    except CompileError as err:
        # There was a blocking error, so module AST is incomplete. Restore old modules.
        restore([module])
        return BlockedUpdate(module, path, remaining_modules, err.messages)
    state.semantic_analysis_pass_three()
    state.semantic_analysis_apply_patches()

    # Merge old and new ASTs.
    new_modules_dict = {module: state.tree}  # type: Dict[str, Optional[MypyFile]]
    replace_modules_with_new_variants(manager, graph, {orig_module: orig_tree}, new_modules_dict)

    t1 = time.time()
    # Perform type checking.
    state.type_checker().reset()
    state.type_check_first_pass()
    state.type_check_second_pass()
    t2 = time.time()
    state.compute_fine_grained_deps()
    t3 = time.time()
    state.finish_passes()
    t4 = time.time()
    manager.add_stats(
        semanal_time=t1 - t0,
        typecheck_time=t2 - t1,
        deps_time=t3 - t2,
        finish_passes_time=t4 - t3)

    graph[module] = state

    return NormalUpdate(module, path, remaining_modules, state.tree)
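
Unlike the older update_single_isolated in Example #17, this version
centralizes rollback in the restore() closure: it snapshots the original
module's tree and graph state up front and, on a blocking error, restores the
original entries while deleting everything load_graph newly created. A reduced
sketch of the same snapshot-and-rollback pattern over plain dicts (the orig_*
names mirror the closure's captured variables):

def make_restore(modules: dict, graph: dict,
                 orig_module: str, orig_tree: object, orig_state: object):
    def restore(ids):
        for id in ids:
            # The original module gets its saved entries back;
            # every other id was newly created, so it is dropped.
            if id == orig_module and orig_tree is not None:
                modules[id] = orig_tree
            else:
                modules.pop(id, None)
            if id == orig_module and orig_state is not None:
                graph[id] = orig_state
            else:
                graph.pop(id, None)
    return restore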
Example #20
def update_module_isolated(module: str,
                           path: str,
                           manager: BuildManager,
                           previous_modules: Dict[str, str],
                           graph: Graph,
                           force_removed: bool) -> UpdateResult:
    """Build a new version of one changed module only.

    Don't propagate changes elsewhere in the program. Raise CompileError on
    encountering a blocking error.

    Args:
        module: Changed module (modified, created or deleted)
        path: Path of the changed module
        manager: Build manager
        graph: Build graph
        force_removed: If True, consider the module removed from the build even if the
            file exists

    Returns a named tuple describing the result (see above for details).
    """
    if module not in graph:
        manager.log_fine_grained('new module %r' % module)

    if not manager.fscache.isfile(path) or force_removed:
        delete_module(module, path, graph, manager)
        return NormalUpdate(module, path, [], None)

    sources = get_sources(manager.fscache, previous_modules, [(module, path)])

    if module in manager.missing_modules:
        manager.missing_modules.remove(module)

    orig_module = module
    orig_state = graph.get(module)
    orig_tree = manager.modules.get(module)

    def restore(ids: List[str]) -> None:
        # For each of the modules in ids, restore that id's old
        # manager.modules and graphs entries. (Except for the original
        # module, this means deleting them.)
        for id in ids:
            if id == orig_module and orig_tree:
                manager.modules[id] = orig_tree
            elif id in manager.modules:
                del manager.modules[id]
            if id == orig_module and orig_state:
                graph[id] = orig_state
            elif id in graph:
                del graph[id]

    new_modules = []  # type: List[State]
    try:
        if module in graph:
            del graph[module]
        load_graph(sources, manager, graph, new_modules)
    except CompileError as err:
        # Parse error somewhere in the program -- a blocker
        assert err.module_with_blocker
        restore([module] + [st.id for st in new_modules])
        return BlockedUpdate(err.module_with_blocker, path, [], err.messages)

    # Reparsing the file may have brought in dependencies that we
    # didn't have before. Make sure that they are loaded to restore
    # the invariant that a module having a loaded tree implies that
    # its dependencies do as well.
    ensure_trees_loaded(manager, graph, graph[module].dependencies)

    # Find any other modules brought in by imports.
    changed_modules = [(st.id, st.xpath) for st in new_modules]

    # If there are multiple modules to process, only process one of them and return
    # the remaining ones to the caller.
    if len(changed_modules) > 1:
        # As an optimization, look for a module that imports no other changed modules.
        module, path = find_relative_leaf_module(changed_modules, graph)
        changed_modules.remove((module, path))
        remaining_modules = changed_modules
        # The remaining modules haven't been processed yet so drop them.
        restore([id for id, _ in remaining_modules])
        manager.log_fine_grained('--> %r (newly imported)' % module)
    else:
        remaining_modules = []

    state = graph[module]

    # Process the changed file.
    state.parse_file()
    assert state.tree is not None, "file must be at least parsed"
    t0 = time.time()
    # TODO: state.fix_suppressed_dependencies()?
    try:
        semantic_analysis_for_scc(graph, [state.id], manager.errors)
    except CompileError as err:
        # There was a blocking error, so module AST is incomplete. Restore old modules.
        restore([module])
        return BlockedUpdate(module, path, remaining_modules, err.messages)

    # Merge old and new ASTs.
    new_modules_dict = {module: state.tree}  # type: Dict[str, Optional[MypyFile]]
    replace_modules_with_new_variants(manager, graph, {orig_module: orig_tree}, new_modules_dict)

    t1 = time.time()
    # Perform type checking.
    state.type_checker().reset()
    state.type_check_first_pass()
    state.type_check_second_pass()
    t2 = time.time()
    state.finish_passes()
    t3 = time.time()
    manager.add_stats(
        semanal_time=t1 - t0,
        typecheck_time=t2 - t1,
        finish_passes_time=t3 - t2)

    graph[module] = state

    return NormalUpdate(module, path, remaining_modules, state.tree)
Example #21
def reprocess_nodes(manager: BuildManager,
                    graph: Dict[str, State],
                    module_id: str,
                    nodeset: Set[DeferredNode],
                    deps: Dict[str, Set[str]]) -> Set[str]:
    """Reprocess a set of nodes within a single module.

    Return fired triggers.
    """
    if module_id not in graph:
        manager.log_fine_grained('%s not in graph (blocking errors or deleted?)' %
                                 module_id)
        return set()

    file_node = manager.modules[module_id]
    old_symbols = find_symbol_tables_recursive(file_node.fullname(), file_node.names)
    old_symbols = {name: names.copy() for name, names in old_symbols.items()}
    old_symbols_snapshot = snapshot_symbol_table(file_node.fullname(), file_node.names)

    def key(node: DeferredNode) -> int:
        # Unlike modules which are sorted by name within SCC,
        # nodes within the same module are sorted by line number, because
        # this is how they are processed in normal mode.
        return node.node.line

    nodes = sorted(nodeset, key=key)

    # TODO: ignore_all argument to set_file_ignored_lines
    manager.errors.set_file_ignored_lines(file_node.path, file_node.ignored_lines)

    targets = set()
    for node in nodes:
        target = target_from_node(module_id, node.node)
        if target is not None:
            targets.add(target)
    manager.errors.clear_errors_in_targets(file_node.path, targets)

    # Strip semantic analysis information.
    for deferred in nodes:
        strip_target(deferred.node)
    semantic_analyzer = manager.semantic_analyzer

    patches = []  # type: List[Tuple[int, Callable[[], None]]]

    # Second pass of semantic analysis. We don't redo the first pass, because it only
    # does local things that won't go stale.
    for deferred in nodes:
        with semantic_analyzer.file_context(
                file_node=file_node,
                fnam=file_node.path,
                options=manager.options,
                active_type=deferred.active_typeinfo):
            manager.semantic_analyzer.refresh_partial(deferred.node, patches)

    # Third pass of semantic analysis.
    for deferred in nodes:
        with semantic_analyzer.file_context(
                file_node=file_node,
                fnam=file_node.path,
                options=manager.options,
                active_type=deferred.active_typeinfo,
                scope=manager.semantic_analyzer_pass3.scope):
            manager.semantic_analyzer_pass3.refresh_partial(deferred.node, patches)

    with semantic_analyzer.file_context(
            file_node=file_node,
            fnam=file_node.path,
            options=manager.options,
            active_type=None):
        apply_semantic_analyzer_patches(patches)

    # Merge symbol tables to preserve identities of AST nodes. The file node will remain
    # the same, but other nodes may have been recreated with different identities, such as
    # NamedTuples defined using assignment statements.
    new_symbols = find_symbol_tables_recursive(file_node.fullname(), file_node.names)
    for name in old_symbols:
        if name in new_symbols:
            merge_asts(file_node, old_symbols[name], file_node, new_symbols[name])

    # Type check.
    checker = graph[module_id].type_checker()
    checker.reset()
    # We seem to need additional passes in fine-grained incremental mode.
    checker.pass_num = 0
    checker.last_pass = 3
    more = checker.check_second_pass(nodes)
    while more:
        more = False
        if graph[module_id].type_checker().check_second_pass():
            more = True

    new_symbols_snapshot = snapshot_symbol_table(file_node.fullname(), file_node.names)
    # Check if any attribute types were changed and need to be propagated further.
    changed = compare_symbol_table_snapshots(file_node.fullname(),
                                             old_symbols_snapshot,
                                             new_symbols_snapshot)
    new_triggered = {make_trigger(name) for name in changed}

    # Dependencies may have changed.
    update_deps(module_id, nodes, graph, deps, manager.options)

    # Report missing imports.
    verify_dependencies(graph[module_id], manager)

    return new_triggered
Example #22
def reprocess_nodes(manager: BuildManager,
                    graph: Dict[str, State],
                    module_id: str,
                    nodeset: Set[FineGrainedDeferredNode],
                    deps: Dict[str, Set[str]],
                    processed_targets: List[str]) -> Set[str]:
    """Reprocess a set of nodes within a single module.

    Return fired triggers.
    """
    if module_id not in graph:
        manager.log_fine_grained('%s not in graph (blocking errors or deleted?)' %
                                 module_id)
        return set()

    file_node = manager.modules[module_id]
    old_symbols = find_symbol_tables_recursive(file_node.fullname(), file_node.names)
    old_symbols = {name: names.copy() for name, names in old_symbols.items()}
    old_symbols_snapshot = snapshot_symbol_table(file_node.fullname(), file_node.names)

    def key(node: FineGrainedDeferredNode) -> int:
        # Unlike modules which are sorted by name within SCC,
        # nodes within the same module are sorted by line number, because
        # this is how they are processed in normal mode.
        return node.node.line

    nodes = sorted(nodeset, key=key)

    options = graph[module_id].options
    manager.errors.set_file_ignored_lines(
        file_node.path, file_node.ignored_lines, options.ignore_errors)

    targets = set()
    for node in nodes:
        target = target_from_node(module_id, node.node)
        if target is not None:
            targets.add(target)
    manager.errors.clear_errors_in_targets(file_node.path, targets)

    # Strip semantic analysis information.
    patches = []  # type: List[Callable[[], None]]
    for deferred in nodes:
        processed_targets.append(deferred.node.fullname())
        if not manager.options.new_semantic_analyzer:
            strip_target(deferred.node)
        else:
            patches = strip_target_new(deferred.node)
    if not options.new_semantic_analyzer:
        re_analyze_nodes(file_node, nodes, manager, options)
    else:
        process_selected_targets(graph[module_id], nodes, graph, patches)
    # Merge symbol tables to preserve identities of AST nodes. The file node will remain
    # the same, but other nodes may have been recreated with different identities, such as
    # NamedTuples defined using assignment statements.
    new_symbols = find_symbol_tables_recursive(file_node.fullname(), file_node.names)
    for name in old_symbols:
        if name in new_symbols:
            merge_asts(file_node, old_symbols[name], file_node, new_symbols[name])

    # Type check.
    checker = graph[module_id].type_checker()
    checker.reset()
    # We seem to need additional passes in fine-grained incremental mode.
    checker.pass_num = 0
    checker.last_pass = 3
    more = checker.check_second_pass(nodes)
    while more:
        more = False
        if graph[module_id].type_checker().check_second_pass():
            more = True

    if manager.options.export_types:
        manager.all_types.update(graph[module_id].type_map())

    new_symbols_snapshot = snapshot_symbol_table(file_node.fullname(), file_node.names)
    # Check if any attribute types were changed and need to be propagated further.
    changed = compare_symbol_table_snapshots(file_node.fullname(),
                                             old_symbols_snapshot,
                                             new_symbols_snapshot)
    new_triggered = {make_trigger(name) for name in changed}

    # Dependencies may have changed.
    update_deps(module_id, nodes, graph, deps, options)

    # Report missing imports.
    graph[module_id].verify_dependencies()

    return new_triggered
Example #23
def update_module_isolated(module: str,
                           path: str,
                           manager: BuildManager,
                           previous_modules: Dict[str, str],
                           graph: Graph,
                           force_removed: bool) -> UpdateResult:
    """Build a new version of one changed module only.

    Don't propagate changes elsewhere in the program. Raise CompileError on
    encountering a blocking error.

    Args:
        module: Changed module (modified, created or deleted)
        path: Path of the changed module
        manager: Build manager
        graph: Build graph
        force_removed: If True, consider the module removed from the build even if the
            file exists

    Returns a named tuple describing the result (see above for details).
    """
    if module in manager.modules:
        assert_equivalent_paths(path, manager.modules[module].path)
    else:
        manager.log_fine_grained('new module %r' % module)

    if not manager.fscache.isfile(path) or force_removed:
        delete_module(module, graph, manager)
        return NormalUpdate(module, path, [], None)

    old_modules = dict(manager.modules)
    sources = get_sources(manager.fscache, previous_modules, [(module, path)])

    if module in manager.missing_modules:
        manager.missing_modules.remove(module)

    try:
        if module in graph:
            del graph[module]
        load_graph(sources, manager, graph)
    except CompileError as err:
        # Parse error somewhere in the program -- a blocker
        assert err.module_with_blocker
        if err.module_with_blocker != module:
            # Blocker is in a fresh module. Delete the state of the original target module
            # since it will be stale.
            #
            # TODO: It would be more efficient to store the original target module
            path = manager.modules[module].path
            del manager.modules[module]
            remaining_modules = [(module, path)]
        else:
            remaining_modules = []
        return BlockedUpdate(err.module_with_blocker, path, remaining_modules, err.messages)

    # Find any other modules brought in by imports.
    changed_modules = get_all_changed_modules(module, path, previous_modules, graph)
    # If there are multiple modules to process, only process one of them and return
    # the remaining ones to the caller.
    if len(changed_modules) > 1:
        # As an optimization, look for a module that imports no other changed modules.
        module, path = find_relative_leaf_module(changed_modules, graph)
        changed_modules.remove((module, path))
        remaining_modules = changed_modules
        # The remaining modules haven't been processed yet so drop them.
        for id, _ in remaining_modules:
            if id in old_modules:
                manager.modules[id] = old_modules[id]
            else:
                del manager.modules[id]
            del graph[id]
        manager.log_fine_grained('--> %r (newly imported)' % module)
    else:
        remaining_modules = []

    state = graph[module]

    # Process the changed file.
    state.parse_file()
    # TODO: state.fix_suppressed_dependencies()?
    try:
        state.semantic_analysis()
    except CompileError as err:
        # There was a blocking error, so module AST is incomplete. Restore old modules.
        manager.modules.clear()
        manager.modules.update(old_modules)
        del graph[module]
        return BlockedUpdate(module, path, remaining_modules, err.messages)
    state.semantic_analysis_pass_three()
    state.semantic_analysis_apply_patches()

    # Merge old and new ASTs.
    assert state.tree is not None, "file must be at least parsed"
    new_modules = {module: state.tree}  # type: Dict[str, Optional[MypyFile]]
    replace_modules_with_new_variants(manager, graph, old_modules, new_modules)

    # Perform type checking.
    state.type_checker().reset()
    state.type_check_first_pass()
    state.type_check_second_pass()
    state.compute_fine_grained_deps()
    state.finish_passes()

    graph[module] = state

    return NormalUpdate(module, path, remaining_modules, state.tree)