Example #1
def _sort_steps(steps: typing.List["_Step"]) -> typing.Tuple[str, ...]:
    """Return a tuple comprised by step names in the order they should be
    executed."""
    graph = defaultdict(set)
    if len(steps) == 1:
        return (steps[0].get_name(), )
    edged_nodes = set()
    for step in steps:
        for other in steps:
            if step == other:
                continue
            step_outputs = set([io.get_name() for io in step.get_outputs()])
            other_inputs = set([io.get_name() for io in other.get_inputs()])
            if len(step_outputs) > 0 and not step_outputs.isdisjoint(
                    other_inputs):
                graph[other.get_name()].add(step.get_name())
                edged_nodes.add(step.get_name())
                edged_nodes.add(other.get_name())

    isolated_nodes = set([step.get_name() for step in steps]) - edged_nodes
    for node in isolated_nodes:
        graph[node] = set()

    ts = TopologicalSorter(graph)
    return tuple(ts.static_order())
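
A small way to exercise _sort_steps above with stand-in objects. This is only a sketch: FakeIO, FakeStep, and the step names are assumptions, and the function itself relies on defaultdict and TopologicalSorter imports from its own module.

class FakeIO:
    def __init__(self, name):
        self._name = name

    def get_name(self):
        return self._name


class FakeStep:
    def __init__(self, name, inputs=(), outputs=()):
        self._name = name
        self._inputs = [FakeIO(n) for n in inputs]
        self._outputs = [FakeIO(n) for n in outputs]

    def get_name(self):
        return self._name

    def get_inputs(self):
        return self._inputs

    def get_outputs(self):
        return self._outputs


# "train" consumes the "dataset" that "load" produces, so "load" must come first.
steps = [FakeStep("train", inputs=("dataset",)), FakeStep("load", outputs=("dataset",))]
print(_sort_steps(steps))  # -> ('load', 'train')
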
Example #2
 def destCity(self, paths: List[List[str]]) -> str:
     from graphlib import TopologicalSorter
     adict = {}
     for key, value in paths:
         adict[value] = {key}
     ts = TopologicalSorter(adict)
     return tuple(ts.static_order())[-1]
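
A quick way to run the snippet above end to end; the enclosing Solution class name and the sample paths are assumptions made for illustration, not part of the original example.

from typing import List
from graphlib import TopologicalSorter

class Solution:
    def destCity(self, paths: List[List[str]]) -> str:
        adict = {}
        for key, value in paths:
            adict[value] = {key}
        ts = TopologicalSorter(adict)
        return tuple(ts.static_order())[-1]

paths = [["London", "New York"], ["New York", "Lima"], ["Lima", "Sao Paulo"]]
print(Solution().destCity(paths))  # -> Sao Paulo, the only city with no outgoing path
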
Example #3
def _sort_steps(steps: Sequence["_Step"]) -> Tuple[str, ...]:
    """Return a tuple comprised by step names in the order they should be
    executed."""
    graph = defaultdict(set)
    if len(steps) == 1:
        return (steps[0].name, )
    edged_nodes = set()
    for step in steps:
        for other in steps:
            if step == other:
                continue
            step_outputs = set(io.name for io in step.outputs)
            other_inputs = set(io.name for io in other.inputs)
            if len(step_outputs) > 0 and not step_outputs.isdisjoint(
                    other_inputs):
                graph[other.name].add(step.name)
                edged_nodes.add(step.name)
                edged_nodes.add(other.name)

    isolated_nodes = set(step.name for step in steps) - edged_nodes
    for node in isolated_nodes:
        graph[node] = set()

    ts = TopologicalSorter(graph)
    return tuple(ts.static_order())
Example #4
def find_models(module: ModuleType) -> List[Type[Model]]:
    """
    Find all models in a migration script.
    """
    models: List[Type[Model]] = []
    tables = extract_modified_tables(module)

    # add models defined explicitly in the migration script
    queue = list(module.__dict__.values())
    while queue:
        obj = queue.pop()
        if hasattr(obj, "__tablename__"):
            tables.add(obj.__tablename__)
        elif isinstance(obj, list):
            queue.extend(obj)
        elif isinstance(obj, dict):
            queue.extend(obj.values())

    # build models by automapping the existing tables, instead of using current
    # code; this is needed for migrations that modify schemas (eg, add a column),
    # where the current model is out-of-sync with the existing table after a
    # downgrade
    sqlalchemy_uri = current_app.config["SQLALCHEMY_DATABASE_URI"]
    engine = create_engine(sqlalchemy_uri)
    Base = automap_base()
    Base.prepare(engine, reflect=True)
    seen = set()
    while tables:
        table = tables.pop()
        seen.add(table)
        model = getattr(Base.classes, table)
        model.__tablename__ = table
        models.append(model)

        # add other models referenced in foreign keys
        inspector = inspect(model)
        for column in inspector.columns.values():
            for foreign_key in column.foreign_keys:
                table = foreign_key.column.table.name
                if table not in seen:
                    tables.add(table)

    # sort topologically so we can create entities in order and
    # maintain relationships (eg, create a database before creating
    # a slice)
    sorter = TopologicalSorter()
    for model in models:
        inspector = inspect(model)
        dependent_tables: List[str] = []
        for column in inspector.columns.values():
            for foreign_key in column.foreign_keys:
                if foreign_key.column.table.name != model.__tablename__:
                    dependent_tables.append(foreign_key.column.table.name)
        sorter.add(model.__tablename__, *dependent_tables)
    order = list(sorter.static_order())
    models.sort(key=lambda model: order.index(model.__tablename__))

    return models
Example #5
def sort(graph: Graph) -> List[Node]:
    """
    Linearize a dependency graph, throwing an exception if a cycle is detected.

    When no dependencies intervene in ordering, the algorithm preserves the
    original insertion order of the graph.

    :arg graph: a dict mapping each node to an iterable of their antecedents
    """
    sorter = TopologicalSorter(graph)
    return list(sorter.static_order())
Example #6
    def connections(self) -> List[Callable]:
        """All connection callbacks registered to this system.

        Sorted in topological order

        Returns:
            List[Callable]: Sorted list of connection callbacks
        """
        return list(TopologicalSorter(self._pre_connections).static_order()) + list(
            TopologicalSorter(self._post_connections).static_order()
        )
Example #7
    def get_ready(self):
        ts = TopologicalSorter()
        nodes = self.get_all().filter(lambda x: x == "not_running").to_dict(
            lambda x: x.name)

        for node in nodes.values():
            depends = Linq(node.depends).filter(
                lambda x: not x == "success").map(lambda x: x.name).to_list()
            ts.add(node.name, *depends)

        return Linq(ts.static_order()).map(lambda x: nodes[x]).save()
Example #8
    def _normalize_and_sort_symbols(self):
        """Removes __init__.py from symbols and returns dict sorted by symbol type"""
        tmp_sorted = {}
        topological_sorter = TopologicalSorter()
        relative_star_imports_volume = {}
        namespaces = defaultdict(list)
        for k, v in sorted(self._original_symbols.items(), key=lambda x: x[1]["type"]):
            new_symbol = k.replace(".__init__", "")
            if is_relative_import(v):
                shadows = resolve_relative_import(
                    **{k: v for k, v in v["data"].items() if k in {"module", "level", "shadows"}}
                )
                data = dict(v, data=dict(shadows=shadows))
                tmp_sorted[new_symbol] = data
                namespace, _, symbol = new_symbol.rpartition(".")
                namespaces[namespace].append(new_symbol)
            elif is_relative_star_import(v):
                imports = [
                    resolve_relative_import(**{k: v for k, v in data.items() if k in {"module", "level", "shadows"}})
                    for data in v["data"]["imports"]
                ]
                data = dict(v, data=dict(imports=imports))
                topological_sorter.add(new_symbol, *[f"{imp}.{RELATIVE_IMPORT_IDENTIFIER}.*" for imp in imports])
                relative_star_imports_volume[new_symbol] = data
                namespace, _, symbol = new_symbol.partition(f".{RELATIVE_IMPORT_IDENTIFIER}")
                namespaces[namespace].append(new_symbol)
            else:
                tmp_sorted[new_symbol] = v
                if get_symbol_type(v) not in {SymbolType.PACKAGE, SymbolType.MODULE}:
                    namespace = new_symbol.rpartition(".")[0]
                    namespaces[namespace].append(new_symbol)
        try:
            for rel_import in topological_sorter.static_order():
                if rel_import in relative_star_imports_volume:
                    tmp_sorted[rel_import] = relative_star_imports_volume[rel_import]
        except CycleError:
            pass

        shadows_by_in_module_symbol = {
            k: v["data"]["shadows"]
            for k, v in tmp_sorted.items()
            if "shadows" in v["data"] and v["type"] == SymbolType.RELATIVE_IMPORT
        }
        for k, v in tmp_sorted.items():
            volume = v["data"].get("symbols_in_volume", {})
            for volume_symbol in list(volume):
                no_init_volume_symbol = volume_symbol.replace(".__init__", "")
                if no_init_volume_symbol in shadows_by_in_module_symbol:
                    new_volume_symbol = shadows_by_in_module_symbol[no_init_volume_symbol]
                    volume[new_volume_symbol] = volume.pop(volume_symbol)

        self._sorted_symbols = tmp_sorted
        self._namespaces = namespaces
Example #9
    def _update_extensions(self) -> None:
        extension_types_enabled_in_configuration = {
            extension_configuration.extension_type
            for extension_configuration in self._configuration.extensions
            if extension_configuration.enabled
        }

        extension_types_sorter = TopologicalSorter(
            _build_extension_type_graph(
                extension_types_enabled_in_configuration))
        extension_types_sorter.prepare()

        extensions = []
        while extension_types_sorter.is_active():
            extension_types_batch = extension_types_sorter.get_ready()
            extensions_batch = []
            for extension_type in extension_types_batch:
                if issubclass(extension_type, ConfigurableExtension):
                    if extension_type not in extension_types_enabled_in_configuration or self._configuration.extensions[
                            extension_type].extension_type_configuration is None:
                        configuration = extension_type.default_configuration()
                    else:
                        configuration = self._configuration.extensions[
                            extension_type].extension_type_configuration
                    extension = extension_type(self, configuration)
                else:
                    extension = extension_type(self)
                extensions_batch.append(extension)
                extension_types_sorter.done(extension_type)
            extensions.append(extensions_batch)
        self._extensions._update(extensions)
Example #10
def do_all_tasks(graph, task_queue, done_queue):
    topological_sorter = TopologicalSorter(graph)
    topological_sorter.prepare()
    while topological_sorter.is_active():
        for node in topological_sorter.get_ready():
            task_queue.put(node)
        node = done_queue.get()
        topological_sorter.done(node)
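
A minimal way to drive do_all_tasks above with a single worker thread and in-process queues. The graph, the worker function, and the "STOP" sentinel are assumptions made to keep the sketch self-contained; the code this example comes from may feed the queues from separate processes instead.

import queue
import threading

def worker(task_queue, done_queue):
    # pretend to "run" each task, then report it back as finished
    while True:
        node = task_queue.get()
        if node == "STOP":
            break
        done_queue.put(node)

graph = {"scipy": {"numpy"}, "pandas": {"numpy", "scipy"}}
task_queue, done_queue = queue.Queue(), queue.Queue()
t = threading.Thread(target=worker, args=(task_queue, done_queue))
t.start()
do_all_tasks(graph, task_queue, done_queue)  # handles numpy, then scipy, then pandas
task_queue.put("STOP")
t.join()
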
Example #11
    def build(self, step = ''):
        dep_graph = TopologicalSorter()
        for x, y in self.packages.items():
            dep_graph.add(x, *y.requirements)

        compile_order = [*dep_graph.static_order()]

        packages_to_compile_in_order = []
        for x in compile_order:
            packages_to_compile_in_order.append(self.packages[x])

        if step == 'reinstall':
            for pkg in packages_to_compile_in_order:
                if (not pkg.install(self.config)):
                    print(f"Failed to install {pkg.name}!")
                    return 1
                print(f"[nyx]: Installing {pkg.name}-{pkg.version}")
            return 0

        for pkg in packages_to_compile_in_order:
            if (pkg.cached):
                print(f"[nyx]: Using cached version of package {pkg.name}-{pkg.version}")
                continue
            print(f"[nyx]: Compiling {pkg.name}-{pkg.version}")
            # Fetch
            if (not pkg.fetch(self.config)):
                print(f"Failed to fetch {pkg.name}!")
                return 1
            # Patch
            if (not pkg.patch(self.config)):
                print(f"Failed to patch {pkg.name}!")
                return 1
            # Configure
            if (not pkg.configure(self.config, self.enviroment)):
                print(f"Failed to configure {pkg.name}!")
                return 1
            # Build
            if (not pkg.build(self.config, self.enviroment)):
                print(f"Failed to build {pkg.name}!")
                return 1
            # Package
            if (not pkg.package(self.config, self.enviroment)):
                print(f"Failed to package {pkg.name}!")
                return 1
            if (not pkg.install(self.config)):
                print(f"Failed to install {pkg.name}!")
                return 1
        return 0
Example #12
    def _step(self, time):
        # Recurse over subsystems
        for sub in self._subs:
            sub._step(time)

        # Apply pre-connections

        for con in TopologicalSorter(self._pre_connections).static_order():
            con(self, time)

        # Step this system
        self.do_step(time)

        # Apply post-connections
        for con in TopologicalSorter(self._post_connections).static_order():
            con(self, time)
Example #13
    def __sort_nodes(self,
                     from_nodes: frozenset['SvNode'] = None,
                     to_nodes: frozenset['SvNode'] = None)\
                     -> list[tuple['SvNode', list[NodeSocket]]]:
        """Sort nodes of the tree in proper execution order. Whe all given
        parameters are None it uses all tree nodes
        :from_nodes: if given it sorts only next nodes from given ones
        :to_nodes: if given it sorts only previous nodes from given
        If from_nodes and to_nodes are given it uses only intersection of next
        nodes from from_nodes and previous nodes from to_nodes"""
        nodes_to_walk = set()
        walk_structure = None
        if from_nodes is None and to_nodes is None:
            walk_structure = self._from_nodes
        elif from_nodes and to_nodes:
            from_ = self.nodes_from(from_nodes)
            to_ = self.nodes_to(to_nodes)
            nodes_to_walk = from_.intersection(to_)
        elif from_nodes:
            nodes_to_walk = self.nodes_from(from_nodes)
        else:
            nodes_to_walk = self.nodes_to(to_nodes)

        if nodes_to_walk:
            walk_structure: dict[SvNode, set[SvNode]] = defaultdict(set)
            for n in nodes_to_walk:
                if n in self._from_nodes:
                    walk_structure[n] = {_n for _n in self._from_nodes[n]
                                         if _n in nodes_to_walk}

        nodes = []
        if walk_structure:
            for node in TopologicalSorter(walk_structure).static_order():
                nodes.append((node, [self._from_sock.get(s) for s in node.inputs]))
        return nodes
Example #14
 def validate(self, result: Announcement):
     graph = TopologicalSorter()
     for dep in result.dependencies:
         graph.add(dep)
     for key, message in self.items():
         for dep in message.dependencies:
             graph.add(dep, key)
     graph.prepare()
     missing = graph._node2info.keys() - self.keys()
     if missing:
         raise ValueError(f"Missing items: {','.join(missing)}")
Example #15
 def sort_nodes(self, nodes: Iterable['SvNode']) -> list['SvNode']:
     """Returns nodes in order of their correct execution"""
     walk_structure: dict[SvNode, set[SvNode]] = defaultdict(set)
     for n in nodes:
         if n in self._from_nodes:
             walk_structure[n] = {_n for _n in self._from_nodes[n]
                                  if _n in nodes}
     nodes = []
     for node in TopologicalSorter(walk_structure).static_order():
         nodes.append(node)
     return nodes
Example #16
    def init_graph(self, operations_graph):
        ts = TopologicalSorter(operations_graph)
        elements_queue = [*ts.static_order()]

        for v in elements_queue:
            self.gather_global_operations(v)
        for op_name, op in self.global_operations_info.items():
            self.global_operations_info[
                op_name] = self.replace_all_get_functions(op)
        for v in elements_queue:
            (_, element_type, _) = utils.tosca_type_parse(v.type)
            if element_type == NODES or element_type == RELATIONSHIPS:
                new_conf_args = self.replace_all_get_functions(
                    v.configuration_args)
                v.configuration_args = new_conf_args
            else:
                del operations_graph[v]
                for key in operations_graph:
                    if v in operations_graph[key]:
                        operations_graph[key].remove(v)
        return operations_graph
Example #17
def find_models(module: ModuleType) -> List[Type[Model]]:
    """
    Find all models in a migration script.
    """
    models: List[Type[Model]] = []
    tables = extract_modified_tables(module)

    # add models defined explicitly in the migration script
    queue = list(module.__dict__.values())
    while queue:
        obj = queue.pop()
        if hasattr(obj, "__tablename__"):
            tables.add(obj.__tablename__)
        elif isinstance(obj, list):
            queue.extend(obj)
        elif isinstance(obj, dict):
            queue.extend(obj.values())

    # add implicit models
    # pylint: disable=no-member, protected-access
    for obj in Model._decl_class_registry.values():
        if hasattr(obj, "__table__") and obj.__table__.fullname in tables:
            models.append(obj)

    # sort topologically so we can create entities in order and
    # maintain relationships (eg, create a database before creating
    # a slice)
    sorter = TopologicalSorter()
    for model in models:
        inspector = inspect(model)
        dependent_tables: List[str] = []
        for column in inspector.columns.values():
            for foreign_key in column.foreign_keys:
                dependent_tables.append(
                    foreign_key.target_fullname.split(".")[0])
        sorter.add(model.__tablename__, *dependent_tables)
    order = list(sorter.static_order())
    models.sort(key=lambda model: order.index(model.__tablename__))

    return models
Example #18
def topsort(graph):
    """
    Topologically sort a graph

    The graph should be of the form ``{node: [list of nodes], ...}``.
    """
    try:
        from graphlib import TopologicalSorter
        sorter = TopologicalSorter(graph)
        return list(sorter.static_order())
    except ImportError:
        # TODO: Can be removed when we depend on Python >= 3.9
        # make a copy so as not to destroy original
        graph = dict((k, copy(v)) for k, v in graph.items())
        # Use the standard algorithm for topological sorting:
        # http://en.wikipedia.org/wiki/Topological_sorting
        # List that will contain the sorted elements
        sorted_items = []
        # set of all nodes with no incoming edges:
        no_incoming = {
            node
            for node, edges in graph.items() if len(edges) == 0
        }

        while len(no_incoming):
            n = no_incoming.pop()
            sorted_items.append(n)
            # find nodes m with edges to n
            outgoing = [m for m, edges in graph.items() if n in edges]
            for m in outgoing:
                graph[m].remove(n)
                if len(graph[m]) == 0:
                    # no other dependencies
                    no_incoming.add(m)

        if any([len(edges) > 0 for edges in graph.values()]):
            raise ValueError('Cannot topologically sort cyclic graph.')

        return sorted_items
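
A quick illustrative call of topsort above; the sample graph is an assumption, not data from the module that defines the function.

# each node maps to the nodes it depends on; 'a' depends on nothing
print(topsort({"a": [], "b": ["a"], "c": ["a", "b"]}))  # -> ['a', 'b', 'c']
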
Example #19
def _clean_places(ancestry: Ancestry) -> None:
    places = ancestry.entities[Place]

    def _extend_place_graph(graph: Dict, enclosing_place: Place) -> None:
        enclosures = enclosing_place.encloses
        # Ensure each place appears in the graph, even if they're anonymous.
        graph.setdefault(enclosing_place, set())
        for enclosure in enclosures:
            enclosed_place = enclosure.encloses
            seen_enclosed_place = enclosed_place in graph
            graph[enclosing_place].add(enclosed_place)
            if not seen_enclosed_place:
                _extend_place_graph(graph, enclosed_place)

    places_graph = defaultdict(set)
    for place in places:
        _extend_place_graph(places_graph, place)

    for place in TopologicalSorter(places_graph).static_order():
        _clean_place(ancestry, place)
Example #20
def count_containing_bags(rules: Rules, central_color: str) -> int:
    """
    Counts the number of bags which are contained
    within a bag of the given central color.
    """
    adjlist = extract_adjlist(rules)
    contained_bags = reachable_nodes(adjlist, central_color)

    # TopologicalSorter requires the graph in the reversed direction
    reversed_adjlist = {
        k: v
        for k, v in extract_reversed_adjlist(rules).items()
        if k in contained_bags
    }

    total_counts = collections.Counter()
    total_counts[central_color] = 1
    for curr in TopologicalSorter(reversed_adjlist).static_order():
        for color, count in rules[curr].items():
            total_counts[color] += count * total_counts[curr]

    return sum(total_counts.values()) - 1
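
A toy numeric walk-through of the counting logic above, with hand-built rules and reversed adjacency (the real example derives both from puzzle input via helpers not shown here): a gold bag holds 2 red bags, and each red bag holds 3 blue bags.

import collections
from graphlib import TopologicalSorter

rules = {"gold": {"red": 2}, "red": {"blue": 3}, "blue": {}}
# contained colour -> containers, so containers are processed first
reversed_adjlist = {"gold": set(), "red": {"gold"}, "blue": {"red"}}

total_counts = collections.Counter({"gold": 1})
for curr in TopologicalSorter(reversed_adjlist).static_order():
    for color, count in rules[curr].items():
        total_counts[color] += count * total_counts[curr]

print(sum(total_counts.values()) - 1)  # -> 8 bags inside the gold bag (2 red + 6 blue)
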
Example #21
import fileinput as fi
import heapq
from graphlib import TopologicalSorter

ts = TopologicalSorter()

for line in fi.input():
    pr = line.split(" ")
    a, b = pr[1], pr[-3]
    ts.add(b, a)

ts.prepare()

EXTRA = 60

# Worker heaps
IDLE = 5
BUSY = []

Q = list(ts.get_ready())
heapq.heapify(Q)

t_min = 0
while ts.is_active():
    # While there are tasks and idle workers, we hand out tasks.
    while Q and IDLE > 0:
        c = heapq.heappop(Q)
        IDLE -= 1

        heapq.heappush(BUSY, (t_min + ord(c) - (ord("A") - 1) + EXTRA, c))
Example #22
            logger.error('Failed to load %s', pkgbase)
            traceback.print_exc()

    failed = [i.key for i in Status.objects.filter(status='ERROR')]
    for i in failed:
        recursively_fail(dependency_graph, reversed_dependency_graph, i)

    staled = [i.key for i in Status.objects.filter(status='STALED')]
    for i in staled:
        recursively_skip(dependency_graph, reversed_dependency_graph, i)

    building = [i.key for i in Status.objects.filter(status='BUILDING')]
    for i in building:
        recursively_skip(dependency_graph, reversed_dependency_graph, i)

    sorter = TopologicalSorter(dependency_graph)
    order = list(sorter.static_order())
    order = [i for i in order if i in staled]

    resources = config['scheduler']

    for group in resources.keys():
        if group == 'default':
            continue
        resources[group]['used'] = Status.objects.filter(
            detail__startswith=group).count()
        logger.info('%s: %d / %d', group, resources[group]['used'],
                    resources[group]['total'])

    for i in order:
        if i == 'dummy':
Example #23
        'myprog': ['extlib1', 'fmtlib', 'urllib', 'mylib'],
        'mylib': ['fmtlib', 'stdlib'],
    }

    do_all_tasks(graph, task_queue, done_queue)

    for i in range(NUMBER_OF_PROCESSES):
        task_queue.put('STOP')


def do_all_tasks(graph, task_queue, done_queue):
    topological_sorter = TopologicalSorter(graph)
    topological_sorter.prepare()
    while topological_sorter.is_active():
        for node in topological_sorter.get_ready():
            task_queue.put(node)
        node = done_queue.get()
        topological_sorter.done(node)


if __name__ == '__main__':
    dependencies = {
        'scipy': {'numpy'},
        'pandas': {'numpy', 'scipy', 'requests'},
        'requests': {'urllib3'},
    }

    from graphlib import TopologicalSorter
    ts = TopologicalSorter(dependencies)
    print(list(ts.static_order()))
Example #24
from graphlib import TopologicalSorter

#   LIBRARY     mapped_to   LIBRARY DEPENDENCIES
data = {
    'des_system_lib': set('std synopsys std_cell_lib des_system_lib dw02 dw01 ramlib ieee'.split()),
    'dw01': set('ieee dw01 dware gtech'.split()),
    'dw02': set('ieee dw02 dware'.split()),
    'dw03': set('std synopsys dware dw03 dw02 dw01 ieee gtech'.split()),
    'dw04': set('dw04 ieee dw01 dware gtech'.split()),
    'dw05': set('dw05 ieee dware'.split()),
    'dw06': set('dw06 ieee dware'.split()),
    'dw07': set('ieee dware'.split()),
    'dware': set('ieee dware'.split()),
    'gtech': set('ieee gtech'.split()),
    'ramlib': set('std ieee'.split()),
    'std_cell_lib': set('ieee std_cell_lib'.split()),
    'synopsys': set(),
}
# Ignore self dependencies
for k, v in data.items():
    v.discard(k)

ts = TopologicalSorter(data)
print(tuple(ts.static_order()))
Example #25
from graphlib import TopologicalSorter

"""
             |- SW11
     |- SW2 -|- SW12
SW1 -|
     |- SW3 -|- SW13
             |- SW14

"""

topology = {
    "SW1": ["SW2", "SW3"],
    "SW2": ["SW11", "SW12"],
    "SW3": ["SW13", "SW14"]
}

top = TopologicalSorter(topology)
print(list(top.static_order()))
# ['SW11', 'SW12', 'SW13', 'SW14', 'SW2', 'SW3', 'SW1']
Example #26
    "Подготовка VM": [
        "Обновить Python до 3.9",
        "Клонировать репозиторий курса",
        "Установить pyneng.py",
    ],
    "Клонировать репозиторий курса": [
        "Подготовка репозитория",
    ],
    "Подготовка репозитория": [
        "Исправить ошибки в тестах",
        "Исправить ошибки в заданиях",
    ],
    "Исправить ошибки в тестах": [
        "Изменить в тестах порядок в assert",
        "Вынести общие функции в pyneng_common_functions",
    ],
    "Исправить ошибки в заданиях":
    ["Исправить ошибки в заданиях из списка todo"],
}

tasks = TopologicalSorter(tasks_dict)
pprint(list(tasks.static_order()))
[
    'Update Python to 3.9', 'Install pyneng.py',
    'Change the argument order in asserts in the tests',
    'Move common functions into pyneng_common_functions',
    'Fix bugs in the tasks from the todo list', 'Fix bugs in the tests',
    'Fix bugs in the tasks', 'Prepare the repository',
    'Clone the course repository', 'Prepare the VM'
]
Example #27
    def to_dsl(self,
               provider,
               operations_graph,
               reversed_operations_graph,
               cluster_name,
               is_delete,
               artifacts=None,
               target_directory=None,
               inputs=None,
               outputs=None,
               extra=None,
               debug=False):
        if artifacts is None:
            artifacts = []
        if target_directory is None:
            target_directory = self.initial_artifacts_directory

        self.artifacts = {}
        for art in artifacts:
            self.artifacts[art[NAME]] = art

        provider_config = ProviderConfiguration(provider)
        ansible_config = provider_config.get_section(ANSIBLE)
        node_filter_config = provider_config.get_subsection(
            ANSIBLE, NODE_FILTER)

        ids_file_path = os.getcwd(
        ) + '/id_vars_' + cluster_name + self.get_artifact_extension()

        self.init_global_variables(inputs)

        operations_graph = self.init_graph(operations_graph)
        # at this point the graph of operations is a dictionary of copies of ProviderTemplate objects,
        # of the form Node/Relationship: {the set of operations of Nodes/Relationships it depends on}
        elements = TopologicalSorter(operations_graph)
        # use TopologicalSorter to walk the operations graph

        if is_delete:
            reversed_operations_graph = self.init_graph(
                reversed_operations_graph)
            elements = TopologicalSorter(reversed_operations_graph)

        elements.prepare()
        # the first operations at the top of the graph are now in the 'ready' state

        ansible_playbook = []
        if not debug:
            self.prepare_for_run()
        # function for initializing tmp clouni directory
        q = Queue()
        # queue for node names + operations
        active = []
        # list of parallel active operations
        first = True

        while elements.is_active():
            node_name = None
            # try to get a newly finished operation from the queue and find it in the list of active ones
            # if one is found, mark it as done (after locating it in the graph)
            # if ready operations exist, fetch and execute them
            try:
                node_name = q.get_nowait()
            except:
                time.sleep(1)
            if node_name is not None:
                for node in active:
                    if node.name == node_name.split(
                            SEPARATOR
                    )[0] and node.operation == node_name.split(SEPARATOR)[1]:
                        active.remove(node)
                        elements.done(node)
            for v in elements.get_ready():
                # in delete mode we skip all operations except delete; create operations are transformed into delete operations
                if is_delete:
                    if v.operation == 'create':
                        v.operation = 'delete'
                    else:
                        elements.done(v)
                        continue
                logging.debug("Creating ansible play from operation: %s" %
                              v.name + ':' + v.operation)
                extra_tasks_for_delete = self.get_extra_tasks_for_delete(
                    v.type, v.name.replace('-', '_'), ids_file_path)
                description_prefix, module_prefix = self.get_module_prefixes(
                    is_delete, ansible_config)
                description_by_type = self.ansible_description_by_type(
                    v.type_name, description_prefix)
                module_by_type = self.ansible_module_by_type(
                    v.type_name, module_prefix)
                ansible_play_for_elem = dict(name=description_prefix + ' ' +
                                             provider + ' cluster: ' + v.name +
                                             ':' + v.operation,
                                             hosts=self.default_host,
                                             tasks=[])
                # reload id_vars file
                if not is_delete and first:
                    first = False
                    ansible_play_for_elem['tasks'].append(
                        copy.deepcopy(
                            {FILE: {
                                PATH: ids_file_path,
                                STATE: 'absent'
                            }}))
                    ansible_play_for_elem['tasks'].append(
                        copy.deepcopy(
                            {FILE: {
                                PATH: ids_file_path,
                                STATE: 'touch'
                            }}))
                # create playbook for every operation
                if v.operation == 'delete':
                    if not v.is_software_component:
                        ansible_play_for_elem['tasks'].append(
                            copy.deepcopy({'include_vars': ids_file_path}))
                        ansible_tasks = self.get_ansible_tasks_for_delete(
                            v,
                            description_by_type,
                            module_by_type,
                            additional_args=extra)
                        ansible_tasks.extend(
                            self.get_ansible_tasks_from_interface(
                                v,
                                target_directory,
                                is_delete,
                                v.operation,
                                cluster_name,
                                additional_args=extra))
                        if not any(item == module_by_type
                                   for item in ansible_config.get(
                                       'modules_skipping_delete', [])):
                            ansible_play_for_elem['tasks'].extend(
                                copy.deepcopy(ansible_tasks))
                elif v.operation == 'create':
                    if not v.is_software_component:
                        ansible_play_for_elem['tasks'].extend(
                            copy.deepcopy(
                                self.get_ansible_tasks_for_inputs(inputs)))
                        ansible_tasks = self.get_ansible_tasks_for_create(
                            v,
                            target_directory,
                            node_filter_config,
                            description_by_type,
                            module_by_type,
                            additional_args=extra)
                        ansible_tasks.extend(
                            self.get_ansible_tasks_from_interface(
                                v,
                                target_directory,
                                is_delete,
                                v.operation,
                                cluster_name,
                                additional_args=extra))

                        ansible_play_for_elem['tasks'].extend(
                            copy.deepcopy(ansible_tasks))
                        ansible_play_for_elem['tasks'].extend(
                            copy.deepcopy(extra_tasks_for_delete))

                    else:
                        ansible_play_for_elem['hosts'] = v.host
                        ansible_play_for_elem['tasks'].extend(
                            copy.deepcopy(
                                self.get_ansible_tasks_from_interface(
                                    v,
                                    target_directory,
                                    is_delete,
                                    v.operation,
                                    cluster_name,
                                    additional_args=extra)))
                else:
                    (_, element_type, _) = utils.tosca_type_parse(v.type)
                    if element_type == NODES:
                        if v.is_software_component:
                            ansible_play_for_elem['hosts'] = v.host
                    # operations for relationships execute on the target or source host, depending on the operation
                    elif element_type == RELATIONSHIPS:
                        if v.operation == 'pre_configure_target' or v.operation == 'post_configure_target' or v.operation == 'add_source':
                            for elem in operations_graph:
                                if elem.name == v.target:
                                    if elem.is_software_component:
                                        ansible_play_for_elem[
                                            'hosts'] = elem.host
                                    break
                        elif v.operation == 'pre_configure_source' or v.operation == 'post_configure_source':
                            for elem in operations_graph:
                                if elem.name == v.source:
                                    if elem.is_software_component:
                                        ansible_play_for_elem[
                                            'hosts'] = elem.host
                                    break
                        else:
                            logging.error(
                                "Unsupported operation for relationship in operation graph"
                            )
                            sys.exit(1)
                    else:
                        logging.error(
                            "Unsupported element type in operation graph")
                        sys.exit(1)
                    ansible_play_for_elem['tasks'].extend(
                        copy.deepcopy(
                            self.get_ansible_tasks_from_interface(
                                v,
                                target_directory,
                                is_delete,
                                v.operation,
                                cluster_name,
                                additional_args=extra)))
                ansible_playbook.append(ansible_play_for_elem)
                # run playbooks
                if not debug:
                    self.parallel_run([ansible_play_for_elem], v.name,
                                      v.operation, q, cluster_name)
                    # add element to active list
                    active.append(v)
                else:
                    elements.done(v)
        if is_delete:
            last_play = dict(name='Renew id_vars_example.yaml',
                             hosts=self.default_host,
                             tasks=[])
            last_play['tasks'].append(
                copy.deepcopy({FILE: {
                    PATH: ids_file_path,
                    STATE: 'absent'
                }}))
            if not debug:
                self.parallel_run([last_play], None, None, q, cluster_name)
                done = q.get()
                if done != 'Done':
                    logging.error("Something wrong with multiprocessing queue")
                    sys.exit(1)
            ansible_playbook.append(last_play)
        # delete dir with cluster_name in tmp clouni dir
        if not debug:
            rmtree(os.path.join(utils.get_tmp_clouni_dir(), cluster_name))
        return yaml.dump(ansible_playbook,
                         default_flow_style=False,
                         sort_keys=False)
Example #28
from graphlib import TopologicalSorter

table_references = {
    "customers": set(),
    "accounts": {"customers"},
    "products": set(),
    "orders": {"accounts", "customers"},
    "order_products": {"orders", "products"},
}

if __name__ == "__main__":
    sorter = TopologicalSorter(table_references)
    print(list(sorter.static_order()))
Example #29
 def iter_results(toposorter: graphlib.TopologicalSorter):
     while toposorter.is_active():
         ready_tasks = tuple(toposorter.get_ready())
         toposorter.done(*ready_tasks)
         yield ready_tasks
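
A hedged usage sketch for iter_results above, treating it as a free function; the sample graph is an assumption, and the caller is responsible for calling prepare() since the helper only drains the sorter.

import graphlib

ts = graphlib.TopologicalSorter({"b": {"a"}, "c": {"a"}, "d": {"b", "c"}})
ts.prepare()
for batch in iter_results(ts):
    print(batch)
# prints ('a',), then ('b', 'c') in some order, then ('d',)
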
Example #30
    def get_op_order(self):
        ts = TopologicalSorter()

        for host in self.inventory:
            for i, op_hash in enumerate(host.op_hash_order):
                if not i:
                    ts.add(op_hash)
                else:
                    ts.add(op_hash, host.op_hash_order[i - 1])

        final_op_order = []

        ts.prepare()

        while ts.is_active():
            # Ensure that where we have multiple different operations that can be executed in any
            # dependency order we order them by line numbers.
            node_group = sorted(
                ts.get_ready(),
                key=lambda op_hash: self.op_meta[op_hash]["op_order"],
            )
            ts.done(*node_group)
            final_op_order.extend(node_group)

        return final_op_order
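
A condensed, standalone sketch of the same tie-breaking pattern used by get_op_order above; the operation names and line numbers are assumptions. Within each batch of mutually independent operations, a stable sort key decides the final order.

from graphlib import TopologicalSorter

op_line_numbers = {"install": 10, "configure": 20, "deploy": 30}
ts = TopologicalSorter({"deploy": {"install", "configure"}})
ts.prepare()

final_order = []
while ts.is_active():
    batch = sorted(ts.get_ready(), key=op_line_numbers.__getitem__)
    ts.done(*batch)
    final_order.extend(batch)

print(final_order)  # -> ['install', 'configure', 'deploy']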