class AstuteGraph(object):
    """This object stores logic that is required for working with astute.

    Wraps the deployment task graph (GraphSolver) built for a cluster and
    provides node priority assignment, graph validation and task
    serialization for the pre-deployment, deployment and post-deployment
    stages.
    """

    def __init__(self, cluster, tasks=None):
        """:param cluster: cluster db object
        :param tasks: optional explicit list of deployment tasks; when not
            given, the cluster's own deployment tasks are used
        """
        self.cluster = cluster
        self.tasks = tasks or objects.Cluster.get_deployment_tasks(cluster)
        self.graph = GraphSolver()
        self.graph.add_tasks(self.tasks)
        self.serializers = TaskSerializers()

    def only_tasks(self, task_ids):
        """Restrict the graph to the given task ids."""
        self.graph.only_tasks(task_ids)

    def reexecutable_tasks(self, task_filter):
        """Restrict the graph to tasks that match the re-execution filter."""
        self.graph.reexecutable_tasks(task_filter)

    def group_nodes_by_roles(self, nodes):
        """Group nodes by roles

        :param nodes: list of node db object
        :returns: dict of {role_name: nodes_list} pairs
        """
        res = defaultdict(list)
        for node in nodes:
            res[node['role']].append(node)
        return res

    def get_nodes_with_roles(self, grouped_nodes, roles):
        """Returns nodes with provided roles.

        :param grouped_nodes: nodes grouped by role keys
        :param roles: list of roles
        :returns: list of nodes (dicts)
        """
        result = []
        for role in roles:
            if role in grouped_nodes:
                result.extend(grouped_nodes[role])
        return result

    def assign_parallel_nodes(self, priority, nodes):
        """Assign parallel nodes

        It is possible that same node have 2 or more roles that can be
        deployed in parallel. We can not allow it. That is why priorities
        will be assigned in chunks.

        :params priority: PriorityStrategy instance
        :params nodes: list of serialized nodes (dicts)
        """
        current_nodes = nodes
        while current_nodes:
            next_nodes = []
            group = []
            added_uids = []
            for node in current_nodes:
                if 'uid' not in node or 'role' not in node:
                    raise errors.InvalidSerializedNode(
                        'uid and role are mandatory fields. Node: {0}'.format(
                            node))
                if node['uid'] not in added_uids:
                    group.append(node)
                    added_uids.append(node['uid'])
                else:
                    # same uid already scheduled in this chunk - defer the
                    # node to the next parallel chunk
                    next_nodes.append(node)
            priority.in_parallel(group)
            current_nodes = next_nodes

    def process_parallel_nodes(self, priority, parallel_groups,
                               grouped_nodes):
        """Process both types of parallel deployment nodes

        :param priority: PriorityStrategy instance
        :param parallel_groups: list of dict objects
        :param grouped_nodes: dict with {role: nodes} mapping
        """
        parallel_nodes = []
        for group in parallel_groups:
            nodes = self.get_nodes_with_roles(grouped_nodes, group['role'])
            if 'amount' in group['parameters']['strategy']:
                priority.in_parallel_by(
                    nodes, group['parameters']['strategy']['amount'])
            else:
                parallel_nodes.extend(nodes)
        if parallel_nodes:
            # check assign_parallel_nodes docstring for explanation
            self.assign_parallel_nodes(priority, parallel_nodes)

    def add_priorities(self, nodes):
        """Add priorities and tasks for all nodes

        :param nodes: list of node db object
        """
        priority = ps.PriorityStrategy()
        groups_subgraph = self.graph.get_groups_subgraph()

        # get list with names ['controller', 'compute', 'cinder']
        all_groups = groups_subgraph.nodes()
        grouped_nodes = self.group_nodes_by_roles(nodes)

        # if there is no nodes with some roles - mark them as success roles
        processed_groups = set(all_groups) - set(grouped_nodes.keys())
        current_groups = groups_subgraph.get_next_groups(processed_groups)

        while current_groups:
            one_by_one = []
            parallel = []

            for r in current_groups:
                group = self.graph.node[r]
                if (group['parameters']['strategy']['type']
                        == consts.DEPLOY_STRATEGY.one_by_one):
                    one_by_one.append(group)
                elif (group['parameters']['strategy']['type']
                        == consts.DEPLOY_STRATEGY.parallel):
                    parallel.append(group)

            for group in one_by_one:
                nodes = self.get_nodes_with_roles(
                    grouped_nodes, group['role'])
                priority.one_by_one(nodes)

            self.process_parallel_nodes(priority, parallel, grouped_nodes)

            # fetch next part of groups
            processed_groups.update(current_groups)
            current_groups = groups_subgraph.get_next_groups(
                processed_groups)

    def stage_tasks_serialize(self, tasks, nodes):
        """Serialize tasks for certain stage

        :param tasks: list of task dicts in topological order
        :param nodes: list of node db objects
        """
        serialized = []
        role_resolver = RoleResolver(nodes)
        for task in tasks:
            if self.graph.should_exclude_task(task['id']):
                continue

            serializer = self.serializers.get_stage_serializer(task)(
                task, self.cluster, nodes, role_resolver=role_resolver)

            if not serializer.should_execute():
                continue

            serialized.extend(serializer.serialize())
        return serialized

    def post_tasks_serialize(self, nodes):
        """Serialize tasks for post_deployment hook

        :param nodes: list of node db objects
        :raises: errors.NotEnoughInformation when the graph has no
            deploy_end stage
        """
        if 'deploy_end' in self.graph:
            subgraph = self.graph.find_subgraph(start='deploy_end')
        else:
            # BUG FIX: the exception was previously constructed but never
            # raised, which resulted in a NameError on 'subgraph' below.
            raise errors.NotEnoughInformation(
                '*deploy_end* stage must be provided')
        return self.stage_tasks_serialize(subgraph.topology, nodes)

    def pre_tasks_serialize(self, nodes):
        """Serialize tasks for pre_deployment hook

        :param nodes: list of node db objects
        :raises: errors.NotEnoughInformation when the graph has no
            deploy_start stage
        """
        if 'deploy_start' in self.graph:
            subgraph = self.graph.find_subgraph(end='deploy_start')
        else:
            raise errors.NotEnoughInformation(
                '*deploy_start* stage must be provided')
        return self.stage_tasks_serialize(subgraph.topology, nodes)

    def deploy_task_serialize(self, node):
        """Serialize tasks with necessary for orchestrator attributes

        :param node: dict with serialized node
        """
        serialized = []
        priority = ps.PriorityStrategy()

        for task in self.graph.get_group_tasks(node['role']):

            serializer = self.serializers.get_deploy_serializer(task)(
                task, self.cluster, node)

            if not serializer.should_execute():
                continue
            serialized.extend(serializer.serialize())

        priority.one_by_one(serialized)
        return serialized

    def check(self):
        """Validate the graph: it must be acyclic and every referenced
        task must exist.

        :raises: errors.InvalidData
        """
        if not self.graph.is_acyclic():
            err = "Graph cannot be processed because it contains cycles in it:"
            # FIXME(mattymo): GraphSolver cannot be used to call this method
            err += ', '.join(
                six.moves.map(str, nx.simple_cycles(nx.DiGraph(self.graph))))
            err += '\n'
            raise errors.InvalidData(err)

        non_existing_tasks = []
        invalid_tasks = []

        for node_key, node_value in six.iteritems(self.graph.node):
            if not node_value.get('id'):
                # a node without an 'id' only exists because some real task
                # referenced it - collect both it and its neighbors for
                # the error report
                successors = self.graph.successors(node_key)
                predecessors = self.graph.predecessors(node_key)
                neighbors = successors + predecessors

                non_existing_tasks.append(node_key)
                invalid_tasks.extend(neighbors)

        if non_existing_tasks:
            raise errors.InvalidData(
                "Tasks '{non_existing_tasks}' can't be in requires"
                "|required_for|groups|tasks for [{invalid_tasks}]"
                " because they don't exist in the graph".format(
                    non_existing_tasks=', '.join(
                        str(x) for x in sorted(non_existing_tasks)),
                    invalid_tasks=', '.join(
                        str(x) for x in sorted(set(invalid_tasks)))))
class AstuteGraph(object):
    """This object stores logic that is required for working with astute
    orchestrator.

    Wraps the deployment task graph (DeploymentGraph) built for a cluster
    and provides node priority assignment and task serialization for the
    pre-deployment, deployment and post-deployment stages.
    """

    def __init__(self, cluster):
        """:param cluster: cluster db object"""
        self.cluster = cluster
        self.tasks = objects.Cluster.get_deployment_tasks(cluster)
        self.graph = DeploymentGraph()
        self.graph.add_tasks(self.tasks)
        self.serializers = TaskSerializers()

    def only_tasks(self, task_ids):
        """Restrict the graph to the given task ids."""
        self.graph.only_tasks(task_ids)

    def group_nodes_by_roles(self, nodes):
        """Group nodes by roles

        :param nodes: list of node db object
        :returns: dict of {role_name: nodes_list} pairs
        """
        res = defaultdict(list)
        for node in nodes:
            res[node['role']].append(node)
        return res

    def get_nodes_with_roles(self, grouped_nodes, roles):
        """Returns nodes with provided roles.

        :param grouped_nodes: nodes grouped by role keys
        :param roles: list of roles
        :returns: list of nodes (dicts)
        """
        result = []
        for role in roles:
            if role in grouped_nodes:
                result.extend(grouped_nodes[role])
        return result

    def assign_parallel_nodes(self, priority, nodes):
        """It is possible that same node have 2 or more roles that can be
        deployed in parallel. We can not allow it. That is why priorities
        will be assigned in chunks.

        :params priority: PriorityStrategy instance
        :params nodes: list of serialized nodes (dicts)
        """
        current_nodes = nodes
        while current_nodes:
            next_nodes = []
            group = []
            added_uids = []
            for node in current_nodes:
                if 'uid' not in node or 'role' not in node:
                    raise errors.InvalidSerializedNode(
                        'uid and role are mandatory fields. Node: {0}'.format(
                            node))
                if node['uid'] not in added_uids:
                    group.append(node)
                    added_uids.append(node['uid'])
                else:
                    # same uid already scheduled in this chunk - defer the
                    # node to the next parallel chunk
                    next_nodes.append(node)
            priority.in_parallel(group)
            current_nodes = next_nodes

    def process_parallel_nodes(self, priority, parallel_groups,
                               grouped_nodes):
        """Process both types of parallel deployment nodes

        :param priority: PriorityStrategy instance
        :param parallel_groups: list of dict objects
        :param grouped_nodes: dict with {role: nodes} mapping
        """
        parallel_nodes = []
        for group in parallel_groups:
            nodes = self.get_nodes_with_roles(grouped_nodes, group['role'])
            if 'amount' in group['parameters']['strategy']:
                priority.in_parallel_by(
                    nodes, group['parameters']['strategy']['amount'])
            else:
                parallel_nodes.extend(nodes)
        if parallel_nodes:
            # check assign_parallel_nodes docstring for explanation
            self.assign_parallel_nodes(priority, parallel_nodes)

    def add_priorities(self, nodes):
        """Add priorities and tasks for all nodes

        :param nodes: list of node db object
        """
        priority = ps.PriorityStrategy()
        groups_subgraph = self.graph.get_groups_subgraph()

        # get list with names ['controller', 'compute', 'cinder']
        all_groups = groups_subgraph.nodes()
        grouped_nodes = self.group_nodes_by_roles(nodes)

        # if there is no nodes with some roles - mark them as success roles
        processed_groups = set(all_groups) - set(grouped_nodes.keys())
        current_groups = groups_subgraph.get_next_groups(processed_groups)

        while current_groups:
            one_by_one = []
            parallel = []

            for r in current_groups:
                group = self.graph.node[r]
                if (group['parameters']['strategy']['type']
                        == consts.DEPLOY_STRATEGY.one_by_one):
                    one_by_one.append(group)
                elif (group['parameters']['strategy']['type']
                        == consts.DEPLOY_STRATEGY.parallel):
                    parallel.append(group)

            for group in one_by_one:
                nodes = self.get_nodes_with_roles(
                    grouped_nodes, group['role'])
                priority.one_by_one(nodes)

            self.process_parallel_nodes(priority, parallel, grouped_nodes)

            # fetch next part of groups
            processed_groups.update(current_groups)
            current_groups = groups_subgraph.get_next_groups(
                processed_groups)

    def stage_tasks_serialize(self, tasks, nodes):
        """Serialize tasks for certain stage

        :param tasks: list of task dicts in topological order
        :param nodes: list of node db objects
        """
        serialized = []
        for task in tasks:
            if self.graph.should_exclude_task(task['id']):
                continue

            serializer = self.serializers.get_stage_serializer(task)(
                task, self.cluster, nodes)

            if not serializer.should_execute():
                continue

            serialized.extend(serializer.serialize())
        return serialized

    def post_tasks_serialize(self, nodes):
        """Serialize tasks for post_deployment hook

        :param nodes: list of node db objects
        :raises: errors.NotEnoughInformation when the graph has no
            deploy_end stage
        """
        if 'deploy_end' in self.graph:
            subgraph = self.graph.find_subgraph(start='deploy_end')
        else:
            # BUG FIX: the exception was previously constructed but never
            # raised, which resulted in a NameError on 'subgraph' below.
            raise errors.NotEnoughInformation(
                '*deploy_end* stage must be provided')
        return self.stage_tasks_serialize(subgraph.topology, nodes)

    def pre_tasks_serialize(self, nodes):
        """Serialize tasks for pre_deployment hook

        :param nodes: list of node db objects
        :raises: errors.NotEnoughInformation when the graph has no
            deploy_start stage
        """
        if 'deploy_start' in self.graph:
            subgraph = self.graph.find_subgraph(end='deploy_start')
        else:
            raise errors.NotEnoughInformation(
                '*deploy_start* stage must be provided')
        return self.stage_tasks_serialize(subgraph.topology, nodes)

    def deploy_task_serialize(self, node):
        """Serialize tasks with necessary for orchestrator attributes

        :param node: dict with serialized node
        """
        serialized = []
        priority = ps.PriorityStrategy()

        for task in self.graph.get_group_tasks(node['role']):

            serializer = self.serializers.get_deploy_serializer(task)(
                task, self.cluster, node)

            if not serializer.should_execute():
                continue
            serialized.extend(serializer.serialize())

        priority.one_by_one(serialized)
        return serialized
class AstuteGraph(object):
    """This object stores logic that is required for working with astute
    orchestrator.

    Wraps the deployment task graph (DeploymentGraph) built for a cluster
    and provides node priority assignment and task serialization for the
    pre-deployment, deployment and post-deployment stages.
    """

    def __init__(self, cluster):
        """:param cluster: cluster db object"""
        self.cluster = cluster
        self.tasks = objects.Cluster.get_deployment_tasks(cluster)
        self.graph = DeploymentGraph()
        self.graph.add_tasks(self.tasks)
        self.serializers = TaskSerializers()

    def only_tasks(self, task_ids):
        """Restrict the graph to the given task ids."""
        self.graph.only_tasks(task_ids)

    def group_nodes_by_roles(self, nodes):
        """Group nodes by roles

        :param nodes: list of node db object
        :returns: dict of {role_name: nodes_list} pairs
        """
        res = defaultdict(list)
        for node in nodes:
            res[node['role']].append(node)
        return res

    def get_nodes_with_roles(self, grouped_nodes, roles):
        """Returns nodes with provided roles.

        :param grouped_nodes: nodes grouped by role keys
        :param roles: list of roles
        :returns: list of nodes (dicts)
        """
        result = []
        for role in roles:
            if role in grouped_nodes:
                result.extend(grouped_nodes[role])
        return result

    def assign_parallel_nodes(self, priority, nodes):
        """It is possible that same node have 2 or more roles that can be
        deployed in parallel. We can not allow it. That is why priorities
        will be assigned in chunks.

        :params priority: PriorityStrategy instance
        :params nodes: list of serialized nodes (dicts)
        """
        current_nodes = nodes
        while current_nodes:
            next_nodes = []
            group = []
            added_uids = []
            for node in current_nodes:
                if 'uid' not in node or 'role' not in node:
                    raise errors.InvalidSerializedNode(
                        'uid and role are mandatory fields. Node: {0}'.format(
                            node))
                if node['uid'] not in added_uids:
                    group.append(node)
                    added_uids.append(node['uid'])
                else:
                    # same uid already scheduled in this chunk - defer the
                    # node to the next parallel chunk
                    next_nodes.append(node)
            priority.in_parallel(group)
            current_nodes = next_nodes

    def process_parallel_nodes(self, priority, parallel_groups,
                               grouped_nodes):
        """Process both types of parallel deployment nodes

        :param priority: PriorityStrategy instance
        :param parallel_groups: list of dict objects
        :param grouped_nodes: dict with {role: nodes} mapping
        """
        parallel_nodes = []
        for group in parallel_groups:
            nodes = self.get_nodes_with_roles(grouped_nodes, group['role'])
            if 'amount' in group['parameters']['strategy']:
                priority.in_parallel_by(
                    nodes, group['parameters']['strategy']['amount'])
            else:
                parallel_nodes.extend(nodes)
        if parallel_nodes:
            # check assign_parallel_nodes docstring for explanation
            self.assign_parallel_nodes(priority, parallel_nodes)

    def add_priorities(self, nodes):
        """Add priorities and tasks for all nodes

        :param nodes: list of node db object
        """
        priority = ps.PriorityStrategy()
        groups_subgraph = self.graph.get_groups_subgraph()

        # get list with names ['controller', 'compute', 'cinder']
        all_groups = groups_subgraph.nodes()
        grouped_nodes = self.group_nodes_by_roles(nodes)

        # if there is no nodes with some roles - mark them as success roles
        processed_groups = set(all_groups) - set(grouped_nodes.keys())
        current_groups = groups_subgraph.get_next_groups(processed_groups)

        while current_groups:
            one_by_one = []
            parallel = []

            for r in current_groups:
                group = self.graph.node[r]
                if (group['parameters']['strategy']['type']
                        == consts.DEPLOY_STRATEGY.one_by_one):
                    one_by_one.append(group)
                elif (group['parameters']['strategy']['type']
                        == consts.DEPLOY_STRATEGY.parallel):
                    parallel.append(group)

            for group in one_by_one:
                nodes = self.get_nodes_with_roles(
                    grouped_nodes, group['role'])
                priority.one_by_one(nodes)

            self.process_parallel_nodes(priority, parallel, grouped_nodes)

            # fetch next part of groups
            processed_groups.update(current_groups)
            current_groups = groups_subgraph.get_next_groups(
                processed_groups)

    def stage_tasks_serialize(self, tasks, nodes):
        """Serialize tasks for certain stage

        :param tasks: list of task dicts in topological order
        :param nodes: list of node db objects
        """
        serialized = []
        for task in tasks:
            if self.graph.should_exclude_task(task['id']):
                continue

            serializer = self.serializers.get_stage_serializer(task)(
                task, self.cluster, nodes)

            if not serializer.should_execute():
                continue

            serialized.extend(serializer.serialize())
        return serialized

    def post_tasks_serialize(self, nodes):
        """Serialize tasks for post_deployment hook

        :param nodes: list of node db objects
        :raises: errors.NotEnoughInformation when the graph has no
            deploy_end stage
        """
        if 'deploy_end' in self.graph:
            subgraph = self.graph.find_subgraph(start='deploy_end')
        else:
            # BUG FIX: the exception was previously constructed but never
            # raised, which resulted in a NameError on 'subgraph' below.
            raise errors.NotEnoughInformation(
                '*deploy_end* stage must be provided')
        return self.stage_tasks_serialize(subgraph.topology, nodes)

    def pre_tasks_serialize(self, nodes):
        """Serialize tasks for pre_deployment hook

        :param nodes: list of node db objects
        :raises: errors.NotEnoughInformation when the graph has no
            deploy_start stage
        """
        if 'deploy_start' in self.graph:
            subgraph = self.graph.find_subgraph(end='deploy_start')
        else:
            raise errors.NotEnoughInformation(
                '*deploy_start* stage must be provided')
        return self.stage_tasks_serialize(subgraph.topology, nodes)

    def deploy_task_serialize(self, node):
        """Serialize tasks with necessary for orchestrator attributes

        :param node: dict with serialized node
        """
        serialized = []
        priority = ps.PriorityStrategy()

        for task in self.graph.get_group_tasks(node['role']):

            serializer = self.serializers.get_deploy_serializer(task)(
                task, self.cluster, node)

            if not serializer.should_execute():
                continue
            serialized.extend(serializer.serialize())

        priority.one_by_one(serialized)
        return serialized
class AstuteGraph(object):
    """This object stores logic that is required for working with astute.

    Wraps the deployment task graph (GraphSolver) built for a cluster and
    provides node priority assignment, graph validation and task
    serialization for the pre-deployment, deployment and post-deployment
    stages.
    """

    def __init__(self, cluster, tasks=None):
        """:param cluster: cluster db object
        :param tasks: optional explicit list of deployment tasks; when not
            given, the cluster's own deployment tasks are used
        """
        self.cluster = cluster
        self.tasks = tasks or objects.Cluster.get_deployment_tasks(cluster)
        self.graph = GraphSolver()
        self.graph.add_tasks(self.tasks)
        self.serializers = TaskSerializers()

    def only_tasks(self, task_ids):
        """Restrict the graph to the given task ids."""
        self.graph.only_tasks(task_ids)

    def reexecutable_tasks(self, task_filter):
        """Restrict the graph to tasks that match the re-execution filter."""
        self.graph.reexecutable_tasks(task_filter)

    def group_nodes_by_roles(self, nodes):
        """Group nodes by roles

        :param nodes: list of node db object
        :returns: dict of {role_name: nodes_list} pairs
        """
        res = defaultdict(list)
        for node in nodes:
            res[node['role']].append(node)
        return res

    def get_nodes_with_roles(self, grouped_nodes, roles):
        """Returns nodes with provided roles.

        :param grouped_nodes: nodes grouped by role keys
        :param roles: list of roles
        :returns: list of nodes (dicts)
        """
        result = []
        for role in roles:
            if role in grouped_nodes:
                result.extend(grouped_nodes[role])
        return result

    def assign_parallel_nodes(self, priority, nodes):
        """Assign parallel nodes

        It is possible that same node have 2 or more roles that can be
        deployed in parallel. We can not allow it. That is why priorities
        will be assigned in chunks.

        :params priority: PriorityStrategy instance
        :params nodes: list of serialized nodes (dicts)
        """
        current_nodes = nodes
        while current_nodes:
            next_nodes = []
            group = []
            added_uids = []
            for node in current_nodes:
                if 'uid' not in node or 'role' not in node:
                    raise errors.InvalidSerializedNode(
                        'uid and role are mandatory fields. Node: {0}'.format(
                            node))
                if node['uid'] not in added_uids:
                    group.append(node)
                    added_uids.append(node['uid'])
                else:
                    # same uid already scheduled in this chunk - defer the
                    # node to the next parallel chunk
                    next_nodes.append(node)
            priority.in_parallel(group)
            current_nodes = next_nodes

    def process_parallel_nodes(self, priority, parallel_groups,
                               grouped_nodes):
        """Process both types of parallel deployment nodes

        :param priority: PriorityStrategy instance
        :param parallel_groups: list of dict objects
        :param grouped_nodes: dict with {role: nodes} mapping
        """
        parallel_nodes = []
        for group in parallel_groups:
            nodes = self.get_nodes_with_roles(grouped_nodes, group['role'])
            if 'amount' in group['parameters']['strategy']:
                priority.in_parallel_by(
                    nodes, group['parameters']['strategy']['amount'])
            else:
                parallel_nodes.extend(nodes)
        if parallel_nodes:
            # check assign_parallel_nodes docstring for explanation
            self.assign_parallel_nodes(priority, parallel_nodes)

    def add_priorities(self, nodes):
        """Add priorities and tasks for all nodes

        :param nodes: list of node db object
        """
        priority = ps.PriorityStrategy()
        groups_subgraph = self.graph.get_groups_subgraph()

        # get list with names ['controller', 'compute', 'cinder']
        all_groups = groups_subgraph.nodes()
        grouped_nodes = self.group_nodes_by_roles(nodes)

        # if there is no nodes with some roles - mark them as success roles
        processed_groups = set(all_groups) - set(grouped_nodes.keys())
        current_groups = groups_subgraph.get_next_groups(processed_groups)

        while current_groups:
            one_by_one = []
            parallel = []

            for r in current_groups:
                group = self.graph.node[r]
                if (group['parameters']['strategy']['type']
                        == consts.DEPLOY_STRATEGY.one_by_one):
                    one_by_one.append(group)
                elif (group['parameters']['strategy']['type']
                        == consts.DEPLOY_STRATEGY.parallel):
                    parallel.append(group)

            for group in one_by_one:
                nodes = self.get_nodes_with_roles(
                    grouped_nodes, group['role'])
                priority.one_by_one(nodes)

            self.process_parallel_nodes(priority, parallel, grouped_nodes)

            # fetch next part of groups
            processed_groups.update(current_groups)
            current_groups = groups_subgraph.get_next_groups(
                processed_groups)

    def stage_tasks_serialize(self, tasks, nodes):
        """Serialize tasks for certain stage

        :param tasks: list of task dicts in topological order
        :param nodes: list of node db objects
        """
        serialized = []
        role_resolver = RoleResolver(nodes)
        for task in tasks:
            if self.graph.should_exclude_task(task['id']):
                continue

            serializer = self.serializers.get_stage_serializer(task)(
                task, self.cluster, nodes, role_resolver=role_resolver)

            if not serializer.should_execute():
                continue

            serialized.extend(serializer.serialize())
        return serialized

    def post_tasks_serialize(self, nodes):
        """Serialize tasks for post_deployment hook

        :param nodes: list of node db objects
        :raises: errors.NotEnoughInformation when the graph has no
            deploy_end stage
        """
        if 'deploy_end' in self.graph:
            subgraph = self.graph.find_subgraph(start='deploy_end')
        else:
            # BUG FIX: the exception was previously constructed but never
            # raised, which resulted in a NameError on 'subgraph' below.
            raise errors.NotEnoughInformation(
                '*deploy_end* stage must be provided')
        return self.stage_tasks_serialize(subgraph.topology, nodes)

    def pre_tasks_serialize(self, nodes):
        """Serialize tasks for pre_deployment hook

        :param nodes: list of node db objects
        :raises: errors.NotEnoughInformation when the graph has no
            deploy_start stage
        """
        if 'deploy_start' in self.graph:
            subgraph = self.graph.find_subgraph(end='deploy_start')
        else:
            raise errors.NotEnoughInformation(
                '*deploy_start* stage must be provided')
        return self.stage_tasks_serialize(subgraph.topology, nodes)

    def deploy_task_serialize(self, node):
        """Serialize tasks with necessary for orchestrator attributes

        :param node: dict with serialized node
        """
        serialized = []
        priority = ps.PriorityStrategy()

        for task in self.graph.get_group_tasks(node['role']):

            serializer = self.serializers.get_deploy_serializer(task)(
                task, self.cluster, node)

            if not serializer.should_execute():
                continue
            serialized.extend(serializer.serialize())

        priority.one_by_one(serialized)
        return serialized

    def check(self):
        """Validate the graph: it must be acyclic and every referenced
        task must exist.

        :raises: errors.InvalidData
        """
        if not self.graph.is_acyclic():
            err = "Graph cannot be processed because it contains cycles in it:"
            # FIXME(mattymo): GraphSolver cannot be used to call this method
            err += ', '.join(
                six.moves.map(str, nx.simple_cycles(nx.DiGraph(self.graph))))
            err += '\n'
            raise errors.InvalidData(err)

        non_existing_tasks = []
        invalid_tasks = []

        for node_key, node_value in six.iteritems(self.graph.node):
            if not node_value.get('id'):
                # a node without an 'id' only exists because some real task
                # referenced it - collect both it and its neighbors for
                # the error report
                successors = self.graph.successors(node_key)
                predecessors = self.graph.predecessors(node_key)
                neighbors = successors + predecessors

                non_existing_tasks.append(node_key)
                invalid_tasks.extend(neighbors)

        if non_existing_tasks:
            raise errors.InvalidData(
                "Tasks '{non_existing_tasks}' can't be in requires"
                "|required_for|groups|tasks for [{invalid_tasks}]"
                " because they don't exist in the graph".format(
                    non_existing_tasks=', '.join(
                        str(x) for x in sorted(non_existing_tasks)),
                    invalid_tasks=', '.join(
                        str(x) for x in sorted(set(invalid_tasks)))))