def get_cluster_plugins_with_tasks(cls, cluster):
    """Return adapters for all enabled plugins of *cluster*.

    Each adapter has its cluster tasks populated before being returned.

    :param cluster: A cluster instance
    :returns: list of plugin adapters
    """
    adapters = []
    for adapter in map(wrap_plugin, ClusterPlugins.get_enabled(cluster.id)):
        # Populate tasks on the adapter before handing it out.
        adapter.set_cluster_tasks()
        adapters.append(adapter)
    return adapters
def get_plugins_node_roles(cls, cluster):
    """Collect node roles registered by all enabled plugins.

    :param cluster: A cluster instance
    :returns: dict of role name -> role metadata
    :raises errors.AlreadyExists: when a plugin role collides with a
        core (release) role or a role from another plugin
    """
    collected = {}
    core_roles = set(cluster.release.roles_metadata)
    for record in ClusterPlugins.get_enabled(cluster.id):
        roles = wrap_plugin(record).normalized_roles_metadata
        # Gather every role name that clashes with core roles or with
        # roles contributed by previously processed plugins, so the
        # whole set can be reported in a single error message.
        clashes = set(
            name for name in roles
            if name in core_roles or name in collected
        )
        if clashes:
            raise errors.AlreadyExists(
                "Plugin (ID={0}) is unable to register the following "
                "node roles: {1}".format(record.id,
                                         ", ".join(sorted(clashes)))
            )
        # All intersection checks passed -- remember these roles so later
        # plugins are checked against them too.
        collected.update(roles)
    return collected
def get_network_roles(cls, cluster, merge_policy):
    """Returns the network roles from plugins.

    The roles cluster and plugins will be mixed
    according to merge policy.
    """
    merged = dict(
        (item['id'], item)
        for item in cluster.release.network_roles_metadata
    )
    failed = {}
    for plugin in ClusterPlugins.get_enabled(cluster.id):
        for candidate in plugin.network_roles_metadata:
            key = candidate['id']
            if key not in merged:
                # New role -- no merging needed.
                merged[key] = candidate
                continue
            try:
                merge_policy.apply_patch(merged[key], candidate)
            except errors.UnresolvableConflict as e:
                logger.error("cannot merge plugin {0}: {1}"
                             .format(plugin.name, e))
                failed[key] = plugin.name
    if failed:
        raise errors.NetworkRoleConflict(
            "Cannot override existing network roles: '{0}' in "
            "plugins: '{1}'".format(
                ', '.join(failed),
                ', '.join(set(failed.values()))))
    return list(merged.values())
def _success_action(cls, task, status, progress):
    """Finish *task* successfully and send a 'done' notification.

    Builds the user-facing message, appends the Zabbix dashboard URL
    and plugin success messages when available, then persists the
    final status/progress/message on the task.
    """
    # A single failed node turns the whole task into an error.
    if any(node.status == 'error' for node in task.cluster.nodes):
        cls._error_action(task, 'error', 100)
        return
    task_name = task.name.title()
    try:
        message = (u"{0} of environment '{1}' is done. ").format(
            task_name, task.cluster.name)
    except Exception as exc:
        logger.error(": ".join([str(exc), traceback.format_exc()]))
        message = u"{0} of environment '{1}' is done".format(
            task_name, task.cluster.name)
    network_manager = objects.Cluster.get_network_manager(task.cluster)
    zabbix_url = network_manager.get_zabbix_url(task.cluster)
    if zabbix_url:
        message = "{0} Access Zabbix dashboard at {1}".format(
            message, zabbix_url)
    plugins_msg = cls._make_plugins_success_message(
        ClusterPlugins.get_enabled(task.cluster.id))
    if plugins_msg:
        message = '{0}\n\n{1}'.format(message, plugins_msg)
    cls._notify(task, consts.NOTIFICATION_TOPICS.done, message)
    objects.Task.update(
        task,
        {'status': status, 'progress': progress, 'message': message})
def _success_action(cls, task, status, progress):
    """Mark *task* as successfully finished.

    Composes the notification message (optionally extended with the
    Zabbix URL and plugin messages), notifies, and stores the final
    state on the task object.
    """
    # Redirect to the error handler when at least one node failed.
    for node in task.cluster.nodes:
        if node.status == "error":
            cls._error_action(task, "error", 100)
            return
    task_name = task.name.title()
    try:
        message = (u"{0} of environment '{1}' is done. ").format(
            task_name, task.cluster.name)
    except Exception as exc:
        logger.error(": ".join([str(exc), traceback.format_exc()]))
        message = u"{0} of environment '{1}' is done".format(
            task_name, task.cluster.name)
    zabbix_url = objects.Cluster.get_network_manager(
        task.cluster).get_zabbix_url(task.cluster)
    if zabbix_url:
        message = "{0} Access Zabbix dashboard at {1}".format(
            message, zabbix_url)
    plugins_msg = cls._make_plugins_success_message(
        ClusterPlugins.get_enabled(task.cluster.id))
    if plugins_msg:
        message = "{0}\n\n{1}".format(message, plugins_msg)
    cls._notify(task, consts.NOTIFICATION_TOPICS.done, message)
    data = {"status": status, "progress": progress, "message": message}
    objects.Task.update(task, data)
def get_network_roles(cls, cluster, merge_policy):
    """Returns the network roles from plugins.

    The roles cluster and plugins will be mixed
    according to merge policy.
    """
    role_index = {}
    for release_role in cluster.release.network_roles_metadata:
        role_index[release_role['id']] = release_role
    conflicts = {}
    for plugin in ClusterPlugins.get_enabled(cluster.id):
        for plugin_role in plugin.network_roles_metadata:
            rid = plugin_role['id']
            if rid in role_index:
                # Existing role: delegate the merge to the policy and
                # record the plugin on unresolvable conflicts.
                try:
                    merge_policy.apply_patch(role_index[rid], plugin_role)
                except errors.UnresolvableConflict as e:
                    logger.error("cannot merge plugin {0}: {1}".format(
                        plugin.name, e))
                    conflicts[rid] = plugin.name
            else:
                role_index[rid] = plugin_role
    if conflicts:
        raise errors.NetworkRoleConflict(
            "Cannot override existing network roles: '{0}' in "
            "plugins: '{1}'".format(
                ', '.join(conflicts),
                ', '.join(set(conflicts.values()))))
    return list(role_index.values())
def get_plugins_node_roles(cls, cluster):
    """Aggregate node role metadata from every enabled plugin.

    :param cluster: A cluster instance
    :returns: dict mapping role name to its metadata
    :raises errors.AlreadyExists: on a role-name collision with the
        release or another plugin
    """
    aggregated = {}
    reserved = set(cluster.release.roles_metadata)
    for plugin_db in ClusterPlugins.get_enabled(cluster.id):
        plugin_roles = wrap_plugin(plugin_db).normalized_roles_metadata
        # Accumulate every conflicting role name first so the error
        # message lists all of them at once.
        duplicates = set(
            r for r in plugin_roles if r in reserved or r in aggregated)
        if duplicates:
            raise errors.AlreadyExists(
                "Plugin (ID={0}) is unable to register the following "
                "node roles: {1}".format(
                    plugin_db.id, ", ".join(sorted(duplicates))))
        # No collisions -- accept this plugin's roles.
        aggregated.update(plugin_roles)
    return aggregated
def get_cluster_plugins_info(self, cluster):
    """Return whitelisted attribute dicts for each enabled plugin.

    :param cluster: A cluster instance
    :returns: list of dicts with whitelisted plugin attributes
    """
    return [
        self.get_attributes(inst.__dict__, self.plugin_info_white_list)
        for inst in ClusterPlugins.get_enabled(cluster.id)
    ]
def get_cluster_plugins_info(self, cluster):
    """Collect identifying fields for every enabled plugin of *cluster*.

    :param cluster: A cluster instance
    :returns: list of dicts with basic plugin info
    """
    fields = ("id", "name", "version", "releases",
              "fuel_version", "package_version")
    return [
        dict((field, getattr(inst, field)) for field in fields)
        for inst in ClusterPlugins.get_enabled(cluster.id)
    ]
def get_volumes_metadata(cls, cluster):
    """Get volumes metadata for cluster from all plugins which enabled it.

    :param cluster: A cluster instance
    :type cluster: Cluster model
    :return: dict -- Object with merged volumes data from plugins
    """
    merged = {
        'volumes': [],
        'volumes_roles_mapping': {},
        'rule_to_pick_boot_disk': [],
    }
    release_ids = set(
        v['id']
        for v in cluster.release.volumes_metadata.get('volumes', []))
    # Maps already-seen volume id -> full name of the plugin that owns it.
    seen = {}
    for adapter in map(wrap_plugin, ClusterPlugins.get_enabled(cluster.id)):
        metadata = adapter.volumes_metadata
        for volume in metadata.get('volumes', []):
            vid = volume['id']
            if vid in release_ids:
                raise errors.AlreadyExists(
                    'Plugin {0} is overlapping with release '
                    'by introducing the same volume with id "{1}"'.format(
                        adapter.full_name, vid))
            if vid in seen:
                raise errors.AlreadyExists(
                    'Plugin {0} is overlapping with plugin {1} '
                    'by introducing the same volume with id "{2}"'.format(
                        adapter.full_name, seen[vid], vid))
            seen[vid] = adapter.full_name
        merged['volumes_roles_mapping'].update(
            metadata.get('volumes_roles_mapping', {}))
        merged['volumes'].extend(metadata.get('volumes', []))
        merged['rule_to_pick_boot_disk'].extend(
            metadata.get('rule_to_pick_boot_disk', []))
    return merged
def get_volumes_metadata(cls, cluster):
    """Get volumes metadata for cluster from all plugins which enabled it.

    :param cluster: A cluster instance
    :type cluster: Cluster model
    :return: dict -- Object with merged volumes data from plugins
    """
    result = {
        'volumes': [],
        'volumes_roles_mapping': {}
    }
    release_volume_ids = [
        v['id']
        for v in cluster.release.volumes_metadata.get('volumes', [])
    ]
    # Tracks volume id -> owning plugin full name for duplicate detection.
    owners = {}
    for adapter in map(wrap_plugin, ClusterPlugins.get_enabled(cluster.id)):
        metadata = adapter.volumes_metadata
        plugin_volumes = metadata.get('volumes', [])
        for volume in plugin_volumes:
            vid = volume['id']
            if vid in release_volume_ids:
                raise errors.AlreadyExists(
                    'Plugin {0} is overlapping with release '
                    'by introducing the same volume with id "{1}"'
                    .format(adapter.full_name, vid))
            elif vid in owners:
                raise errors.AlreadyExists(
                    'Plugin {0} is overlapping with plugin {1} '
                    'by introducing the same volume with id "{2}"'
                    .format(adapter.full_name, owners[vid], vid))
            owners[vid] = adapter.full_name
        result['volumes_roles_mapping'].update(
            metadata.get('volumes_roles_mapping', {}))
        result['volumes'].extend(plugin_volumes)
    return result
def get_plugins_deployment_tasks(cls, cluster):
    """Merge deployment tasks from all enabled plugins.

    :param cluster: A cluster instance
    :returns: list of deployment task dicts
    :raises errors.AlreadyExists: when two plugins declare the same
        deployment task id
    """
    merged_tasks = []
    # Maps task id -> full name of the plugin that introduced it.
    owners = {}
    for adapter in map(wrap_plugin, ClusterPlugins.get_enabled(cluster.id)):
        plugin_tasks = adapter.deployment_tasks
        for task in plugin_tasks:
            tid = task['id']
            if tid in owners:
                raise errors.AlreadyExists(
                    'Plugin {0} is overlapping with plugin {1} '
                    'by introducing the same deployment task with '
                    'id {2}'.format(adapter.full_name, owners[tid], tid))
            owners[tid] = adapter.full_name
        merged_tasks.extend(plugin_tasks)
    return merged_tasks
def _success_action(cls, task, status, progress, nodes):
    """Complete *task* successfully and send a 'done' notification.

    Delegates to the error path when any of *nodes* failed; otherwise
    builds a progress- or environment-level message, optionally
    augmented with the Zabbix URL and plugin messages, and persists
    the final state on the task.
    """
    # Any failed node turns the whole task into an error.
    if any(n.status == consts.NODE_STATUSES.error for n in nodes):
        cls._error_action(task, 'error', 100)
        return
    task_name = task.name.title()
    if not nodes:
        message = u"{0} is done. No changes.".format(task_name)
    else:
        # Report per-node progress while some nodes have not yet
        # reached the same status as the first one.
        pending = objects.Cluster.get_nodes_count_unmet_status(
            nodes[0].cluster, nodes[0].status)
        if pending > 0:
            message = u"{0} of {1} environment node(s) is done.".format(
                task_name, len(nodes))
        else:
            message = u"{0} of environment '{1}' is done.".format(
                task_name, task.cluster.name)
    zabbix_url = objects.Cluster.get_network_manager(
        task.cluster).get_zabbix_url(task.cluster)
    if zabbix_url:
        message = "{0} Access Zabbix dashboard at {1}".format(
            message, zabbix_url)
    # Plugin messages are not relevant for provisioning tasks.
    if task.name != consts.TASK_NAMES.provision:
        plugins_msg = cls._make_plugins_success_message(
            ClusterPlugins.get_enabled(task.cluster.id))
        if plugins_msg:
            message = '{0}\n\n{1}'.format(message, plugins_msg)
    cls._notify(task, consts.NOTIFICATION_TOPICS.done, message)
    objects.Task.update(
        task,
        {'status': status, 'progress': progress, 'message': message})
def _success_action(cls, task, status, progress, nodes):
    """Finalize a successful *task*: notify and store its final state.

    Falls through to the error handler if any node in *nodes* is in
    the error state.
    """
    # Bail out to the error path on any failed node.
    for node in nodes:
        if node.status == consts.NODE_STATUSES.error:
            cls._error_action(task, 'error', 100)
            return
    task_name = task.name.title()
    if nodes:
        # Count the nodes that have not yet reached the status of the
        # first node; a positive count means work is still partial.
        unmet = objects.Cluster.get_nodes_count_unmet_status(
            nodes[0].cluster, nodes[0].status)
        if unmet > 0:
            message = u"{0} of {1} environment node(s) is done.".format(
                task_name, len(nodes))
        else:
            message = u"{0} of environment '{1}' is done.".format(
                task_name, task.cluster.name)
    else:
        message = u"{0} is done. No changes.".format(task_name)
    network_manager = objects.Cluster.get_network_manager(task.cluster)
    zabbix_url = network_manager.get_zabbix_url(task.cluster)
    if zabbix_url:
        message = "{0} Access Zabbix dashboard at {1}".format(
            message, zabbix_url)
    # Provisioning tasks do not carry plugin success messages.
    if task.name != consts.TASK_NAMES.provision:
        plugins_msg = cls._make_plugins_success_message(
            ClusterPlugins.get_enabled(task.cluster.id))
        if plugins_msg:
            message = '{0}\n\n{1}'.format(message, plugins_msg)
    cls._notify(task, consts.NOTIFICATION_TOPICS.done, message)
    data = {'status': status, 'progress': progress, 'message': message}
    objects.Task.update(task, data)
def get_plugins_deployment_tasks(cls, cluster, graph_type=None):
    """Collect deployment tasks for *graph_type* from enabled plugins.

    :param cluster: A cluster instance
    :param graph_type: deployment graph type to fetch tasks for
    :returns: list of deployment task dicts
    :raises errors.AlreadyExists: when two plugins declare the same
        deployment task id
    """
    collected = []
    # task id -> full name of the plugin that first declared it
    declared_by = {}
    for adapter in map(wrap_plugin, ClusterPlugins.get_enabled(cluster.id)):
        plugin_tasks = adapter.get_deployment_tasks(graph_type)
        for task in plugin_tasks:
            tid = task['id']
            if tid in declared_by:
                raise errors.AlreadyExists(
                    'Plugin {0} is overlapping with plugin {1} '
                    'by introducing the same deployment task with '
                    'id {2}'.format(
                        adapter.full_name, declared_by[tid], tid))
            declared_by[tid] = adapter.full_name
        collected.extend(plugin_tasks)
    return collected
def get_enabled_plugins(cls, cluster):
    """Return wrapped adapters for every enabled plugin of *cluster*."""
    return list(map(wrap_plugin, ClusterPlugins.get_enabled(cluster.id)))
def get_enabled_plugins(cls, cluster):
    """Return wrapped adapters for every enabled plugin of *cluster*."""
    adapters = []
    for record in ClusterPlugins.get_enabled(cluster.id):
        adapters.append(wrap_plugin(record))
    return adapters