def collect(resource_type):
    """Collect OpenStack workload (OSWL) statistics for one resource type.

    Gathers current resource data from every 'operational' cluster and
    saves it to the DB, and clears stale OSWL data for clusters that are
    no longer available (status is neither 'operational' nor 'error', or
    the cluster was removed).

    :param resource_type: name of the OpenStack resource type to collect
    """
    try:
        operational_clusters = ClusterCollection.filter_by(
            iterable=None, status=consts.CLUSTER_STATUSES.operational).all()
        error_clusters = ClusterCollection.filter_by(
            iterable=None, status=consts.CLUSTER_STATUSES.error).all()

        all_envs_last_recs = \
            OpenStackWorkloadStatsCollection.get_last_by_resource_type(
                resource_type)
        ready_or_error_ids = set([c.id for c in operational_clusters] +
                                 [c.id for c in error_clusters])
        envs_ids_to_clear = set(r.cluster_id for r in all_envs_last_recs) - \
            ready_or_error_ids

        # Clear current resource data for unavailable clusters.
        # Current OSWL data is cleared for those clusters which status is not
        # 'operational' nor 'error' or when cluster was removed. Data is
        # cleared for cluster only if it was updated recently (today or
        # yesterday). While this collector is running with interval much
        # smaller than one day it should not miss any unavailable cluster.
        # NOTE: loop variable renamed from 'id' so the builtin id() is not
        # shadowed.
        for cluster_id in envs_ids_to_clear:
            oswl_statistics_save(cluster_id, resource_type, [])

        # Collect current OSWL data and update data in DB
        for cluster in operational_clusters:
            try:
                client_provider = helpers.ClientProvider(cluster)
                proxy_for_os_api = utils.get_proxy_for_cluster(cluster)
                version_info = utils.get_version_info(cluster)

                with utils.set_proxy(proxy_for_os_api):
                    data = helpers.get_info_from_os_resource_manager(
                        client_provider, resource_type)
                oswl_statistics_save(cluster.id, resource_type, data,
                                     version_info=version_info)
            except errors.StatsException as e:
                # Expected collection failures: log without traceback.
                logger.error("Cannot collect OSWL resource {0} for cluster "
                             "with id {1}. Details: {2}."
                             .format(resource_type,
                                     cluster.id,
                                     six.text_type(e))
                             )
            except Exception as e:
                # An unexpected failure on one cluster must not stop
                # collection for the remaining clusters.
                logger.exception("Error while collecting OSWL resource {0} "
                                 "for cluster with id {1}. Details: {2}."
                                 .format(resource_type,
                                         cluster.id,
                                         six.text_type(e))
                                 )

        db.commit()
    except Exception as e:
        # Top-level boundary: log and fall through so the session is
        # always released in 'finally'.
        logger.exception("Exception while collecting OS workloads "
                         "for resource name {0}. Details: {1}"
                         .format(resource_type, six.text_type(e)))
    finally:
        db.remove()
def get_clusters_info(self):
    """Collect statistics information for every known cluster.

    :returns: list of dicts, one per cluster, describing its release,
        nodes, attributes and network configuration.
    """
    infos = []
    for cluster in ClusterCollection.all():
        release = cluster.release
        node_count = NodeCollection.filter_by(
            None, cluster_id=cluster.id).count()

        info = {'id': cluster.id, 'nodes_num': node_count}
        info['release'] = {
            'os': release.operating_system,
            'name': release.name,
            'version': release.version
        }
        info['mode'] = cluster.mode
        info['nodes'] = self.get_nodes_info(cluster.nodes)
        info['node_groups'] = self.get_node_groups_info(cluster.node_groups)
        info['status'] = cluster.status
        info['attributes'] = self.get_attributes(
            cluster.attributes.editable, self.attributes_white_list)
        info['vmware_attributes'] = self.get_attributes(
            cluster.vmware_attributes.editable,
            self.vmware_attributes_white_list)
        info['net_provider'] = cluster.net_provider
        info['fuel_version'] = cluster.fuel_version
        info['is_customized'] = cluster.is_customized
        info['network_configuration'] = \
            self.get_network_configuration_info(cluster)
        info['installed_plugins'] = self.get_cluster_plugins_info(cluster)

        infos.append(info)
    return infos
def get_clusters_info(self):
    """Assemble statistics information for all clusters.

    :returns: list of per-cluster dicts with release, node and
        OpenStack information.
    """
    def _cluster_info(cluster):
        # Build the report entry for a single cluster.
        release = cluster.release
        return {
            'id': cluster.id,
            'nodes_num': NodeCollection.filter_by(
                None, cluster_id=cluster.id).count(),
            'release': {
                'os': release.operating_system,
                'name': release.name,
                'version': release.version
            },
            'mode': cluster.mode,
            'nodes': self.get_nodes_info(cluster.nodes),
            'node_groups': self.get_node_groups_info(cluster.node_groups),
            'status': cluster.status,
            'attributes': self.get_attributes(cluster.attributes.editable),
            'net_provider': cluster.net_provider,
            'fuel_version': cluster.fuel_version,
            'is_customized': cluster.is_customized,
            'openstack_info': self.get_openstack_info(cluster,
                                                      cluster.nodes),
        }

    return [_cluster_info(c) for c in ClusterCollection.all()]
def validate_update(cls, data, instance):
    """Validate an environment update request.

    Rejects a name already used by a different environment and any
    attempt to change immutable fields (e.g. 'net_provider').
    """
    parsed = cls._validate_common(data, instance=instance)

    if "name" in parsed:
        others = ClusterCollection.filter_by_not(None, id=instance.id)
        duplicate = ClusterCollection.filter_by(
            others, name=parsed["name"]).first()
        if duplicate:
            raise errors.AlreadyExists(
                "Environment with this name already exists",
                log_message=True)

    for field in ("net_provider",):
        if field in parsed and getattr(instance, field) != parsed[field]:
            raise errors.InvalidData(
                u"Changing '{0}' for environment is prohibited".format(
                    field),
                log_message=True)

    return parsed
def validate_update(cls, data, instance):
    """Check that an environment update is allowed.

    Ensures the new name is unique among other environments and that
    read-only fields are not being modified.
    """
    validated = cls._validate_common(data, instance=instance)

    if "name" in validated:
        # Look for the same name on any cluster except this one.
        name_taken = ClusterCollection.filter_by(
            ClusterCollection.filter_by_not(None, id=instance.id),
            name=validated["name"]
        ).first()
        if name_taken:
            raise errors.AlreadyExists(
                "Environment with this name already exists",
                log_message=True
            )

    for attr in ("net_provider",):
        changed = attr in validated and \
            getattr(instance, attr) != validated[attr]
        if changed:
            raise errors.InvalidData(
                u"Changing '{0}' for environment is prohibited".format(attr),
                log_message=True
            )

    return validated
def get_clusters_info(self):
    """Build the statistics payload describing every cluster.

    :returns: list of dicts with release, node, attribute, plugin and
        network information per cluster.
    """
    result = []
    for cluster in ClusterCollection.all():
        release = cluster.release
        nodes_total = NodeCollection.filter_by(
            None, cluster_id=cluster.id).count()

        # vmware attributes may be absent on a cluster
        vmware_editable = None
        if cluster.vmware_attributes:
            vmware_editable = cluster.vmware_attributes.editable

        result.append({
            'id': cluster.id,
            'nodes_num': nodes_total,
            'release': {
                'os': release.operating_system,
                'name': release.name,
                'version': release.version
            },
            'mode': cluster.mode,
            'nodes': self.get_nodes_info(cluster.nodes),
            'node_groups': self.get_node_groups_info(cluster.node_groups),
            'status': cluster.status,
            'extensions': cluster.extensions,
            'attributes': self.get_attributes(
                Cluster.get_editable_attributes(cluster),
                self.attributes_white_list),
            'vmware_attributes': self.get_attributes(
                vmware_editable,
                self.vmware_attributes_white_list),
            'plugin_links': self.get_plugin_links(cluster.plugin_links),
            'net_provider': cluster.net_provider,
            'fuel_version': cluster.fuel_version,
            'is_customized': cluster.is_customized,
            'network_configuration':
                self.get_network_configuration_info(cluster),
            'installed_plugins': self.get_cluster_plugins_info(cluster),
            'components': cluster.components,
            'cluster_plugins': cluster.cluster_plugins
        })
    return result
def _validate_common(cls, data):
    """Run validations shared by environment create and update.

    Checks that the requested name is not already taken and that a
    referenced release (if any) actually exists.
    """
    parsed = cls.validate_json(data)

    name = parsed.get("name")
    if name and ClusterCollection.filter_by(None, name=name).first():
        raise errors.AlreadyExists(
            "Environment with this name already exists",
            log_message=True)

    # Accept either "release" or the legacy "release_id" key.
    release_id = parsed.get("release", parsed.get("release_id", None))
    if release_id:
        if not Release.get_by_uid(release_id):
            raise errors.InvalidData("Invalid release ID",
                                     log_message=True)

    return parsed
def validate(cls, data):
    """Validate a request for creating a new environment.

    A release ID is mandatory and the environment name must be unique.
    """
    validated = cls._validate_common(data)

    # TODO(ikalnitsky): move it to _validate_common when
    # PATCH method will be implemented
    release_id = validated.get("release",
                               validated.get("release_id", None))
    if not release_id:
        raise errors.InvalidData(u"Release ID is required",
                                 log_message=True)

    if "name" in validated and ClusterCollection.filter_by(
            None, name=validated["name"]).first():
        raise errors.AlreadyExists(
            "Environment with this name already exists",
            log_message=True)

    return validated
def validate(cls, data):
    """Validate environment creation data.

    Requires a release ID and rejects duplicate environment names.
    """
    checked = cls._validate_common(data)

    # TODO(ikalnitsky): move it to _validate_common when
    # PATCH method will be implemented
    release_id = checked.get("release", checked.get("release_id", None))
    if not release_id:
        raise errors.InvalidData(
            u"Release ID is required", log_message=True)

    name_given = "name" in checked
    if name_given and ClusterCollection.filter_by(
            None, name=checked["name"]).first():
        raise errors.AlreadyExists(
            "Environment with this name already exists",
            log_message=True
        )

    return checked
def _validate_common(cls, data):
    """Perform validations common to environment create/update.

    Checks name uniqueness and, when a release is referenced, that the
    release actually exists.
    """
    payload = cls.validate_json(data)

    if payload.get("name"):
        existing = ClusterCollection.filter_by(
            query=None, name=payload["name"]).first()
        if existing:
            raise errors.AlreadyExists(
                "Environment with this name already exists",
                log_message=True
            )

    # Either "release" or the legacy "release_id" key may carry the id.
    release_id = payload.get("release", payload.get("release_id", None))
    if release_id:
        if not Release.get_by_uid(release_id):
            raise errors.InvalidData(
                "Invalid release ID", log_message=True
            )

    return payload
def get_clusters_info(self):
    """Return a brief summary dict for each cluster."""
    summaries = []
    for cluster in ClusterCollection.all():
        summaries.append({
            'id': cluster.id,
            'nodes_num': NodeCollection.filter_by(
                None, cluster_id=cluster.id).count(),
            'release': {
                'os': cluster.release.operating_system,
                'name': cluster.release.name,
                'version': cluster.release.version
            },
            'mode': cluster.mode,
            'nodes': self.get_nodes_info(cluster.nodes),
            'status': cluster.status
        })
    return summaries
def collect(resource_type):
    """Collect OSWL statistics of the given resource type.

    Queries every 'operational' cluster through the OpenStack API
    (behind the cluster's proxy) and persists the gathered data. Any
    failure is logged; the DB session is always released.

    :param resource_type: name of the OpenStack resource type to collect
    """
    try:
        clusters = ClusterCollection.filter_by(
            iterable=None,
            status=consts.CLUSTER_STATUSES.operational
        ).all()

        for cluster in clusters:
            provider = utils.ClientProvider(cluster)
            proxy = utils.get_proxy_for_cluster(cluster)
            # OS API calls must go through the cluster's proxy.
            with utils.set_proxy(proxy):
                data = utils.get_info_from_os_resource_manager(
                    provider, resource_type)
            oswl_statistics_save(cluster.id, resource_type, data)

        db.commit()
    except Exception as e:
        logger.exception("Exception while collecting OS workloads "
                         "for resource name {0}. Details: {1}".format(
                             resource_type, six.text_type(e)))
    finally:
        db.remove()
def get_clusters_info(self):
    """Describe every cluster for the statistics report.

    :returns: list of per-cluster dicts covering release, nodes,
        attributes, plugins, network configuration and role/tag/volume
        metadata.
    """
    report = []
    for cluster in ClusterCollection.all():
        release = cluster.release
        node_total = NodeCollection.filter_by(
            None, cluster_id=cluster.id).count()

        entry = {
            'id': cluster.id,
            'nodes_num': node_total,
            'release': {
                'os': release.operating_system,
                'name': release.name,
                'version': release.version
            },
            'mode': cluster.mode,
            'nodes': self.get_nodes_info(cluster.nodes),
            'node_groups': self.get_node_groups_info(cluster.node_groups),
            'status': cluster.status,
            'extensions': cluster.extensions,
            'attributes': self.get_attributes(
                Cluster.get_editable_attributes(cluster),
                self.attributes_white_list
            ),
            'plugin_links': self.get_plugin_links(
                cluster.plugin_links),
            'net_provider': cluster.net_provider,
            'fuel_version': cluster.fuel_version,
            'is_customized': cluster.is_customized,
            'network_configuration':
                self.get_network_configuration_info(cluster),
            'installed_plugins': self.get_cluster_plugins_info(cluster),
            'components': cluster.components,
            'cluster_plugins': cluster.cluster_plugins,
            'roles_metadata': cluster.roles_metadata,
            'tags_metadata': cluster.tags_metadata,
            'volumes_metadata': cluster.volumes_metadata,
        }
        report.append(entry)
    return report