def collect(resource_type):
    """Collect OpenStack workload (OSWL) statistics for one resource type.

    Gathers current OSWL data from every cluster in 'operational' status,
    saves it via oswl_statistics_save, and clears stale OSWL records for
    clusters that are no longer available (neither 'operational' nor
    'error', or removed entirely).

    :param resource_type: OSWL resource type identifier passed through to
        the stats collection helpers and to oswl_statistics_save.
    """
    try:
        # Clusters we still consider "reachable" for statistics purposes.
        operational_clusters = ClusterCollection.filter_by(
            iterable=None, status=consts.CLUSTER_STATUSES.operational).all()
        error_clusters = ClusterCollection.filter_by(
            iterable=None, status=consts.CLUSTER_STATUSES.error).all()

        # Latest OSWL record per environment for this resource type; used
        # to find environments whose data must be cleared.
        all_envs_last_recs = \
            OpenStackWorkloadStatsCollection.get_last_by_resource_type(
                resource_type)
        ready_or_error_ids = set([c.id for c in operational_clusters] +
                                 [c.id for c in error_clusters])
        # Environments that have OSWL records but are no longer in an
        # 'operational' or 'error' state (including removed clusters).
        envs_ids_to_clear = set(r.cluster_id for r in all_envs_last_recs) - \
            ready_or_error_ids

        # Clear current resource data for unavailable clusters.
        # Current OSWL data is cleared for those clusters which status is not
        # 'operational' nor 'error' or when cluster was removed. Data is
        # cleared for cluster only if it was updated recently (today or
        # yesterday). While this collector is running with interval much
        # smaller than one day it should not miss any unavailable cluster.
        for id in envs_ids_to_clear:
            # Empty data list signals "clear" to oswl_statistics_save.
            oswl_statistics_save(id, resource_type, [])

        # Collect current OSWL data and update data in DB
        for cluster in operational_clusters:
            try:
                client_provider = helpers.ClientProvider(cluster)
                proxy_for_os_api = utils.get_proxy_for_cluster(cluster)
                version_info = utils.get_version_info(cluster)

                # OS API calls must go through the cluster's proxy; the
                # context manager restores proxy settings afterwards.
                with utils.set_proxy(proxy_for_os_api):
                    data = helpers.get_info_from_os_resource_manager(
                        client_provider, resource_type)
                oswl_statistics_save(cluster.id, resource_type, data,
                                     version_info=version_info)
            except errors.StatsException as e:
                # Expected collection failure: log without traceback and
                # continue with the remaining clusters.
                logger.error("Cannot collect OSWL resource {0} for cluster "
                             "with id {1}. Details: {2}."
                             .format(resource_type,
                                     cluster.id,
                                     six.text_type(e))
                             )
            except Exception as e:
                # Unexpected failure: log with traceback, still continue so
                # one broken cluster does not abort the whole run.
                logger.exception("Error while collecting OSWL resource {0} "
                                 "for cluster with id {1}. Details: {2}."
                                 .format(resource_type,
                                         cluster.id,
                                         six.text_type(e))
                                 )

        db.commit()
    except Exception as e:
        logger.exception("Exception while collecting OS workloads "
                         "for resource name {0}. Details: {1}"
                         .format(resource_type, six.text_type(e)))
    finally:
        # Always release the scoped DB session for this collector run.
        db.remove()
def _validate_common(cls, data, instance=None):
    """Run validations shared by environment create and update.

    Checks that the requested environment name is not already taken and
    that the referenced release (if any) exists.

    :param data: raw JSON payload to validate.
    :param instance: existing cluster being updated, if any. Accepted for
        compatibility with validate_update callers that pass
        ``instance=instance``; not used by the common checks themselves.
    :returns: the deserialized payload dict.
    :raises errors.AlreadyExists: if an environment with the same name exists.
    :raises errors.InvalidData: if the release ID does not match any release.
    """
    d = cls.validate_json(data)

    if d.get("name"):
        if ClusterCollection.filter_by(None, name=d["name"]).first():
            raise errors.AlreadyExists(
                "Environment with this name already exists",
                log_message=True)

    # Accept either "release" or the legacy "release_id" key.
    release_id = d.get("release", d.get("release_id", None))
    if release_id:
        release = Release.get_by_uid(release_id)
        if not release:
            raise errors.InvalidData(
                "Invalid release ID", log_message=True)

    return d
def validate_update(cls, data, instance):
    """Validate payload for updating an existing environment.

    Ensures the new name (if supplied) is not used by any other cluster
    and that immutable attributes are not being changed.
    """
    d = cls._validate_common(data, instance=instance)

    if "name" in d:
        # Exclude the cluster being updated from the uniqueness check.
        others = ClusterCollection.filter_by_not(None, id=instance.id)
        if ClusterCollection.filter_by(others, name=d["name"]).first():
            raise errors.AlreadyExists(
                "Environment with this name already exists",
                log_message=True)

    # Attributes that may never change after environment creation.
    for attr in ("net_provider", ):
        if attr in d and getattr(instance, attr) != d[attr]:
            raise errors.InvalidData(
                u"Changing '{0}' for environment is prohibited".format(attr),
                log_message=True)

    return d
def validate(cls, data):
    """Validate payload for creating a new environment.

    Requires a release ID and a name that is not already in use.
    """
    d = cls._validate_common(data)

    # TODO(ikalnitsky): move it to _validate_common when
    # PATCH method will be implemented
    if not d.get("release", d.get("release_id", None)):
        raise errors.InvalidData(u"Release ID is required", log_message=True)

    name_taken = (
        "name" in d and
        ClusterCollection.filter_by(None, name=d["name"]).first())
    if name_taken:
        raise errors.AlreadyExists(
            "Environment with this name already exists",
            log_message=True)

    return d
def validate(cls, data):
    """Validate payload for creating a new environment.

    A release ID is mandatory and the environment name must be unique.
    """
    d = cls._validate_common(data)

    # TODO(ikalnitsky): move it to _validate_common when
    # PATCH method will be implemented
    requested_release = d.get("release", d.get("release_id", None))
    if not requested_release:
        raise errors.InvalidData(
            u"Release ID is required", log_message=True)

    if "name" in d:
        existing = ClusterCollection.filter_by(None, name=d["name"]).first()
        if existing:
            raise errors.AlreadyExists(
                "Environment with this name already exists",
                log_message=True
            )

    return d
def validate_update(cls, data, instance):
    """Validate payload for updating an existing environment.

    The new name (if any) must not collide with another cluster, and
    immutable settings such as the network provider cannot change.
    """
    d = cls._validate_common(data, instance=instance)

    if "name" in d:
        # Look for a name clash among all clusters except this one.
        scope = ClusterCollection.filter_by_not(None, id=instance.id)
        clash = ClusterCollection.filter_by(scope, name=d["name"]).first()
        if clash:
            raise errors.AlreadyExists(
                "Environment with this name already exists",
                log_message=True
            )

    # These fields are frozen once the environment exists.
    immutable_fields = ("net_provider",)
    for field in immutable_fields:
        if field in d and getattr(instance, field) != d[field]:
            raise errors.InvalidData(
                u"Changing '{0}' for environment is prohibited".format(field),
                log_message=True
            )

    return d
def _validate_common(cls, data, instance=None):
    """Run validations shared by environment create and update.

    Verifies name uniqueness and that the referenced release exists.

    :param data: raw JSON payload to validate.
    :param instance: existing cluster being updated, if any. Accepted for
        compatibility with validate_update callers that pass
        ``instance=instance``; not used by the common checks themselves.
    :returns: the deserialized payload dict.
    :raises errors.AlreadyExists: if an environment with the same name exists.
    :raises errors.InvalidData: if the release ID does not match any release.
    """
    d = cls.validate_json(data)

    if d.get("name"):
        if ClusterCollection.filter_by(
            query=None, name=d["name"]
        ).first():
            raise errors.AlreadyExists(
                "Environment with this name already exists",
                log_message=True
            )

    # Accept either "release" or the legacy "release_id" key.
    release_id = d.get("release", d.get("release_id", None))
    if release_id:
        release = Release.get_by_uid(release_id)
        if not release:
            raise errors.InvalidData(
                "Invalid release ID",
                log_message=True
            )

    return d
def collect(resource_type):
    """Collect OpenStack workload (OSWL) statistics for one resource type.

    Iterates all clusters in 'operational' status, fetches the current
    resource data through the cluster's proxy, and persists it with
    oswl_statistics_save. Failures for a single cluster are logged and do
    not abort collection for the remaining clusters (consistent with the
    extended collector variant in this file).

    :param resource_type: OSWL resource type identifier passed through to
        the stats collection helpers and to oswl_statistics_save.
    """
    try:
        operational_clusters = ClusterCollection.filter_by(
            iterable=None, status=consts.CLUSTER_STATUSES.operational).all()

        for cluster in operational_clusters:
            try:
                client_provider = utils.ClientProvider(cluster)
                proxy_for_os_api = utils.get_proxy_for_cluster(cluster)

                # OS API calls must go through the cluster's proxy; the
                # context manager restores proxy settings afterwards.
                with utils.set_proxy(proxy_for_os_api):
                    data = utils.get_info_from_os_resource_manager(
                        client_provider, resource_type)
                oswl_statistics_save(cluster.id, resource_type, data)
            except Exception as e:
                # One broken cluster must not abort the whole run.
                logger.exception("Error while collecting OSWL resource {0} "
                                 "for cluster with id {1}. Details: {2}"
                                 .format(resource_type,
                                         cluster.id,
                                         six.text_type(e)))

        db.commit()
    except Exception as e:
        logger.exception("Exception while collecting OS workloads "
                         "for resource name {0}. Details: {1}".format(
                             resource_type, six.text_type(e)))
    finally:
        # Always release the scoped DB session for this collector run.
        db.remove()