def collect(resource_type):
    """Collect OpenStack workload (OSWL) statistics for one resource type.

    Fetches the current resource data from every operational cluster and
    stores it in the DB, and clears stale OSWL data for clusters that are
    no longer available. Always releases the DB session on exit.

    :param resource_type: OSWL resource type name (presumably one of
        consts.OSWL_RESOURCE_TYPES — confirm against callers)
    """
    try:
        operational_clusters = ClusterCollection.filter_by(
            iterable=None, status=consts.CLUSTER_STATUSES.operational).all()
        error_clusters = ClusterCollection.filter_by(
            iterable=None, status=consts.CLUSTER_STATUSES.error).all()

        all_envs_last_recs = \
            OpenStackWorkloadStatsCollection.get_last_by_resource_type(
                resource_type)
        # Build the id sets from generators and union them directly instead
        # of concatenating two intermediate lists.
        ready_or_error_ids = set(c.id for c in operational_clusters) | \
            set(c.id for c in error_clusters)
        envs_ids_to_clear = set(r.cluster_id for r in all_envs_last_recs) - \
            ready_or_error_ids

        # Clear current resource data for unavailable clusters.
        # Current OSWL data is cleared for those clusters which status is not
        # 'operational' nor 'error' or when cluster was removed. Data is
        # cleared for cluster only if it was updated recently (today or
        # yesterday). While this collector is running with interval much
        # smaller than one day it should not miss any unavailable cluster.
        # 'cluster_id' instead of 'id' avoids shadowing the builtin.
        for cluster_id in envs_ids_to_clear:
            oswl_statistics_save(cluster_id, resource_type, [])

        # Collect current OSWL data and update data in DB.
        for cluster in operational_clusters:
            try:
                client_provider = helpers.ClientProvider(cluster)
                proxy_for_os_api = utils.get_proxy_for_cluster(cluster)
                version_info = utils.get_version_info(cluster)

                with utils.set_proxy(proxy_for_os_api):
                    data = helpers.get_info_from_os_resource_manager(
                        client_provider, resource_type)
                    oswl_statistics_save(cluster.id, resource_type, data,
                                         version_info=version_info)
            except errors.StatsException as e:
                # Expected, known failure: log without a traceback and
                # continue with the remaining clusters.
                logger.error("Cannot collect OSWL resource {0} for cluster "
                             "with id {1}. Details: {2}."
                             .format(resource_type, cluster.id,
                                     six.text_type(e)))
            except Exception as e:
                # Unexpected failure: log with traceback, but keep
                # collecting data for the remaining clusters.
                logger.exception("Error while collecting OSWL resource {0} "
                                 "for cluster with id {1}. Details: {2}."
                                 .format(resource_type, cluster.id,
                                         six.text_type(e)))

        db.commit()
    except Exception as e:
        logger.exception("Exception while collecting OS workloads "
                         "for resource name {0}. Details: {1}"
                         .format(resource_type, six.text_type(e)))
    finally:
        # Release the scoped DB session regardless of the outcome.
        db.remove()
def test_clean_expired_entries(self):
    """clean_expired_entries() must drop records whose created_date is
    OSWL_STORING_PERIOD days old and keep fresh records untouched."""
    now = datetime.datetime.utcnow()
    expiration_shift = datetime.timedelta(days=settings.OSWL_STORING_PERIOD)
    expired_cluster_ids = (1, 2)

    # Seed records created exactly OSWL_STORING_PERIOD days ago --
    # these are the ones the cleanup is expected to remove.
    for expired_id in expired_cluster_ids:
        OpenStackWorkloadStats.create({
            "cluster_id": expired_id,
            "resource_type": consts.OSWL_RESOURCE_TYPES.volume,
            "updated_time": now.time(),
            "created_date": now.date() - expiration_shift,
            "resource_checksum": ""
        })

    # A record created today must survive the cleanup.
    fresh_kwargs = {
        "cluster_id": 3,
        "resource_type": consts.OSWL_RESOURCE_TYPES.vm,
        "updated_time": now.time(),
        "created_date": now.date(),
        "resource_checksum": ""
    }
    OpenStackWorkloadStats.create(fresh_kwargs)

    OpenStackWorkloadStatsCollection.clean_expired_entries()
    self.db.commit()

    # Expired records are gone.
    for expired_id in expired_cluster_ids:
        self.assertIsNone(
            OpenStackWorkloadStats.get_last_by(
                expired_id, consts.OSWL_RESOURCE_TYPES.volume))

    # The fresh record is still present.
    self.assertIsNotNone(
        OpenStackWorkloadStats.get_last_by(
            fresh_kwargs["cluster_id"], consts.OSWL_RESOURCE_TYPES.vm))
def check_overall_rec_count(self, count):
    """Assert that the total number of stored OSWL records equals *count*.

    Returns the collection query so the caller can inspect the records.
    """
    stored = OpenStackWorkloadStatsCollection.all()
    self.assertEqual(stored.count(), count)
    return stored