def _query_stats_metadata(cluster, stat_names):
    """
    Return the metadata for each stat named in stat_names, as reported by
    the given cluster.
    """
    api = cluster.isi_sdk.StatisticsApi(cluster.api_client)
    return IsiStatsClient(api).get_stats_metadata(stat_names)
def _query_stats_metadata(cluster, stat_names):
    """
    Query the given cluster for metadata describing each stat listed in
    stat_names.

    NOTE(review): this definition is byte-identical to an earlier one in
    this file and silently shadows it — confirm which is intended and
    remove the duplicate.
    """
    client = IsiStatsClient(
        cluster.isi_sdk.StatisticsApi(cluster.api_client))
    return client.get_stats_metadata(stat_names)
    def _query_and_process_stats1(
        self,
        cluster,
        stats,
        composite_stats,
        eq_stats,
        pct_change_stats,
        final_eq_stats,
        debug,
    ):
        """
        Query the given cluster for the specified stats and feed the results
        through the derived-stats processors.

        :param cluster: cluster config object providing name, version,
            isi_sdk, and api_client.
        :param stats: iterable of stat names to query.
        :param composite_stats: composite derived-stat definitions.
        :param eq_stats: equation derived-stat definitions.
        :param pct_change_stats: percent-change derived-stat definitions.
        :param final_eq_stats: final-equation derived-stat definitions.
        :param debug: when True, unexpected exceptions are re-raised instead
            of logged; HTTP/API errors are always logged and swallowed.
        """
        LOG.debug("Querying cluster %s %f", cluster.name, cluster.version)
        LOG.debug("Querying stats %d.", len(stats))
        stats_client = IsiStatsClient(cluster.isi_sdk.StatisticsApi(cluster.api_client))
        # query the current cluster with the current set of stats
        try:
            if cluster.version >= 8.0:
                results = stats_client.query_stats(stats)
            else:
                # pre-8.0 clusters need the per-stat fallback query path
                results = self._v7_2_multistat_query(stats, stats_client)
        except (
            urllib3.exceptions.HTTPError,
            cluster.isi_sdk.rest.ApiException,
        ) as http_exc:
            LOG.error(
                "Failed to query stats from cluster %s, exception " "raised: %s",
                cluster.name,
                str(http_exc),
            )
            return
        except Exception as gen_exc:
            # if in debug mode then re-raise general Exceptions because
            # they are most likely bugs in the code, but in non-debug mode
            # just continue
            if debug is False:
                LOG.error(
                    "Failed to query stats from cluster %s, exception " "raised: %s",
                    cluster.name,
                    str(gen_exc),
                )
                return
            # bare raise preserves the original traceback; `raise gen_exc`
            # would restart the traceback at this line (notably on Python 2).
            raise

        composite_stats_processor = DerivedStatsProcessor(composite_stats)
        equation_stats_processor = DerivedStatsProcessor(eq_stats)
        pct_change_stats_processor = DerivedStatsProcessor(pct_change_stats)
        final_equation_stats_processor = DerivedStatsProcessor(final_eq_stats)
        derived_stats_processors = (
            composite_stats_processor,
            equation_stats_processor,
            pct_change_stats_processor,
            final_equation_stats_processor,
        )
        # calls either _process_all_stats or
        # _process_stats_with_derived_stats depending on whether or not the
        # _stats_processor has a process_stat function or just a process
        # function. The latter requires the process_stat function.
        self._process_stats_func(cluster.name, results, derived_stats_processors)
    def _query_and_process_stats(self, cur_time):
        """
        Build a unique set of stats to update per cluster from each set of
        stats that are in need of updating based on the amount of time elapsed
        since their last update, then issue one query per cluster and process
        the results.

        :param cur_time: current time used to decide which update intervals
            are due.
        """
        # there might be more than one stat set that needs updating and thus
        # there might be common clusters between those stat sets, so this loop
        # makes sure that we only send one query to each unique cluster.
        cluster_stats = {}
        for update_interval in self._update_intervals:
            # if the update_interval is less than or equal to the elapsed_time
            # then we need to query the stats associated with this update
            # interval.
            time_since_last_update = cur_time - update_interval.last_update
            if time_since_last_update >= update_interval.interval:
                LOG.debug("updating interval:%d time_since_last_update: %f",
                          update_interval.interval, time_since_last_update)
                # update the last_update time
                update_interval.last_update = cur_time
                # add the stats from stat set to their respective cluster_stats
                cur_stat_set = self._stat_sets[update_interval.interval]
                for cluster in cur_stat_set.cluster_configs:
                    # setdefault creates the per-cluster set on first sight;
                    # update() adds every stat name from this stat set.
                    cluster_stats.setdefault(cluster, set()).update(
                        cur_stat_set.stats)

        # now we have a unique list of clusters to query, so query them.
        # items() instead of the Python-2-only iteritems() so this runs on
        # both Python 2 and 3.
        for cluster, stats in cluster_stats.items():
            LOG.debug("Querying cluster %s %f", cluster.name, cluster.version)
            LOG.debug("Querying stats %d.", len(stats))
            stats_client = IsiStatsClient(
                cluster.isi_sdk.StatisticsApi(cluster.api_client))
            # query the current cluster with the current set of stats
            try:
                if cluster.version >= 8.0:
                    results = stats_client.query_stats(stats)
                else:
                    # pre-8.0 clusters must be queried one stat at a time
                    results = []
                    for stat in stats:
                        results.extend(stats_client.query_stat(stat))
            except (urllib3.exceptions.HTTPError,
                    cluster.isi_sdk.rest.ApiException) as http_exc:
                LOG.error("Failed to query stats from cluster %s, exception "
                          "raised: %s", cluster.name, str(http_exc))
                continue
            # process the results
            self._stats_processor.process(cluster.name, results)
    def _query_and_process_stats1(self, cluster, stats, composite_stats,
            eq_stats, pct_change_stats, final_eq_stats, debug=False):
        """
        Query the given cluster for the specified stats and feed the results
        through the derived-stats processors.

        :param cluster: cluster config object providing name, version,
            isi_sdk, and api_client.
        :param stats: iterable of stat names to query.
        :param composite_stats: composite derived-stat definitions.
        :param eq_stats: equation derived-stat definitions.
        :param pct_change_stats: percent-change derived-stat definitions.
        :param final_eq_stats: final-equation derived-stat definitions.
        :param debug: when True, unexpected exceptions are re-raised instead
            of logged. Added as a keyword parameter (default False) because
            the body previously referenced an undefined ``debug`` name,
            raising NameError whenever a general exception occurred.
        """
        LOG.debug("Querying cluster %s %f", cluster.name, cluster.version)
        LOG.debug("Querying stats %d.", len(stats))
        stats_client = \
                IsiStatsClient(
                        cluster.isi_sdk.StatisticsApi(cluster.api_client))
        # query the current cluster with the current set of stats
        try:
            if cluster.version >= 8.0:
                results = stats_client.query_stats(stats)
            else:
                results = \
                        self._v7_2_multistat_query(
                                stats, stats_client)
        except (urllib3.exceptions.HTTPError,
                cluster.isi_sdk.rest.ApiException) as http_exc:
            LOG.error("Failed to query stats from cluster %s, exception "\
                      "raised: %s", cluster.name, str(http_exc))
            return
        except Exception as gen_exc:
            # if in debug mode then re-raise general Exceptions because
            # they are most likely bugs in the code, but in non-debug mode
            # just continue
            if debug is False:
                LOG.error("Failed to query stats from cluster %s, exception "\
                          "raised: %s", cluster.name, str(gen_exc))
                return
            # bare raise preserves the original traceback; `raise gen_exc`
            # would restart it at this line (notably on Python 2).
            raise

        composite_stats_processor = \
                DerivedStatsProcessor(composite_stats)
        equation_stats_processor = \
                DerivedStatsProcessor(eq_stats)
        pct_change_stats_processor = \
                DerivedStatsProcessor(pct_change_stats)
        final_equation_stats_processor = \
                DerivedStatsProcessor(final_eq_stats)
        derived_stats_processors = \
                (composite_stats_processor,
                        equation_stats_processor,
                        pct_change_stats_processor,
                        final_equation_stats_processor)
        # calls either _process_all_stats or
        # _process_stats_with_derived_stats depending on whether or not the
        # _stats_processor has a process_stat function or just a process
        # function. The latter requires the process_stat function.
        self._process_stats_func(
                cluster.name, results, derived_stats_processors)
    def _query_and_process_stats(self, cur_time, debug):
        """
        Build a unique set of stats to update per cluster from each set of
        stats that are in need of updating based on the amount of time elapsed
        since their last update, then issue one query per cluster and feed
        the results through the derived-stats processors.

        :param cur_time: current time used to decide which update intervals
            are due.
        :param debug: when True, unexpected exceptions are re-raised instead
            of logged; HTTP/API errors are always logged and skipped.
        """
        # there might be more than one stat set that needs updating and thus
        # there might be common clusters between those stat sets, so this loop
        # makes sure that we only send one query to each unique cluster.
        cluster_stats = {}
        for update_interval in self._update_intervals:
            # if the update_interval is less than or equal to the elapsed_time
            # then we need to query the stats associated with this update
            # interval.
            time_since_last_update = cur_time - update_interval.last_update
            if time_since_last_update >= update_interval.interval:
                LOG.debug("updating interval:%d time_since_last_update: %f",
                          update_interval.interval, time_since_last_update)
                # update the last_update time
                update_interval.last_update = cur_time
                # add the stats from stat set to their respective cluster_stats
                cur_stat_set = self._stat_sets[update_interval.interval]
                for cluster in cur_stat_set.cluster_configs:
                    try:
                        (cluster_stat_set,
                                cluster_composite_stats,
                                equation_stats,
                                pct_change_stats,
                                final_equation_stats) = \
                                        cluster_stats[cluster]
                        cluster_composite_stats.extend(
                            cur_stat_set.cluster_composite_stats)
                        equation_stats.extend(cur_stat_set.equation_stats)
                        pct_change_stats.extend(cur_stat_set.pct_change_stats)
                        final_equation_stats.extend(
                            cur_stat_set.final_equation_stats)
                    except KeyError:
                        cluster_stat_set = set()
                        # copy the stat set's lists before storing them:
                        # previously the list objects themselves were stored,
                        # so the extend() calls above permanently mutated the
                        # shared stat-set objects on every invocation.
                        cluster_stats[cluster] = (
                            cluster_stat_set,
                            list(cur_stat_set.cluster_composite_stats),
                            list(cur_stat_set.equation_stats),
                            list(cur_stat_set.pct_change_stats),
                            list(cur_stat_set.final_equation_stats))

                    for stat_name in cur_stat_set.stats:
                        cluster_stat_set.add(stat_name)

        # now we have a unique list of clusters to query, so query them.
        # items() instead of the Python-2-only iteritems() so this runs on
        # both Python 2 and 3.
        for cluster, (stats, composite_stats, eq_stats, pct_change_stats,
                      final_eq_stats) in cluster_stats.items():
            LOG.debug("Querying cluster %s %f", cluster.name, cluster.version)
            LOG.debug("Querying stats %d.", len(stats))
            stats_client = \
                    IsiStatsClient(
                            cluster.isi_sdk.StatisticsApi(cluster.api_client))
            # query the current cluster with the current set of stats
            try:
                if cluster.version >= 8.0:
                    results = stats_client.query_stats(stats)
                else:
                    results = \
                            self._v7_2_stat_query_result_generator(
                                    stats, stats_client)
            except (urllib3.exceptions.HTTPError,
                    cluster.isi_sdk.rest.ApiException) as http_exc:
                LOG.error("Failed to query stats from cluster %s, exception "\
                          "raised: %s", cluster.name, str(http_exc))
                continue
            except Exception as gen_exc:
                # if in debug mode then re-raise general Exceptions because
                # they are most likely bugs in the code, but in non-debug mode
                # just continue
                if debug is False:
                    LOG.error("Failed to query stats from cluster %s, exception "\
                              "raised: %s", cluster.name, str(gen_exc))
                    continue
                # bare raise preserves the original traceback; `raise gen_exc`
                # would restart it at this line (notably on Python 2).
                raise

            composite_stats_processor = \
                    DerivedStatsProcessor(composite_stats)
            equation_stats_processor = \
                    DerivedStatsProcessor(eq_stats)
            pct_change_stats_processor = \
                    DerivedStatsProcessor(pct_change_stats)
            final_equation_stats_processor = \
                    DerivedStatsProcessor(final_eq_stats)
            derived_stats_processors = \
                    (composite_stats_processor,
                            equation_stats_processor,
                            pct_change_stats_processor,
                            final_equation_stats_processor)
            # calls either _process_all_stats or
            # _process_stats_with_derived_stats depending on whether or not the
            # _stats_processor has a process_stat function or just a process
            # function. The latter requires the process_stat function.
            self._process_stats_func(cluster.name, results,
                                     derived_stats_processors)