Example #1
    def get_metric_statistics(self,
                              project_id,
                              end_time,
                              metric_name,
                              namespace,
                              period,
                              start_time,
                              statistics,
                              unit=None,
                              dimensions=None):
        """
        Return a list of metric statistics matching the given conditions.
        """
        def to_datapoint(df, idx):
            # Keep only timestamps that still carry data after dropping NaNs.
            datapoint = df.ix[idx].dropna()
            if len(datapoint):
                return idx, datapoint

        end_idx = end_time.replace(second=0, microsecond=0)
        start_idx = start_time.replace(second=0, microsecond=0)
        start_ana_idx = start_idx - datetools.Minute() * (period // 60)
        daterange = DateRange(start_idx, end_idx, offset=datetools.Minute())
        daterange_ana = DateRange(start_ana_idx,
                                  end_idx,
                                  offset=datetools.Minute())

        # load default unit for metric from database
        if unit == "None" or not unit:
            metric_key = self.cass.get_metric_key(project_id=project_id,
                                                  namespace=namespace,
                                                  metric_name=metric_name,
                                                  dimensions=dimensions)

            if metric_key:
                unit = self.cass.get_metric_unit(metric_key)
            else:
                unit = "None"

        # load statistics data from database
        stats = self.cass.get_metric_statistics(project_id=project_id,
                                                namespace=namespace,
                                                metric_name=metric_name,
                                                start_time=start_ana_idx,
                                                end_time=end_time,
                                                period=period,
                                                statistics=statistics,
                                                dimensions=dimensions)

        period = period // 60  # convert seconds to minutes
        stat = DataFrame(index=daterange)

        for statistic, series in zip(statistics, stats):
            func = self.ROLLING_FUNC_MAP[statistic]
            ts = TimeSeries(series, index=daterange_ana)
            rolled_ts = func(ts, period, min_periods=0)
            stat[statistic] = rolled_ts.ix[::period]
            LOG.debug("stat %s\n%s" % (statistic, stat[statistic]))

        ret = list(filter(None, (to_datapoint(stat, i) for i in stat.index)))
        return ret, unit
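Example #1 leans on long-removed pandas APIs (DateRange, TimeSeries, DataFrame.ix, datetools.Minute()). A minimal sketch of the same rolling-then-subsample idea in modern pandas, with an invented one-value-per-minute series and a 5-minute period as assumptions:

import numpy as np
import pandas as pd

# Assumed input: one raw value per minute over the analysis window.
index = pd.date_range('2000-01-01 00:00', periods=10, freq='min')
ts = pd.Series(np.arange(10.0), index=index)

period = 5  # aggregation window in minutes (assumed)

# Rolling mean over the period, then keep every period-th point,
# mirroring func(ts, period, min_periods=0).ix[::period] above.
rolled = ts.rolling(window=period, min_periods=1).mean()
datapoints = rolled.iloc[::period].dropna()
print(datapoints)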
Example #2
def roundtrip_archive(N, K=50, iterations=10):
    # Create data
    arr = np.random.randn(N, K)
    # lar = la.larry(arr)
    dma = pandas.DataFrame(
        arr, DateRange('1/1/2000', periods=N, offset=datetools.Minute()))
    dma[201] = 'bar'

    # filenames
    filename_numpy = '/Users/wesm/tmp/numpy.npz'
    filename_larry = '/Users/wesm/tmp/archive.hdf5'
    filename_pandas = '/Users/wesm/tmp/pandas_tmp'

    # Delete any stale files from a previous run
    for path in (filename_numpy, filename_larry, filename_pandas):
        try:
            os.unlink(path)
        except OSError:
            pass

    # Time a round trip save and load
    # numpy_f = lambda: numpy_roundtrip(filename_numpy, arr, arr)
    # numpy_time = timeit(numpy_f, iterations) / iterations

    # larry_f = lambda: larry_roundtrip(filename_larry, lar, lar)
    # larry_time = timeit(larry_f, iterations) / iterations

    pandas_f = lambda: pandas_roundtrip(filename_pandas, dma, dma)
    pandas_time = timeit(pandas_f, iterations) / iterations
    print('pandas (HDF5) %7.4f seconds' % pandas_time)

    pickle_f = lambda: pandas_roundtrip(filename_pandas, dma, dma)
    pickle_time = timeit(pickle_f, iterations) / iterations
    print('pandas (pickle) %7.4f seconds' % pickle_time)

    # print('Numpy (npz)   %7.4f seconds' % numpy_time)
    # print('larry (HDF5)  %7.4f seconds' % larry_time)

    # Clean up the files created during the benchmark
    for path in (filename_numpy, filename_larry, filename_pandas):
        try:
            os.unlink(path)
        except OSError:
            pass
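Note that the pickle timing above reuses the same pandas_roundtrip helper as the HDF5 case, and both helpers (pandas_roundtrip, timeit) are defined outside this excerpt. A self-contained sketch of a pickle round-trip timing for the same data shape, with the temp path and sizes as assumptions:

import os
import tempfile
import timeit

import numpy as np
import pandas as pd

def roundtrip_pickle(path, frame):
    # Write the frame to disk and read it straight back.
    frame.to_pickle(path)
    return pd.read_pickle(path)

N, K, iterations = 10000, 50, 10
frame = pd.DataFrame(np.random.randn(N, K),
                     index=pd.date_range('1/1/2000', periods=N, freq='min'))
path = os.path.join(tempfile.gettempdir(), 'pandas_tmp.pkl')

elapsed = timeit.timeit(lambda: roundtrip_pickle(path, frame),
                        number=iterations) / iterations
print('pandas (pickle) %7.4f seconds' % elapsed)
os.unlink(path)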
Example #3
    def _get_range(self):
        now_idx = datetime.utcnow().replace(second=0, microsecond=0)

        start = now_idx - timedelta(seconds=self.left_offset)
        end = now_idx + timedelta(seconds=self.right_offset)

        daterange = DateRange(start, end, offset=datetools.Minute())

        return daterange
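DateRange and datetools.Minute() were removed from pandas long ago; pd.date_range with a minute frequency is the modern equivalent. A sketch of the same range construction, with the 300/60-second offsets as illustrative assumptions:

from datetime import datetime, timedelta

import pandas as pd

left_offset, right_offset = 300, 60  # seconds (assumed)

now_idx = datetime.utcnow().replace(second=0, microsecond=0)
start = now_idx - timedelta(seconds=left_offset)
end = now_idx + timedelta(seconds=right_offset)

# One index entry per minute over [start, end], like DateRange(..., Minute()).
daterange = pd.date_range(start, end, freq='min')
print(daterange)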
Example #4
    def _check_alarm(self, alarmkey, alarm, query_time=None):
        period = int(alarm['period'] / 60)
        evaluation_periods = alarm['evaluation_periods']
        statistic = alarm['statistic']
        threshold = alarm['threshold']
        alarm_name = alarm['alarm_name']
        cmp_op = self.CMP_MAP[alarm['comparison_operator']]
        unit = alarm['unit']
        state_value = alarm['state_value']

        query_time = query_time if query_time else utils.utcnow()

        # Walk back one minute at a time, up to insufficient_buffer minutes,
        # until a non-null datapoint is found.
        for i in range(self.insufficient_buffer):
            end_idx = (query_time.replace(second=0, microsecond=0) -
                       (i + 1) * datetools.Minute())
            try:
                end_datapoint = self.df[statistic].ix[end_idx]
            except KeyError:
                end_datapoint = None
            if not isnull(end_datapoint):
                break

        start_idx = (end_idx -
                     (period * evaluation_periods) * datetools.Minute())
        start_ana_idx = start_idx - datetools.Minute() * period

        func = self.ROLLING_FUNC_MAP[statistic]
        data = func(self.df[statistic].ix[start_ana_idx:end_idx],
                    period,
                    min_periods=0).ix[start_idx:end_idx:period][1:]
        recent_datapoints = list(data)

        if unit and statistic != 'SampleCount':
            data = data / utils.UNIT_CONV_MAP[unit]
            threshold = threshold / utils.UNIT_CONV_MAP[unit]

        data = data.dropna()

        query_date = utils.strtime(query_time)
        reason_data = {
            "period": alarm['period'],
            "queryDate": query_date,
            "recentDatapoints": recent_datapoints,
            "startDate": utils.strtime(start_idx),
            "statistic": statistic,
            "threshold": threshold,
            "version": "1.0",
        }
        old_state = {
            'stateReason': alarm.get('reason', ""),
            'stateValue': alarm.get('state_value', "INSUFFICIENT_DATA"),
            'stateReasonData': json.loads(alarm.get('reason_data', "{}"))
        }
        json_reason_data = json.dumps(reason_data)

        if len(data) == 0:
            if state_value != 'INSUFFICIENT_DATA':
                template = _("Insufficient Data: %d datapoints were unknown.")
                reason = template % (evaluation_periods - len(data))
                new_state = {
                    'stateReason': reason,
                    'stateReasonData': reason_data,
                    'stateValue': 'INSUFFICIENT_DATA'
                }
                self.update_alarm_state(alarmkey, 'INSUFFICIENT_DATA', reason,
                                        json_reason_data, query_time)
                self.cass.update_alarm_state(alarmkey, 'INSUFFICIENT_DATA',
                                             reason, json_reason_data,
                                             query_time)
                self.alarm_history_state_update(alarmkey, alarm, new_state,
                                                old_state)
                self.do_alarm_action(alarmkey, alarm, new_state, old_state,
                                     query_date)
                LOG.audit("Alarm %s status changed to INSUFFICIENT_DATA",
                          alarm_name)
        else:
            sufficient = len(data) >= evaluation_periods
            # Fire only if every datapoint in the window crosses the threshold.
            crossed = (sufficient
                       and reduce(operator.and_, cmp_op(data, threshold)))
            com_op = alarm['comparison_operator']

            if crossed:
                template = _("Threshold Crossed: %d datapoints were %s " +
                             "the threshold(%f). " +
                             "The most recent datapoints: %s.")
                reason = template % (evaluation_periods,
                                     self.CMP_STR_MAP[com_op], threshold,
                                     recent_datapoints)
                if state_value != 'ALARM':
                    new_state = {
                        'stateReason': reason,
                        'stateReasonData': reason_data,
                        'stateValue': 'ALARM'
                    }

                    self.update_alarm_state(alarmkey, 'ALARM', reason,
                                            json_reason_data, query_time)
                    self.cass.update_alarm_state(alarmkey, 'ALARM', reason,
                                                 json_reason_data, query_time)
                    self.alarm_history_state_update(alarmkey, alarm, new_state,
                                                    old_state)
                    self.do_alarm_action(alarmkey, alarm, new_state, old_state,
                                         query_date)
                    LOG.audit("Alarm %s status changed to ALARM", alarm_name)
            else:
                template = _("Threshold Crossed: %d datapoints were not %s " +
                             "the threshold(%f). " +
                             "The most recent datapoints: %s.")
                reason = template % (evaluation_periods,
                                     self.CMP_STR_MAP[com_op], threshold,
                                     recent_datapoints)
                if state_value != 'OK':
                    new_state = {
                        'stateReason': reason,
                        'stateReasonData': reason_data,
                        'stateValue': 'OK'
                    }
                    self.update_alarm_state(alarmkey, 'OK', reason,
                                            json_reason_data, query_time)
                    self.cass.update_alarm_state(alarmkey, 'OK', reason,
                                                 json_reason_data, query_time)
                    self.alarm_history_state_update(alarmkey, alarm, new_state,
                                                    old_state)
                    self.do_alarm_action(alarmkey, alarm, new_state, old_state,
                                         query_date)
                    LOG.audit("Alarm %s status changed to OK", alarm_name)