Ejemplo n.º 1
0
 def setUp(self):
     """Prepare a UTC month-long period plus Influx client/storage fixtures."""
     month_start = tzutils.get_month_start()
     next_month = tzutils.get_next_month()
     self.period_begin = tzutils.local_to_utc(month_start).isoformat()
     self.period_end = tzutils.local_to_utc(next_month).isoformat()
     self.client = influx.InfluxClient()
     self._storage = influx.InfluxStorage()
Ejemplo n.º 2
0
    def configure_and_execute_overlap_test(self, schedule_get_all_mock,
                                           start_reprocess_time,
                                           end_reprocess_time):
        """Assert that an overlapping reprocessing schedule raises BadRequest.

        Configures the schedule mock with the given time range, then checks
        that the overlap validation rejects the request with the expected
        error message and queried the DB for the right scope identifier.
        """
        self.configure_schedules_mock(schedule_get_all_mock,
                                      start_reprocess_time, end_reprocess_time)

        # Range the caller is trying to schedule (local time).
        scheduling_range = DateTimeRange(
            tzutils.utc_to_local(self.start_reprocess_time),
            tzutils.utc_to_local(self.end_reprocess_time))
        # Range already registered in the mocked DB (UTC).
        scheduled_range = DateTimeRange(
            tzutils.local_to_utc(start_reprocess_time),
            tzutils.local_to_utc(end_reprocess_time))

        expected_message = re.escape(
            "400 Bad Request: Cannot schedule a reprocessing for scope "
            "[toStringMock] for reprocessing time [%s], because it already "
            "has a schedule for a similar time range [%s]."
            % (scheduling_range, scheduled_range))

        self.assertRaisesRegex(
            http_exceptions.BadRequest, expected_message,
            self.endpoint.validate_reprocessing_schedules_overlaps,
            self.generate_all_scopes_object(), self.end_reprocess_time,
            self.start_reprocess_time)

        schedule_get_all_mock.assert_called_with(
            identifier=[self.scope_ids[0]])
Ejemplo n.º 3
0
    def summary(self,
                begin=None,
                end=None,
                tenant_id=None,
                service=None,
                groupby=None,
                all_tenants=False):
        """Return the summary to pay for a given period.

        """
        # Default to the current month when no period is given.
        begin = begin or ck_utils.get_month_start()
        end = end or ck_utils.get_next_month()

        if all_tenants:
            tenant_id = None
        else:
            # Fall back to the caller's own project when none is specified.
            tenant_id = tenant_id or pecan.request.context.project_id
        policy.authorize(pecan.request.context, 'report:get_summary',
                         {"project_id": tenant_id})
        storage = pecan.request.storage_backend

        scope_key = CONF.collect.scope_key
        # Translate the API group-by keys into storage-level column names.
        storage_groupby = []
        if groupby is not None:
            if 'tenant_id' in groupby:
                storage_groupby.append(scope_key)
            if 'res_type' in groupby:
                storage_groupby.append('type')
        filters = {scope_key: tenant_id} if tenant_id else None
        result = storage.total(groupby=storage_groupby,
                               begin=begin,
                               end=end,
                               metric_types=service,
                               filters=filters)

        summarymodels = [
            report_models.SummaryModel(
                res_type=res.get('type') or res.get('res_type'),
                tenant_id=res.get(scope_key) or res.get('tenant_id'),
                begin=tzutils.local_to_utc(res['begin'], naive=True),
                end=tzutils.local_to_utc(res['end'], naive=True),
                rate=res['rate'],
            )
            for res in result['results']
        ]

        return report_models.SummaryCollectionModel(summary=summarymodels)
Ejemplo n.º 4
0
 def retrieve(self, begin=None, end=None,
              filters=None,
              metric_types=None,
              offset=0, limit=100, paginate=True):
     """Fetch dataframes for a period and return them localized with a count."""
     tenant_id = (filters or {}).get('project_id')
     metric_types = self._check_metric_types(metric_types)
     # The storage layer expects naive UTC timestamps; None keeps the
     # corresponding bound open.
     utc_begin = tzutils.local_to_utc(begin, naive=True) if begin else None
     utc_end = tzutils.local_to_utc(end, naive=True) if end else None
     raw_frames = self.storage.get_time_frame(
         utc_begin, utc_end,
         res_type=metric_types,
         tenant_id=tenant_id)
     frames = [dataframe.DataFrame.from_dict(raw, legacy=True)
               for raw in raw_frames]
     self._localize_dataframes(frames)
     return {
         'total': len(frames),
         'dataframes': frames,
     }
Ejemplo n.º 5
0
    def total(self, **arguments):
        """Compute rating totals, normalizing arguments for the storage API."""
        filters = arguments.pop('filters', None)
        tenant_id = filters.get('project_id') if filters else None
        if filters:
            arguments['tenant_id'] = tenant_id

        groupby = arguments.get('groupby')
        storage_gby = self.get_storage_groupby(groupby)

        metric_types = arguments.pop('metric_types', None)
        if metric_types:
            arguments['service'] = self._check_metric_types(metric_types)

        # The storage layer works with naive UTC timestamps.
        for bound in ('begin', 'end'):
            arguments[bound] = tzutils.local_to_utc(arguments[bound],
                                                    naive=True)

        arguments['groupby'] = storage_gby

        total = self.storage.get_total(**arguments)

        for entry in total:
            if entry.get('tenant_id') is None:
                entry['tenant_id'] = tenant_id
            if entry.get('rate') is None:
                entry['rate'] = float(0)
            # Surface the resource type only when it was asked for.
            entry['type'] = (entry.get('res_type')
                             if groupby and 'type' in groupby else None)
        self._localize_total(total)
        return {
            'total': len(total),
            'results': total,
        }
Ejemplo n.º 6
0
    def total(self, groupby=None,
              begin=None, end=None,
              metric_types=None,
              filters=None,
              offset=0, limit=100, paginate=True):
        """Return rating totals for a period, optionally grouped."""
        tenant_id = filters.get('project_id') if filters else None

        # Map API group-by keys onto the storage backend's column names.
        key_mapping = {'type': 'res_type', 'project_id': 'tenant_id'}
        storage_gby = [key_mapping[elem]
                       for elem in set(groupby or ())
                       if elem in key_mapping]
        storage_gby = ','.join(storage_gby) if storage_gby else None
        metric_types = self._check_metric_types(metric_types)
        total = self.storage.get_total(
            tzutils.local_to_utc(begin, naive=True),
            tzutils.local_to_utc(end, naive=True),
            tenant_id=tenant_id,
            service=metric_types,
            groupby=storage_gby)

        for entry in total:
            if entry.get('tenant_id') is None:
                entry['tenant_id'] = tenant_id
            if entry.get('rate') is None:
                entry['rate'] = float(0)
            # Surface the resource type only when it was asked for.
            entry['type'] = (entry.get('res_type')
                             if groupby and 'type' in groupby else None)
        self._localize_total(total)
        return {
            'total': len(total),
            'results': total,
        }
Ejemplo n.º 7
0
    def validate_reprocessing_schedules_overlaps(
            self, all_scopes_to_reprocess, end_reprocess_time,
            start_reprocess_time):
        """Reject reprocessing requests that overlap an existing schedule.

        :param all_scopes_to_reprocess: scopes the caller wants to reprocess
        :param end_reprocess_time: end of the requested reprocessing window
        :param start_reprocess_time: start of the requested window
        :raises http_exceptions.BadRequest: when the requested window
            intersects an already-scheduled reprocessing range for any of
            the given scopes.
        """

        scheduling_range = DateTimeRange(
            start_reprocess_time, end_reprocess_time)

        for scope_to_reprocess in all_scopes_to_reprocess:
            all_reprocessing_schedules = self.schedule_reprocessing_db.get_all(
                identifier=[scope_to_reprocess.identifier])

            LOG.debug("All schedules [%s] for reprocessing found for scope "
                      "[%s]", all_reprocessing_schedules, scope_to_reprocess)
            if not all_reprocessing_schedules:
                # Use lazy %-style log arguments instead of eager string
                # interpolation, consistently with the other log calls here.
                LOG.debug(
                    "No need to validate possible collision of reprocessing "
                    "for scope [%s] because it does not have active "
                    "reprocessing schedules.", scope_to_reprocess)
                continue

            for schedule in all_reprocessing_schedules:
                scheduled_range = DateTimeRange(
                    tzutils.local_to_utc(schedule.start_reprocess_time),
                    tzutils.local_to_utc(schedule.end_reprocess_time))

                try:
                    if scheduling_range.is_intersection(scheduled_range):
                        raise http_exceptions.BadRequest(
                            self.generate_overlap_error_message(
                                scheduled_range, scheduling_range,
                                scope_to_reprocess))
                except ValueError as e:
                    # DateTimeRange raises ValueError for invalid ranges;
                    # surface it as a client error with extra context.
                    raise http_exceptions.BadRequest(
                        self.generate_overlap_error_message(
                            scheduled_range, scheduling_range,
                            scope_to_reprocess) + "Error: [%s]." % e)
Ejemplo n.º 8
0
    def do_execute_scope_processing(self, timestamp):
        """Wipe stored data for the reprocessing window, then reprocess it.

        :param timestamp: start of the period being reprocessed
        """
        window_end = tzutils.local_to_utc(
            timestamp + timedelta(seconds=self._period))

        LOG.debug("Cleaning backend [%s] data for reprocessing scope [%s] "
                  "for timeframe[start=%s, end=%s].",
                  self._storage, self.scope, timestamp, window_end)

        # Remove previously stored data so reprocessing does not duplicate
        # entries for the same timeframe.
        self._storage.delete(
            begin=timestamp, end=window_end,
            filters={self.scope_key: self._tenant_id})

        LOG.debug("Executing the reprocessing of scope [%s] for "
                  "timeframe[start=%s, end=%s].", self.scope, timestamp,
                  window_end)

        super(ReprocessingWorker, self).do_execute_scope_processing(timestamp)
Ejemplo n.º 9
0
    def update_storage_scope(self,
                             storage_scope_to_update,
                             scope_key=None,
                             fetcher=None,
                             collector=None,
                             active=None):
        """Update storage scope data.

        :param storage_scope_to_update: The storage scope to update in the DB
        :type storage_scope_to_update: object
        :param fetcher: Fetcher associated to the scope
        :type fetcher: str
        :param collector: Collector associated to the scope
        :type collector: str
        :param scope_key: scope_key associated to the scope
        :type scope_key: str
        :param active: indicates if the storage scope is active for processing
        :type active: bool
        """
        session = db.get_session()
        session.begin()
        # Ensure the session is always released, even when the lookup or
        # the commit raises (previously it leaked on error).
        try:
            db_scope = self._get_db_item(session,
                                         storage_scope_to_update.identifier,
                                         storage_scope_to_update.fetcher,
                                         storage_scope_to_update.collector,
                                         storage_scope_to_update.scope_key)

            if scope_key:
                db_scope.scope_key = scope_key
            if fetcher:
                db_scope.fetcher = fetcher
            if collector:
                db_scope.collector = collector
            if active is not None and active != db_scope.active:
                db_scope.active = active

                # Record when the scope was (de)activated, as naive UTC.
                now = tzutils.localized_now()
                db_scope.scope_activation_toggle_date = tzutils.local_to_utc(
                    now, naive=True)

            session.commit()
        finally:
            session.close()
Ejemplo n.º 10
0
    def generate_next_timestamp(db_item, processing_period_interval):
        """Compute the next timestamp to reprocess for a schedule.

        :param db_item: reprocessing schedule entry from the database
        :param processing_period_interval: processing period, in seconds
        :return: the next timestamp (UTC) to process, or None when every
                 requested timestamp has already been processed.
        """
        new_timestamp = db_item.start_reprocess_time
        if db_item.current_reprocess_time:
            period_delta = timedelta(seconds=processing_period_interval)

            new_timestamp = db_item.current_reprocess_time + period_delta

            # BUGFIX: log the *current* reprocess time; it previously logged
            # start_reprocess_time, which made the message misleading.
            LOG.debug("Current reprocessed time is [%s], therefore, the next "
                      "one to process is [%s] based on the processing "
                      "interval [%s].", db_item.current_reprocess_time,
                      new_timestamp, processing_period_interval)
        else:
            LOG.debug("There is no reprocessing for the schedule [%s]. "
                      "Therefore, we use the start time [%s] as the first "
                      "time to process.", db_item, new_timestamp)
        if new_timestamp <= db_item.end_reprocess_time:
            return tzutils.local_to_utc(new_timestamp)
        else:
            LOG.debug("No need to keep reprocessing schedule [%s] as we "
                      "processed all requested timestamps.", db_item)
            return None
Ejemplo n.º 11
0
    def update_reprocessing_time(self,
                                 identifier=None,
                                 start_reprocess_time=None,
                                 end_reprocess_time=None,
                                 new_current_time_stamp=None):
        """Update current processing time for a reprocessing schedule

        :param identifier: Identifier of the scope
        :type identifier: str
        :param start_reprocess_time: the start time used in the
                                     reprocessing schedule
        :type start_reprocess_time: datetime.datetime
        :param end_reprocess_time: the end time used in the
                                     reprocessing schedule
        :type end_reprocess_time: datetime.datetime
        :param new_current_time_stamp: the new current timestamp to set
        :type new_current_time_stamp: datetime.datetime
        """

        session = db.get_session()
        session.begin()
        # BUGFIX: the session is now always closed (the early "not found"
        # return used to leak it), and the broken "%" placeholder in the
        # warning message is now a proper "%s".
        try:
            result_set = self._get_db_item(end_reprocess_time, identifier,
                                           session, start_reprocess_time)

            if not result_set:
                LOG.warning(
                    "Trying to update current time to [%s] for identifier "
                    "[%s] and reprocessing range [start=%s, end=%s], but "
                    "we could not find this task in the database.",
                    new_current_time_stamp, identifier, start_reprocess_time,
                    end_reprocess_time)
                return
            # The DB stores naive UTC timestamps.
            new_current_time_stamp = tzutils.local_to_utc(
                new_current_time_stamp, naive=True)

            result_set.current_reprocess_time = new_current_time_stamp
            session.commit()
        finally:
            session.close()
Ejemplo n.º 12
0
    def set_last_processed_timestamp(self,
                                     identifier,
                                     last_processed_timestamp,
                                     fetcher=None,
                                     collector=None,
                                     scope_key=None):
        """Set the last processed timestamp of a scope.

        If the scope does not exist yet in the database, it will create it.

        :param identifier: Identifier of the scope
        :type identifier: str
        :param last_processed_timestamp: last processed timestamp of the scope
        :type last_processed_timestamp: datetime.datetime
        :param fetcher: Fetcher associated to the scope
        :type fetcher: str
        :param collector: Collector associated to the scope
        :type collector: str
        :param scope_key: scope_key associated to the scope
        :type scope_key: str
        """
        # The DB stores naive UTC timestamps.
        last_processed_timestamp = tzutils.local_to_utc(
            last_processed_timestamp, naive=True)
        session = db.get_session()
        session.begin()
        # Ensure the session is always released, even when the lookup,
        # commit or scope creation raises (previously it leaked on error).
        try:
            r = self._get_db_item(session, identifier, fetcher, collector,
                                  scope_key)

            if r:
                # Only hit the DB when the value actually changed.
                if r.last_processed_timestamp != last_processed_timestamp:
                    r.last_processed_timestamp = last_processed_timestamp
                    session.commit()
            else:
                self.create_scope(identifier,
                                  last_processed_timestamp,
                                  fetcher=fetcher,
                                  collector=collector,
                                  scope_key=scope_key)
        finally:
            session.close()
Ejemplo n.º 13
0
    def set_state(self, identifier, state,
                  fetcher=None, collector=None, scope_key=None):
        """Set the state of a scope.

        :param identifier: Identifier of the scope
        :type identifier: str
        :param state: state of the scope
        :type state: datetime.datetime
        :param fetcher: Fetcher associated to the scope
        :type fetcher: str
        :param collector: Collector associated to the scope
        :type collector: str
        :param scope_key: scope_key associated to the scope
        :type scope_key: str
        """
        # States are stored as naive UTC timestamps.
        state = tzutils.local_to_utc(state, naive=True)
        session = db.get_session()
        session.begin()
        db_item = self._get_db_item(
            session, identifier, fetcher, collector, scope_key)

        if not db_item:
            # First time we see this scope: create its state row.
            session.add(self.model(
                identifier=identifier,
                state=state,
                fetcher=fetcher,
                collector=collector,
                scope_key=scope_key,
            ))
            session.commit()
        elif db_item.state != state:
            # Only hit the DB when the value actually changed.
            db_item.state = state
            session.commit()

        session.close()
Ejemplo n.º 14
0
 def _local_to_utc(*args):
     """Convert every given datetime to UTC, preserving argument order."""
     return list(map(tzutils.local_to_utc, args))
Ejemplo n.º 15
0
 def test_local_to_utc_not_naive(self):
     """local_to_utc keeps tzinfo and agrees for aware and naive inputs."""
     converted_aware = tzutils.local_to_utc(self.local_now)
     converted_naive = tzutils.local_to_utc(self.naive_now)
     for converted in (converted_aware, converted_naive):
         self.assertIsNotNone(converted.tzinfo)
     self.assertEqual(converted_aware, converted_naive)
Ejemplo n.º 16
0
    def get_all(self,
                begin=None,
                end=None,
                tenant_id=None,
                resource_type=None):
        """Return a list of rated resources for a time period and a tenant.

        :param begin: Start of the period
        :param end: End of the period
        :param tenant_id: UUID of the tenant to filter on.
        :param resource_type: Type of the resource to filter on.
        :return: Collection of DataFrame objects.
        """

        project_id = tenant_id or pecan.request.context.project_id
        policy.authorize(pecan.request.context, 'storage:list_data_frames', {
            'project_id': project_id,
        })

        scope_key = CONF.collect.scope_key
        backend = pecan.request.storage_backend

        if pecan.request.context.is_admin:
            # Admins may query any scope; filter only when one was given.
            filters = {scope_key: tenant_id} if tenant_id else None
        elif project_id is None:
            # Unscoped non-admin user
            return {'dataframes': []}
        else:
            filters = {scope_key: project_id}

        try:
            resp = backend.retrieve(begin,
                                    end,
                                    filters=filters,
                                    metric_types=resource_type,
                                    paginate=False)
        except storage.NoTimeFrame:
            return storage_models.DataFrameCollection(dataframes=[])

        result_frames = []
        for frame in resp['dataframes']:
            frame_tenant = None
            for service_type, points in frame.itertypes():
                rated_resources = []
                for point in points:
                    if frame_tenant is None:
                        # NOTE(jferrieu): Since DataFrame/DataPoint
                        # implementation patch we cannot guarantee
                        # anymore that a DataFrame does contain a scope_id
                        # therefore the __UNDEF__ default value has been
                        # retained to maintain backward compatibility
                        # if it would occur being absent
                        frame_tenant = point.desc.get(scope_key, '__UNDEF__')
                    rated_resources.append(storage_models.RatedResource(
                        service=service_type,
                        desc=point.desc,
                        volume=point.qty,
                        rating=point.price))
                result_frames.append(storage_models.DataFrame(
                    begin=tzutils.local_to_utc(frame.start, naive=True),
                    end=tzutils.local_to_utc(frame.end, naive=True),
                    tenant_id=frame_tenant,
                    resources=rated_resources))
        return storage_models.DataFrameCollection(dataframes=result_frames)
Ejemplo n.º 17
0
 def _check_begin_end(begin, end):
     """Default a missing period to the current month; return both bounds in UTC."""
     begin = begin or tzutils.get_month_start()
     end = end or tzutils.get_next_month()
     return tzutils.local_to_utc(begin), tzutils.local_to_utc(end)
Ejemplo n.º 18
0
 def test_local_to_utc_naive(self):
     """local_to_utc(naive=True) strips tzinfo for aware and naive inputs."""
     stripped_local = tzutils.local_to_utc(self.local_now, naive=True)
     stripped_naive = tzutils.local_to_utc(self.naive_now, naive=True)
     for stripped in (stripped_local, stripped_naive):
         self.assertIsNone(stripped.tzinfo)
     self.assertEqual(stripped_local, stripped_naive)