Exemplo n.º 1
0
 def get(key):
     """Fake memcache ``get`` stubbing keystone token validation.

     For the two known tokens return the JSON-encoded
     ``(access_info, expiry)`` pair the auth middleware caches; any other
     key falls through and returns ``None`` (a cache miss), matching the
     original implicit-return behavior.
     """
     # Single token -> identity table so the two formerly copy-pasted
     # branches cannot drift apart.
     identities = {
         VALID_TOKEN: {'id': 'user_id1',
                       'name': 'user_name1',
                       'tenantId': '123i2910',
                       'tenantName': 'mytenant',
                       'roles': [{'name': 'admin'}]},
         VALID_TOKEN2: {'id': 'user_id2',
                        'name': 'user-good',
                        'tenantId': 'project-good',
                        'tenantName': 'goodies',
                        'roles': [{'name': 'Member'}]},
     }
     for token, user in identities.items():
         if key == "tokens/%s" % token:
             # Token is presented as valid for the next five minutes.
             dt = timeutils.utcnow() + datetime.timedelta(minutes=5)
             return json.dumps(({'access': {'token': {'id': token},
                                            'user': user}},
                                timeutils.isotime(dt)))
Exemplo n.º 2
0
class Alarm(Base):
    """Define Alarm data."""
    __tablename__ = 'alarm'
    __table_args__ = (
        Index('ix_alarm_user_id', 'user_id'),
        Index('ix_alarm_project_id', 'project_id'),
    )
    # Alarm identifier assigned by the API layer; acts as the primary key.
    alarm_id = Column(String(255), primary_key=True)
    enabled = Column(Boolean)
    name = Column(Text)
    # Alarm type discriminator; valid values are defined by the API layer,
    # not visible here.
    type = Column(String(50))
    description = Column(Text)
    # Default is evaluated at insert time (lambda defers the utcnow call).
    timestamp = Column(PreciseTimestamp, default=lambda: timeutils.utcnow())

    # Ownership columns; both are indexed above for per-tenant lookups.
    user_id = Column(String(255))
    project_id = Column(String(255))

    # Current evaluated state and the time of the last state transition.
    state = Column(String(255))
    state_timestamp = Column(PreciseTimestamp,
                             default=lambda: timeutils.utcnow())

    # Per-state action lists, stored as JSON blobs.
    ok_actions = Column(JSONEncodedDict)
    alarm_actions = Column(JSONEncodedDict)
    insufficient_data_actions = Column(JSONEncodedDict)
    repeat_actions = Column(Boolean)

    # Type-specific rule definition and evaluation time constraints (JSON).
    rule = Column(JSONEncodedDict)
    time_constraints = Column(JSONEncodedDict)
Exemplo n.º 3
0
 def get(key):
     """Fake memcache ``get`` used to stub keystone token validation.

     Returns the JSON-encoded ``(access_info, expiry)`` tuple for the two
     recognized tokens; any other key returns ``None`` implicitly, i.e. a
     cache miss, exactly as before.
     """
     # The per-token identity payloads live in one mapping so the two
     # previously duplicated branches stay consistent.
     identities = {
         VALID_TOKEN: {'id': 'user_id1',
                       'name': 'user_name1',
                       'tenantId': '123i2910',
                       'tenantName': 'mytenant',
                       'roles': [{'name': 'admin'}]},
         VALID_TOKEN2: {'id': 'user_id2',
                        'name': 'user-good',
                        'tenantId': 'project-good',
                        'tenantName': 'goodies',
                        'roles': [{'name': 'Member'}]},
     }
     for token, user in identities.items():
         if key == "tokens/%s" % token:
             # Advertise five minutes of remaining token validity.
             dt = timeutils.utcnow() + datetime.timedelta(minutes=5)
             return json.dumps(({'access': {'token': {'id': token},
                                            'user': user}},
                                timeutils.isotime(dt)))
Exemplo n.º 4
0
class Sample(Base):
    """Metering data."""

    __tablename__ = 'sample'
    __table_args__ = (
        Index('ix_sample_timestamp', 'timestamp'),
        Index('ix_sample_user_id', 'user_id'),
        Index('ix_sample_project_id', 'project_id'),
    )
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Links to the meter definition and the owning user/project/resource.
    meter_id = Column(Integer, ForeignKey('meter.id'))
    user_id = Column(String(255), ForeignKey('user.id'))
    project_id = Column(String(255), ForeignKey('project.id'))
    resource_id = Column(String(255), ForeignKey('resource.id'))
    resource_metadata = Column(JSONEncodedDict())
    # Float(53) matches IEEE double precision on most backends.
    volume = Column(Float(53))
    # 'timestamp' is when the sample was generated; 'recorded_at' is when
    # it was written to storage.  Defaults are evaluated at insert time.
    timestamp = Column(PreciseTimestamp(), default=lambda: timeutils.utcnow())
    recorded_at = Column(PreciseTimestamp(),
                         default=lambda: timeutils.utcnow())
    message_signature = Column(String(1000))
    message_id = Column(String(1000))
    sources = relationship("Source", secondary=lambda: sourceassoc)
    # Typed metadata side tables; their rows are removed with the sample.
    meta_text = relationship("MetaText", backref="sample",
                             cascade="all, delete-orphan")
    meta_float = relationship("MetaFloat", backref="sample",
                              cascade="all, delete-orphan")
    meta_int = relationship("MetaBigInt", backref="sample",
                            cascade="all, delete-orphan")
    meta_bool = relationship("MetaBool", backref="sample",
                             cascade="all, delete-orphan")
Exemplo n.º 5
0
        def _inner():
            """Run ``self.f`` every ``interval`` seconds until stopped."""
            # Optional one-off wait before the first invocation.
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    began = timeutils.utcnow()
                    self.f(*self.args, **self.kw)
                    finished = timeutils.utcnow()
                    if not self._running:
                        break
                    # Sleep only for whatever part of the interval remains
                    # after the task's own runtime.
                    idle = interval - timeutils.delta_seconds(began, finished)
                    if idle <= 0:
                        LOG.warn(_LW('task run outlasted interval by %s sec') %
                                 -idle)
                    greenthread.sleep(max(idle, 0))
            except LoopingCallDone as e:
                # Deliberate termination signalled by the task itself.
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
Exemplo n.º 6
0
    def test_unit_identified_source_unit_conversion(self):
        """Convert only samples matching the source unit.

        A sample in '°C' must be scaled to '°F'; a sample already in '°F'
        must pass through unchanged.
        """
        transformer_cfg = [
            {
                'name': 'unit_conversion',
                'parameters': {
                    'source': {'unit': '°C'},
                    'target': {'unit': '°F',
                               'scale': '(volume * 1.8) + 32'},
                }
            },
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        self._set_pipeline_cfg('counters', ['core_temperature',
                                            'ambient_temperature'])
        counters = [
            sample.Sample(
                name='core_temperature',
                type=sample.TYPE_GAUGE,
                volume=36.0,
                unit='°C',
                user_id='test_user',
                project_id='test_proj',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={}
            ),
            sample.Sample(
                name='ambient_temperature',
                type=sample.TYPE_GAUGE,
                volume=88.8,
                unit='°F',
                user_id='test_user',
                project_id='test_proj',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={}
            ),
        ]

        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]

        pipe.publish_samples(None, counters)
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(len(publisher.samples), 2)
        core_temp = publisher.samples[1]
        self.assertEqual(getattr(core_temp, 'name'), 'core_temperature')
        self.assertEqual(getattr(core_temp, 'unit'), '°F')
        # 36.0 °C * 1.8 + 32 == 96.8 °F
        self.assertEqual(getattr(core_temp, 'volume'), 96.8)
        amb_temp = publisher.samples[0]
        self.assertEqual(getattr(amb_temp, 'name'), 'ambient_temperature')
        self.assertEqual(getattr(amb_temp, 'unit'), '°F')
        self.assertEqual(getattr(amb_temp, 'volume'), 88.8)
Exemplo n.º 7
0
    def test_unit_identified_source_unit_conversion(self):
        """Convert only samples matching the source unit.

        The '°C' sample is scaled to '°F'; the sample already in '°F'
        passes through untouched.
        """
        self.pipeline_cfg[0]['transformers'] = [
            {
                'name': 'unit_conversion',
                'parameters': {
                    'source': {
                        'unit': '°C'
                    },
                    'target': {
                        'unit': '°F',
                        'scale': '(volume * 1.8) + 32'
                    },
                }
            },
        ]
        self.pipeline_cfg[0]['counters'] = [
            'core_temperature', 'ambient_temperature'
        ]
        counters = [
            sample.Sample(name='core_temperature',
                          type=sample.TYPE_GAUGE,
                          volume=36.0,
                          unit='°C',
                          user_id='test_user',
                          project_id='test_proj',
                          resource_id='test_resource',
                          timestamp=timeutils.utcnow().isoformat(),
                          resource_metadata={}),
            sample.Sample(name='ambient_temperature',
                          type=sample.TYPE_GAUGE,
                          volume=88.8,
                          unit='°F',
                          user_id='test_user',
                          project_id='test_proj',
                          resource_id='test_resource',
                          timestamp=timeutils.utcnow().isoformat(),
                          resource_metadata={}),
        ]

        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]

        pipe.publish_samples(None, counters)
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(len(publisher.samples), 2)
        core_temp = publisher.samples[1]
        self.assertEqual(getattr(core_temp, 'name'), 'core_temperature')
        self.assertEqual(getattr(core_temp, 'unit'), '°F')
        # 36.0 °C * 1.8 + 32 == 96.8 °F
        self.assertEqual(getattr(core_temp, 'volume'), 96.8)
        amb_temp = publisher.samples[0]
        self.assertEqual(getattr(amb_temp, 'name'), 'ambient_temperature')
        self.assertEqual(getattr(amb_temp, 'unit'), '°F')
        self.assertEqual(getattr(amb_temp, 'volume'), 88.8)
Exemplo n.º 8
0
    def record_metering_data(self, data):
        """Write the data to the backend storage system.

        :param data: a dictionary such as returned by
                     ceilometer.meter.meter_message_from_counter

        The resource document is upserted first; the returned (post-update)
        document's timestamps then decide whether the metadata and the
        first/last sample timestamps need the in-order or out-of-order
        follow-up update.  Statement order matters here.
        """
        # Record the updated resource metadata - we use $setOnInsert to
        # unconditionally insert sample timestamps and resource metadata
        # (in the update case, this must be conditional on the sample not
        # being out-of-order)
        resource = self.db.resource.find_and_modify(
            {'_id': data['resource_id']},
            {'$set': {'project_id': data['project_id'],
                      'user_id': data['user_id'],
                      'source': data['source'],
                      },
             '$setOnInsert': {'metadata': data['resource_metadata'],
                              'first_sample_timestamp': data['timestamp'],
                              'last_sample_timestamp': data['timestamp'],
                              },
             '$addToSet': {'meter': {'counter_name': data['counter_name'],
                                     'counter_type': data['counter_type'],
                                     'counter_unit': data['counter_unit'],
                                     },
                           },
             },
            upsert=True,
            new=True,
        )

        # only update last sample timestamp if actually later (the usual
        # in-order case)
        last_sample_timestamp = resource.get('last_sample_timestamp')
        if (last_sample_timestamp is None or
                last_sample_timestamp <= data['timestamp']):
            self.db.resource.update(
                {'_id': data['resource_id']},
                {'$set': {'metadata': data['resource_metadata'],
                          'last_sample_timestamp': data['timestamp']}}
            )

        # only update first sample timestamp if actually earlier (the unusual
        # out-of-order case)
        # NOTE: a null first sample timestamp is not updated as this indicates
        # a pre-existing resource document dating from before we started
        # recording these timestamps in the resource collection
        first_sample_timestamp = resource.get('first_sample_timestamp')
        if (first_sample_timestamp is not None and
                first_sample_timestamp > data['timestamp']):
            self.db.resource.update(
                {'_id': data['resource_id']},
                {'$set': {'first_sample_timestamp': data['timestamp']}}
            )

        # Record the raw data for the meter. Use a copy so we do not
        # modify a data structure owned by our caller (the driver adds
        # a new key '_id').
        record = copy.copy(data)
        record['recorded_at'] = timeutils.utcnow()
        self.db.meter.insert(record)
Exemplo n.º 9
0
    def setUp(self):
        """Build a canonical test sample and stub the transformer and
        publisher plugin machinery before each test."""
        super(BasePipelineTestCase, self).setUp()

        # Minimal gauge sample reused by the individual test cases.
        self.test_counter = sample.Sample(
            name='a', type=sample.TYPE_GAUGE, volume=1, unit='B',
            user_id="test_user", project_id="test_proj",
            resource_id="test_resource",
            timestamp=timeutils.utcnow().isoformat(),
            resource_metadata={})

        # Swap the stevedore-backed extension plumbing for local fakes;
        # fixtures are unwound automatically at test teardown.
        self.useFixture(mockpatch.PatchObject(
            transformer.TransformerExtensionManager, "__init__",
            side_effect=self.fake_tem_init))
        self.useFixture(mockpatch.PatchObject(
            transformer.TransformerExtensionManager, "get_ext",
            side_effect=self.fake_tem_get_ext))
        self.useFixture(mockpatch.PatchObject(
            publisher, 'get_publisher', side_effect=self.get_publisher))

        self.transformer_manager = transformer.TransformerExtensionManager()

        self._setup_pipeline_cfg()
Exemplo n.º 10
0
    def get_samples(self, manager, cache, resources=None):
        """Yield one gauge sample per firewall resource.

        :param manager: agent manager (unused; interface requirement)
        :param cache: per-cycle cache (unused; interface requirement)
        :param resources: iterable of firewall dicts; resources with a
                          status this pollster cannot map are skipped.
        """
        resources = resources or []

        for fw in resources:
            LOG.debug("Firewall : %s" % fw)
            status = self.get_status_id(fw['status'])
            if status == -1:
                # unknown status, skip this sample
                # (message previously lacked the space before "skipping",
                # producing ",skipping sample"; now matches the vip pollster)
                LOG.warn(
                    _("Unknown status %(stat)s received on fw %(id)s, "
                      "skipping sample") % {
                          'stat': fw['status'],
                          'id': fw['id']
                      })
                continue

            yield sample.Sample(name='network.services.firewall',
                                type=sample.TYPE_GAUGE,
                                unit='firewall',
                                volume=status,
                                user_id=None,
                                project_id=fw['tenant_id'],
                                resource_id=fw['id'],
                                timestamp=timeutils.utcnow().isoformat(),
                                resource_metadata=self.extract_metadata(fw))
Exemplo n.º 11
0
    def clear_expired_metering_data(self, ttl):
        """Clear expired data from the backend storage system.

        Clearing occurs according to the time-to-live.
        :param ttl: Number of seconds to keep records for.
        """

        session = self._engine_facade.get_session()
        with session.begin():
            cutoff = timeutils.utcnow() - datetime.timedelta(seconds=ttl)
            expired = (session.query(models.Sample)
                       .filter(models.Sample.timestamp < cutoff))

            # Remove the typed metadata rows joined to the expiring samples
            # before the sample rows themselves go away.
            expired_ids = expired.subquery()
            for meta_table in (models.MetaText, models.MetaBigInt,
                               models.MetaFloat, models.MetaBool):
                (session.query(meta_table)
                 .join(expired_ids, expired_ids.c.id == meta_table.id)
                 .delete())

            rows = expired.delete()
            # remove Meter definitions with no matching samples
            (session.query(models.Meter)
             .filter(~models.Meter.samples.any())
             .delete(synchronize_session='fetch'))
            LOG.info(_("%d samples removed from database"), rows)
Exemplo n.º 12
0
    def post(self, data):
        """Create a new alarm.

        :param data: an Alarm from the request body
        :raises: wsme.exc.ClientSideError if an alarm with the same name
                 already exists in the project or the payload is invalid
        """
        conn = pecan.request.storage_conn

        # Ownership always comes from the authenticated request headers,
        # never from the client-supplied payload.
        data.user_id = pecan.request.headers.get('X-User-Id')
        data.project_id = pecan.request.headers.get('X-Project-Id')
        data.alarm_id = wsme.Unset
        data.state_timestamp = wsme.Unset
        data.timestamp = timeutils.utcnow()

        # make sure alarms are unique by name per project.
        # NOTE(review): this check-then-act is racy under concurrent
        # creates; closing it would need a storage-level unique constraint.
        alarms = list(conn.get_alarms(name=data.name, project=data.project_id))
        if alarms:
            error = _("Alarm with that name exists")
            pecan.response.translatable_error = error
            raise wsme.exc.ClientSideError(error)

        try:
            kwargs = data.as_dict(storage.models.Alarm)
            alarm_in = storage.models.Alarm(**kwargs)
        except Exception as ex:
            LOG.exception(ex)
            error = _("Alarm incorrect")
            pecan.response.translatable_error = error
            raise wsme.exc.ClientSideError(error)

        alarm = conn.update_alarm(alarm_in)
        return Alarm.from_db_model(alarm)
Exemplo n.º 13
0
    def test_message_to_event_missing_keys(self):
        """A message lacking optional keys still produces a valid event."""
        now = timeutils.utcnow()
        timeutils.set_time_override(now)
        # Undo the global time override so it cannot leak into other tests.
        self.addCleanup(timeutils.clear_time_override)
        message = {'event_type': "foo",
                   'message_id': "abc",
                   'publisher_id': "1"}

        mock_dispatcher = MagicMock()
        self.srv.dispatcher_manager = test_manager.TestExtensionManager(
            [extension.Extension('test',
                                 None,
                                 None,
                                 mock_dispatcher
                                 ),
             ])

        with patch('ceilometer.collector.service.LOG') as mylog:
            self.srv._message_to_event(message)
            self.assertFalse(mylog.exception.called)
        events = mock_dispatcher.record_events.call_args[0]
        self.assertEqual(1, len(events))
        event = events[0]
        self.assertEqual("foo", event.event_name)
        # generated falls back to the (overridden) current time.
        self.assertEqual(now, event.generated)
        self.assertEqual(1, len(event.traits))
Exemplo n.º 14
0
    def clear_expired_metering_data(self, ttl):
        """Clear expired data from the backend storage system.

        Clearing occurs according to the time-to-live.
        :param ttl: Number of seconds to keep records for.
        """

        session = self._engine_facade.get_session()
        with session.begin():
            end = timeutils.utcnow() - datetime.timedelta(seconds=ttl)
            sample_q = (session.query(models.Sample)
                        .filter(models.Sample.timestamp < end))

            # Delete the typed metadata rows joined to the expiring samples
            # first, while the sample rows still exist to join against.
            sample_subq = sample_q.subquery()
            for table in [models.MetaText, models.MetaBigInt,
                          models.MetaFloat, models.MetaBool]:
                (session.query(table)
                 .join(sample_subq, sample_subq.c.id == table.id)
                 .delete())

            rows = sample_q.delete()
            # remove Meter definitions with no matching samples
            (session.query(models.Meter)
             .filter(~models.Meter.samples.any())
             .delete(synchronize_session='fetch'))
            LOG.info(_("%d samples removed from database"), rows)
Exemplo n.º 15
0
    def setUp(self):
        # Set up a canonical gauge sample and replace the stevedore-backed
        # transformer/publisher machinery with the test doubles defined on
        # this class.
        super(BasePipelineTestCase, self).setUp()

        # Minimal gauge sample shared by the pipeline tests.
        self.test_counter = sample.Sample(
            name='a',
            type=sample.TYPE_GAUGE,
            volume=1,
            unit='B',
            user_id="test_user",
            project_id="test_proj",
            resource_id="test_resource",
            timestamp=timeutils.utcnow().isoformat(),
            resource_metadata={}
        )

        # Fixtures are unwound automatically at teardown.
        self.useFixture(mockpatch.PatchObject(
            transformer.TransformerExtensionManager, "__init__",
            side_effect=self.fake_tem_init))

        self.useFixture(mockpatch.PatchObject(
            transformer.TransformerExtensionManager, "get_ext",
            side_effect=self.fake_tem_get_ext))

        self.useFixture(mockpatch.PatchObject(
            publisher, 'get_publisher', side_effect=self.get_publisher))

        self.transformer_manager = transformer.TransformerExtensionManager()

        self._setup_pipeline_cfg()
Exemplo n.º 16
0
    def test_message_to_event_missing_keys(self):
        """A message lacking optional keys still produces a valid event."""
        now = timeutils.utcnow()
        timeutils.set_time_override(now)
        # Clear the global time override at teardown so other tests are
        # not affected by the frozen clock.
        self.addCleanup(timeutils.clear_time_override)
        message = {'event_type': "foo",
                   'message_id': "abc",
                   'publisher_id': "1"}

        mock_dispatcher = MagicMock()
        self.srv.dispatcher_manager = test_manager.TestExtensionManager(
            [extension.Extension('test',
                                 None,
                                 None,
                                 mock_dispatcher
                                 ),
             ])

        with patch('ceilometer.collector.service.LOG') as mylog:
            self.srv._message_to_event(message)
            self.assertFalse(mylog.exception.called)
        events = mock_dispatcher.record_events.call_args[0]
        self.assertEqual(1, len(events))
        event = events[0]
        self.assertEqual("foo", event.event_name)
        # generated falls back to the (overridden) current time.
        self.assertEqual(now, event.generated)
        self.assertEqual(1, len(event.traits))
Exemplo n.º 17
0
    def clear_expired_metering_data(ttl):
        """Clear expired data from the backend storage system according to the
        time-to-live.

        :param ttl: Number of seconds to keep records for.

        """
        # NOTE(review): no ``self`` parameter -- presumably declared as a
        # @staticmethod above this view; confirm against the full class.
        session = sqlalchemy_session.get_session()
        query = session.query(Meter.id)
        end = timeutils.utcnow() - datetime.timedelta(seconds=ttl)
        query = query.filter(Meter.timestamp < end)
        query.delete()

        # Purge users no longer referenced by any remaining meter row.
        query = session.query(User.id).filter(~User.id.in_(
            session.query(Meter.user_id).group_by(Meter.user_id)
        ))
        query.delete(synchronize_session='fetch')

        # Likewise for orphaned projects...
        query = session.query(Project.id).filter(~Project.id.in_(
            session.query(Meter.project_id).group_by(Meter.project_id)
        ))
        query.delete(synchronize_session='fetch')

        # ...and orphaned resources.
        query = session.query(Resource.id).filter(~Resource.id.in_(
            session.query(Meter.resource_id).group_by(Meter.resource_id)
        ))
        query.delete(synchronize_session='fetch')
Exemplo n.º 18
0
    def get_samples(self, manager, cache, resources=None):
        """Yield one gauge sample per load-balancer vip, skipping resources
        whose status cannot be mapped to a known status id."""
        for vip in (resources or []):
            LOG.debug("Load Balancer Vip : %s" % vip)
            status = self.get_status_id(vip['status'])
            if status == -1:
                # unmappable status -> drop this vip and move on
                LOG.warn(
                    _("Unknown status %(stat)s received on vip %(id)s, "
                      "skipping sample") % {'stat': vip['status'],
                                            'id': vip['id']})
                continue

            yield sample.Sample(name='network.services.lb.vip',
                                type=sample.TYPE_GAUGE,
                                unit='vip',
                                volume=status,
                                user_id=None,
                                project_id=vip['tenant_id'],
                                resource_id=vip['id'],
                                timestamp=timeutils.utcnow().isoformat(),
                                resource_metadata=self.extract_metadata(vip))
Exemplo n.º 19
0
    def clear_expired_metering_data(ttl):
        """Clear expired data from the backend storage system according to the
        time-to-live.

        :param ttl: Number of seconds to keep records for.

        """
        session = sqlalchemy_session.get_session()
        cutoff = timeutils.utcnow() - datetime.timedelta(seconds=ttl)
        session.query(Meter.id).filter(Meter.timestamp < cutoff).delete()

        # Drop users, projects and resources that no remaining meter row
        # references any more.
        orphan_specs = ((User, Meter.user_id),
                        (Project, Meter.project_id),
                        (Resource, Meter.resource_id))
        for model, ref_column in orphan_specs:
            referenced = session.query(ref_column).group_by(ref_column)
            (session.query(model.id)
             .filter(~model.id.in_(referenced))
             .delete(synchronize_session='fetch'))
Exemplo n.º 20
0
    def record_metering_data(self, data):
        """Write the data to the backend storage system.

        :param data: a dictionary such as returned by
                     ceilometer.meter.meter_message_from_counter

        The order of operations matters: the upsert returns the resource
        document whose stored timestamps determine which of the two
        conditional follow-up updates (in-order vs. out-of-order) applies.
        """
        # Record the updated resource metadata - we use $setOnInsert to
        # unconditionally insert sample timestamps and resource metadata
        # (in the update case, this must be conditional on the sample not
        # being out-of-order)
        resource = self.db.resource.find_and_modify(
            {'_id': data['resource_id']},
            {'$set': {'project_id': data['project_id'],
                      'user_id': data['user_id'],
                      'source': data['source'],
                      },
             '$setOnInsert': {'metadata': data['resource_metadata'],
                              'first_sample_timestamp': data['timestamp'],
                              'last_sample_timestamp': data['timestamp'],
                              },
             '$addToSet': {'meter': {'counter_name': data['counter_name'],
                                     'counter_type': data['counter_type'],
                                     'counter_unit': data['counter_unit'],
                                     },
                           },
             },
            upsert=True,
            new=True,
        )

        # only update last sample timestamp if actually later (the usual
        # in-order case)
        last_sample_timestamp = resource.get('last_sample_timestamp')
        if (last_sample_timestamp is None or
                last_sample_timestamp <= data['timestamp']):
            self.db.resource.update(
                {'_id': data['resource_id']},
                {'$set': {'metadata': data['resource_metadata'],
                          'last_sample_timestamp': data['timestamp']}}
            )

        # only update first sample timestamp if actually earlier (the unusual
        # out-of-order case)
        # NOTE: a null first sample timestamp is not updated as this indicates
        # a pre-existing resource document dating from before we started
        # recording these timestamps in the resource collection
        first_sample_timestamp = resource.get('first_sample_timestamp')
        if (first_sample_timestamp is not None and
                first_sample_timestamp > data['timestamp']):
            self.db.resource.update(
                {'_id': data['resource_id']},
                {'$set': {'first_sample_timestamp': data['timestamp']}}
            )

        # Record the raw data for the meter. Use a copy so we do not
        # modify a data structure owned by our caller (the driver adds
        # a new key '_id').
        record = copy.copy(data)
        record['recorded_at'] = timeutils.utcnow()
        self.db.meter.insert(record)
Exemplo n.º 21
0
    def clear_expired_metering_data(self, ttl):
        """Clear expired data from the backend storage system according to the
        time-to-live.

        :param ttl: Number of seconds to keep records for.

        """
        # Before mongodb 2.2 we need to clear expired data manually
        if not self._is_natively_ttl_supported():
            end = timeutils.utcnow() - datetime.timedelta(seconds=ttl)
            f = storage.SampleFilter(end=end)
            q = make_query_from_filter(f, require_meter=False)
            self.db.meter.remove(q)

        results = self.db.meter.group(
            key={},
            condition={},
            reduce=self.REDUCE_GROUP_CLEAN,
            initial={
                'resources': [],
                'users': [],
                'projects': [],
            }
        )
        # group() returns an empty list when the meter collection is empty
        # (previously an IndexError here); treat that as "nothing is
        # referenced", so every orphaned user/project/resource is removed.
        if results:
            referenced = results[0]
        else:
            referenced = {'resources': [], 'users': [], 'projects': []}

        self.db.user.remove({'_id': {'$nin': referenced['users']}})
        self.db.project.remove({'_id': {'$nin': referenced['projects']}})
        self.db.resource.remove({'_id': {'$nin': referenced['resources']}})
Exemplo n.º 22
0
    def setUp(self):
        """Prepare a sample counter, stub the transformer plugins and build
        a single-pipeline configuration for the tests."""
        super(TestPipeline, self).setUp()

        # Minimal counter consumed by the pipeline under test.
        self.test_counter = counter.Counter(
            name='a', type='test_type', volume=1, unit='B',
            user_id="test_user", project_id="test_proj",
            resource_id="test_resource",
            timestamp=timeutils.utcnow().isoformat(),
            resource_metadata={})

        # Swap in the fake transformer extension-manager hooks.
        self.stubs.Set(pipeline.TransformerExtensionManager, "__init__",
                       self.fake_tem_init)
        self.stubs.Set(pipeline.TransformerExtensionManager, "get_ext",
                       self.fake_tem_get_ext)

        self._create_publisher_manager()
        # One pipeline matching counter 'a', with a no-op 'update'
        # transformer and the fake 'test' publisher.
        self.pipeline_cfg = [{
            'name': "test_pipeline",
            'interval': 5,
            'counters': ['a'],
            'transformers': [{'name': "update", 'parameters': {}}],
            'publishers': ["test"],
        }]
Exemplo n.º 23
0
    def record_metering_data(self, data):
        """Write the data to the backend storage system.

        :param data: a dictionary such as returned by
                     ceilometer.meter.meter_message_from_counter
        """
        # Record the updated resource metadata
        self.db.resource.update(
            {'_id': data['resource_id']},
            # 'null' placeholder stands in for a missing user id --
            # presumably because this backend cannot store None here;
            # confirm against the DB2 driver.
            {'$set': {'project_id': data['project_id'],
                      'user_id': data['user_id'] or 'null',
                      'metadata': data['resource_metadata'],
                      'source': data['source'],
                      },
             '$addToSet': {'meter': {'counter_name': data['counter_name'],
                                     'counter_type': data['counter_type'],
                                     'counter_unit': data['counter_unit'],
                                     },
                           },
             },
            upsert=True,
        )

        # Record the raw data for the meter. Use a copy so we do not
        # modify a data structure owned by our caller (the driver adds
        # a new key '_id').
        record = copy.copy(data)
        record['recorded_at'] = timeutils.utcnow()
        # Make sure that the data does have field _id which db2 wont add
        # automatically.
        if record.get('_id') is None:
            record['_id'] = str(bson.objectid.ObjectId())
        self.db.meter.insert(record)
Exemplo n.º 24
0
    def put(self, alarm_id, data):
        """Modify an alarm.

        :param alarm_id: ID of the alarm to update
        :param data: an Alarm from the request body
        :raises: wsme.exc.ClientSideError if no matching alarm is visible
                 to the caller's project
        """
        conn = pecan.request.storage_conn
        data.state_timestamp = wsme.Unset
        data.alarm_id = alarm_id
        auth_project = acl.get_limited_to_project(pecan.request.headers)

        alarms = list(conn.get_alarms(alarm_id=alarm_id,
                                      project=auth_project))
        if not alarms:
            error = _("Unknown alarm")
            pecan.response.translatable_error = error
            raise wsme.exc.ClientSideError(error)

        # merge the new values from kwargs into the current
        # alarm "alarm_in".
        alarm_in = alarms[0]
        kwargs = data.as_dict(storage.models.Alarm)
        # items() instead of iteritems(): identical behavior on Python 2,
        # portable to Python 3.
        for k, v in kwargs.items():
            setattr(alarm_in, k, v)
            if k == 'state':
                # A state change through the API stamps the transition time.
                alarm_in.state_timestamp = timeutils.utcnow()

        alarm = conn.update_alarm(alarm_in)
        return Alarm.from_db_model(alarm)
Exemplo n.º 25
0
 def _is_master(self, interval):
     """Determine if the current partition is the master.

     :param interval: heartbeat interval in seconds; a peer not heard
                      from in more than twice this is considered stale
     :return: True if warmed up and no older (smaller-id) live
              partition is known
     """
     now = timeutils.utcnow()
     # Refuse mastership while warming up so peers' reports can arrive.
     if timeutils.delta_seconds(self.start, now) < interval * 2:
         LOG.debug(_('%s still warming up') % self.this)
         return False
     is_master = True
     # Snapshot the items so stale entries can be deleted from
     # self.reports during iteration (required for Python 3; harmless
     # on Python 2 where items() already returns a list).
     for partition, last_heard in list(self.reports.items()):
         delta = timeutils.delta_seconds(last_heard, now)
         LOG.debug(
             _('last heard from %(report)s %(delta)s seconds ago') %
             dict(report=partition, delta=delta))
         if delta > interval * 2:
             # Peer went silent: drop it and flag a membership change.
             del self.reports[partition]
             self._record_oldest(partition, stale=True)
             LOG.debug(
                 _('%(this)s detects stale evaluator: %(stale)s') %
                 dict(this=self.this, stale=partition))
             self.presence_changed = True
         elif partition < self.this:
             # An older live partition outranks us for mastership.
             is_master = False
             LOG.info(
                 _('%(this)s sees older potential master: %(older)s') %
                 dict(this=self.this, older=partition))
     LOG.info(
         _('%(this)s is master?: %(is_master)s') %
         dict(this=self.this, is_master=is_master))
     return is_master
Exemplo n.º 26
0
    def post(self, data):
        """Create a new alarm."""
        conn = pecan.request.storage_conn

        now = timeutils.utcnow()
        data.alarm_id = str(uuid.uuid4())
        data.user_id = pecan.request.headers.get('X-User-Id')
        data.project_id = pecan.request.headers.get('X-Project-Id')
        data.state_timestamp = wsme.Unset
        # Capture the change record before stamping the timestamp.
        change = data.as_dict(storage.models.Alarm)
        data.timestamp = now

        # Enforce per-project uniqueness of alarm names.
        if list(conn.get_alarms(name=data.name, project=data.project_id)):
            error = _("Alarm with that name exists")
            pecan.response.translatable_error = error
            raise wsme.exc.ClientSideError(error)

        try:
            alarm_in = storage.models.Alarm(
                **data.as_dict(storage.models.Alarm))
        except Exception as ex:
            LOG.exception(ex)
            error = _("Alarm incorrect")
            pecan.response.translatable_error = error
            raise wsme.exc.ClientSideError(error)

        alarm = conn.create_alarm(alarm_in)
        # Record the creation in the alarm-change audit trail.
        self._record_creation(conn, change, alarm.alarm_id, now)
        return Alarm.from_db_model(alarm)
Exemplo n.º 27
0
    def post(self, data):
        """Create a new alarm"""
        conn = pecan.request.storage_conn

        data.user_id = pecan.request.headers.get('X-User-Id')
        data.project_id = pecan.request.headers.get('X-Project-Id')
        data.alarm_id = wsme.Unset
        data.state_timestamp = wsme.Unset
        data.timestamp = timeutils.utcnow()

        # Reject duplicate alarm names within the same project.
        existing = list(conn.get_alarms(name=data.name,
                                        project=data.project_id))
        if existing:
            raise wsme.exc.ClientSideError(_("Alarm with that name exists"))

        try:
            alarm_in = storage.models.Alarm(
                **data.as_dict(storage.models.Alarm))
        except Exception as ex:
            LOG.exception(ex)
            raise wsme.exc.ClientSideError(_("Alarm incorrect"))

        alarm = conn.update_alarm(alarm_in)
        return Alarm.from_db_model(alarm)
Exemplo n.º 28
0
    def publish_sample(self, env, bytes_received, bytes_sent):
        """Emit Swift traffic samples for one proxied request.

        :param env: the WSGI environment of the request
        :param bytes_received: bytes uploaded by the client (falsy to skip)
        :param bytes_sent: bytes downloaded by the client (falsy to skip)
        """
        req = REQUEST.Request(env)
        try:
            # Expect /<version>/<account>[/<container>[/<object>]].
            version, account, container, obj = utils.split_path(req.path, 2,
                                                                4, True)
        except ValueError:
            # Not an object-storage path; nothing to meter.
            return
        # One shared timestamp for every sample from this request.
        now = timeutils.utcnow().isoformat()

        resource_metadata = {
            "path": req.path,
            "version": version,
            "container": container,
            "object": obj,
        }

        # Copy any configured HTTP headers into the sample metadata.
        for header in self.metadata_headers:
            if header.upper() in req.headers:
                resource_metadata['http_header_%s' % header] = req.headers.get(
                    header.upper())

        with self.pipeline_manager.publisher(
                context.get_admin_context()) as publisher:
            if bytes_received:
                publisher([sample.Sample(
                    name='storage.objects.incoming.bytes',
                    type=sample.TYPE_DELTA,
                    unit='B',
                    volume=bytes_received,
                    user_id=env.get('HTTP_X_USER_ID'),
                    project_id=env.get('HTTP_X_TENANT_ID'),
                    # strip the 'AUTH_' prefix from the account name
                    resource_id=account.partition('AUTH_')[2],
                    timestamp=now,
                    resource_metadata=resource_metadata)])

            if bytes_sent:
                publisher([sample.Sample(
                    name='storage.objects.outgoing.bytes',
                    type=sample.TYPE_DELTA,
                    unit='B',
                    volume=bytes_sent,
                    user_id=env.get('HTTP_X_USER_ID'),
                    project_id=env.get('HTTP_X_TENANT_ID'),
                    resource_id=account.partition('AUTH_')[2],
                    timestamp=now,
                    resource_metadata=resource_metadata)])

            # publish the event for each request
            # request method will be recorded in the metadata
            resource_metadata['method'] = req.method.lower()
            publisher([sample.Sample(
                name='storage.api.request',
                type=sample.TYPE_DELTA,
                unit='request',
                volume=1,
                user_id=env.get('HTTP_X_USER_ID'),
                project_id=env.get('HTTP_X_TENANT_ID'),
                resource_id=account.partition('AUTH_')[2],
                timestamp=now,
                resource_metadata=resource_metadata)])
Exemplo n.º 29
0
    def publish_sample(self, env, bytes_received, bytes_sent):
        """Emit Swift traffic samples for one proxied request.

        :param env: the WSGI environment of the request
        :param bytes_received: bytes uploaded by the client (falsy to skip)
        :param bytes_sent: bytes downloaded by the client (falsy to skip)

        NOTE(review): unlike the sibling implementation, a malformed path
        here lets split_path's ValueError propagate to the caller.
        """
        req = REQUEST.Request(env)
        # Expect /<version>[/<account>[/<container>[/<object>]]].
        version, account, container, obj = split_path(req.path, 1, 4, True)
        # One shared timestamp for every sample from this request.
        now = timeutils.utcnow().isoformat()

        resource_metadata = {
            "path": req.path,
            "version": version,
            "container": container,
            "object": obj,
        }

        # Copy any configured HTTP headers into the sample metadata.
        for header in self.metadata_headers:
            if header.upper() in req.headers:
                resource_metadata['http_header_%s' % header] = req.headers.get(
                    header.upper())

        with self.pipeline_manager.publisher(
                context.get_admin_context()) as publisher:
            if bytes_received:
                publisher([
                    sample.Sample(name='storage.objects.incoming.bytes',
                                  type=sample.TYPE_DELTA,
                                  unit='B',
                                  volume=bytes_received,
                                  user_id=env.get('HTTP_X_USER_ID'),
                                  project_id=env.get('HTTP_X_TENANT_ID'),
                                  # strip the 'AUTH_' account prefix
                                  resource_id=account.partition('AUTH_')[2],
                                  timestamp=now,
                                  resource_metadata=resource_metadata)
                ])

            if bytes_sent:
                publisher([
                    sample.Sample(name='storage.objects.outgoing.bytes',
                                  type=sample.TYPE_DELTA,
                                  unit='B',
                                  volume=bytes_sent,
                                  user_id=env.get('HTTP_X_USER_ID'),
                                  project_id=env.get('HTTP_X_TENANT_ID'),
                                  resource_id=account.partition('AUTH_')[2],
                                  timestamp=now,
                                  resource_metadata=resource_metadata)
                ])

            # publish the event for each request
            # request method will be recorded in the metadata
            resource_metadata['method'] = req.method.lower()
            publisher([
                sample.Sample(name='storage.api.request',
                              type=sample.TYPE_DELTA,
                              unit='request',
                              volume=1,
                              user_id=env.get('HTTP_X_USER_ID'),
                              project_id=env.get('HTTP_X_TENANT_ID'),
                              resource_id=account.partition('AUTH_')[2],
                              timestamp=now,
                              resource_metadata=resource_metadata)
            ])
Exemplo n.º 30
0
    def _extract_when(body):
        """Extract the generated datetime from the notification.
        """
        # The payload 'timestamp' wins; '_context_timestamp' is only
        # consulted when that key is absent.
        when = body.get('timestamp', body.get('_context_timestamp'))
        if not when:
            # No usable timestamp in the notification: stamp it now.
            return timeutils.utcnow()
        return timeutils.normalize_time(timeutils.parse_isotime(when))
Exemplo n.º 31
0
 def test_sample_filter_timestamp_invalid_op(self):
     """An unsupported operator on a timestamp query must be rejected."""
     ts_start = timeutils.utcnow()
     query = [api.Query(field='timestamp',
                        op='eq',
                        value=str(ts_start))]
     self.assertRaises(wsme.exc.InvalidInput,
                       api._query_to_kwargs,
                       query,
                       storage.SampleFilter.__init__)
Exemplo n.º 32
0
 def test_ordination_presence(self):
     """A presence report is fanned out with its uuid and priority."""
     uid = uuid.uuid4()
     prio = float(timeutils.utcnow().strftime('%s.%f'))
     self.ordination.presence(uid, prio)
     method, args = self.notified[0]
     payload = args['data']
     self.assertEqual(uid, payload['uuid'])
     self.assertEqual(prio, payload['priority'])
     self.assertEqual('presence', method)
Exemplo n.º 33
0
 def soft_delete(self, synchronize_session='evaluate'):
     """Mark matching rows deleted instead of removing them.

     Sets ``deleted`` to the row id and stamps ``deleted_at`` while
     leaving ``updated_at`` unchanged.
     """
     values = {
         'deleted': literal_column('id'),
         'updated_at': literal_column('updated_at'),
         'deleted_at': timeutils.utcnow(),
     }
     return self.update(values, synchronize_session=synchronize_session)
Exemplo n.º 34
0
    def _extract_when(body):
        """Extract the generated datetime from the notification.
        """
        # Prefer the payload's own 'timestamp'; fall back to the request
        # context's '_context_timestamp' only when the key is missing.
        when = body.get('timestamp', body.get('_context_timestamp'))
        if not when:
            return timeutils.utcnow()
        return timeutils.normalize_time(timeutils.parse_isotime(when))
Exemplo n.º 35
0
 def test_ordination_presence(self):
     """Presence reports round-trip uuid and priority via the notifier."""
     id = uuid.uuid4()
     # '%s.%f' -> seconds-since-epoch with microseconds ('%s' is a
     # glibc strftime extension, not portable everywhere).
     priority = float(timeutils.utcnow().strftime('%s.%f'))
     self.ordination.presence(id, priority)
     method, args = self.notified[0]
     self.assertEqual(id, args['data']['uuid'])
     self.assertEqual(priority, args['data']['priority'])
     self.assertEqual('presence', method)
Exemplo n.º 36
0
    def setUp(self):
        """Configure an isolated collector with canned sample payloads."""
        super(TestCollector, self).setUp()
        self.CONF = self.useFixture(config.Config()).conf
        self.CONF.import_opt("connection",
                             "ceilometer.openstack.common.db.options",
                             group="database")
        # 'log://' storage driver: samples are logged, not persisted.
        self.CONF.set_override("connection", "log://", group='database')
        self.CONF.set_override('metering_secret',
                               'not-so-secret',
                               group='publisher')
        self._setup_messaging()

        # A deliberately malformed sample (type 'bad') as a plain dict.
        self.counter = sample.Sample(
            name='foobar',
            type='bad',
            unit='F',
            volume=1,
            user_id='jd',
            project_id='ceilometer',
            resource_id='cat',
            timestamp=timeutils.utcnow().isoformat(),
            resource_metadata={},
        ).as_dict()

        # A well-formed metering message signed with the test secret,
        # containing unicode fields.
        self.utf8_msg = utils.meter_message_from_counter(
            sample.Sample(
                name=u'test',
                type=sample.TYPE_CUMULATIVE,
                unit=u'',
                volume=1,
                user_id=u'test',
                project_id=u'test',
                resource_id=u'test_run_tasks',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={u'name': [([u'TestPublish'])]},
                source=u'testsource',
            ), 'not-so-secret')

        self.srv = collector.CollectorService()

        # Run would-be background threads synchronously in the test.
        self.useFixture(
            mockpatch.PatchObject(
                self.srv.tg,
                'add_thread',
                side_effect=self._dummy_thread_group_add_thread))
Exemplo n.º 37
0
 def delete(self):
     """Delete this alarm."""
     # Resolve the alarm first so a missing one raises before deletion.
     alarm = self._alarm()
     self.conn.delete_alarm(alarm.alarm_id)
     # Record an audit-trail entry describing what was removed.
     snapshot = Alarm.from_db_model(alarm).as_dict(storage.models.Alarm)
     self._record_change(snapshot, timeutils.utcnow(),
                         type=storage.models.AlarmChange.DELETION)
Exemplo n.º 38
0
    def record_metering_data(self, data):
        """Write the data to the backend storage system.

        :param data: a dictionary such as returned by
                     ceilometer.meter.meter_message_from_counter
        """
        with self.conn_pool.connection() as conn:
            project_table = conn.table(self.PROJECT_TABLE)
            user_table = conn.table(self.USER_TABLE)
            resource_table = conn.table(self.RESOURCE_TABLE)
            meter_table = conn.table(self.METER_TABLE)

            # Make sure we know about the user and project
            if data['user_id']:
                self._update_sources(user_table, data['user_id'],
                                     data['source'])
            self._update_sources(project_table, data['project_id'],
                                 data['source'])

            # Get metadata from user's data
            resource_metadata = data.get('resource_metadata', {})
            # Determine the name of new meter
            new_meter = _format_meter_reference(
                data['counter_name'], data['counter_type'],
                data['counter_unit'])
            # Load the currently stored resource row to decide whether
            # anything actually changed.
            flatten_result, sources, meters, metadata = \
                deserialize_entry(resource_table.row(data['resource_id']))

            # Update if resource has new information
            if (data['source'] not in sources) or (
                    new_meter not in meters) or (
                    metadata != resource_metadata):
                resource_table.put(data['resource_id'],
                                   serialize_entry(
                                       **{'sources': [data['source']],
                                          'meters': [new_meter],
                                          'metadata': resource_metadata,
                                          'resource_id': data['resource_id'],
                                          'project_id': data['project_id'],
                                          'user_id': data['user_id']}))

            # Rowkey consists of reversed timestamp, meter and an md5 of
            # user+resource+project for purposes of uniqueness
            m = hashlib.md5()
            m.update("%s%s%s" % (data['user_id'], data['resource_id'],
                                 data['project_id']))

            # We use reverse timestamps in rowkeys as they are sorted
            # alphabetically.
            rts = reverse_timestamp(data['timestamp'])
            row = "%s_%d_%s" % (data['counter_name'], rts, m.hexdigest())
            # Store the full message plus scan-friendly denormalized
            # fields (rts, metadata) and the server-side receipt time.
            record = serialize_entry(data, **{'metadata': resource_metadata,
                                              'rts': rts,
                                              'message': data,
                                              'recorded_at': timeutils.utcnow(
                                              )})
            meter_table.put(row, record)
Exemplo n.º 39
0
 def test_ordination_presence(self):
     """The coordinator should receive the presence report unchanged."""
     uid = str(uuid.uuid4())
     prio = float(timeutils.utcnow().strftime('%s.%f'))
     self.ordination.presence(uid, prio)
     # Wait for the RPC round-trip to complete before inspecting.
     self.coordinator_server.rpc.wait()
     method, args = self.coordinator_server.notified[0]
     self.assertEqual('presence', method)
     self.assertEqual(uid, args['uuid'])
     self.assertEqual(prio, args['priority'])
Exemplo n.º 40
0
 def test_coordination_presence(self):
     """A presence call reaches the coordinator with its payload intact."""
     uid = str(uuid.uuid4())
     prio = float(timeutils.utcnow().strftime("%s.%f"))
     self.coordination.presence(uid, prio)
     # Let the RPC round-trip finish before asserting.
     self.coordinator_server.rpc.wait()
     method, args = self.coordinator_server.notified[0]
     self.assertEqual("presence", method)
     self.assertEqual(uid, args["uuid"])
     self.assertEqual(prio, args["priority"])
Exemplo n.º 41
0
 def test_ordination_presence(self):
     """A presence call publishes on the coordination topic."""
     id = uuid.uuid4()
     # seconds.microseconds since the epoch ('%s' is a glibc extension).
     priority = float(timeutils.utcnow().strftime('%s.%f'))
     self.ordination.presence(id, priority)
     topic, msg = self.notified[0]
     self.assertEqual(topic, 'alarm_partition_coordination')
     self.assertEqual(msg['args']['data']['uuid'], id)
     self.assertEqual(msg['args']['data']['priority'], priority)
     self.assertEqual(msg['method'], 'presence')
Exemplo n.º 42
0
    def record_metering_data(self, data):
        """Write the data to the backend storage system.

        :param data: a dictionary such as returned by
                     ceilometer.meter.meter_message_from_counter
        """
        with self.conn_pool.connection() as conn:
            project_table = conn.table(self.PROJECT_TABLE)
            user_table = conn.table(self.USER_TABLE)
            resource_table = conn.table(self.RESOURCE_TABLE)
            meter_table = conn.table(self.METER_TABLE)

            # Register the sample's source against its user and project.
            if data['user_id']:
                user_table.put(data['user_id'],
                               serialize_entry(**{'source': data['source']}))

            project_table.put(data['project_id'],
                              serialize_entry(**{'source': data['source']}))

            resource_metadata = data.get('resource_metadata', {})
            # Determine the name of new meter
            new_meter = _format_meter_reference(data['counter_name'],
                                                data['counter_type'],
                                                data['counter_unit'],
                                                data['source'])
            #TODO(nprivalova): try not to store resource_id
            # Unconditionally rewrite the resource row with the latest
            # ownership, meter and metadata information.
            resource = serialize_entry(
                **{
                    'source': data['source'],
                    'meter': new_meter,
                    'resource_metadata': resource_metadata,
                    'resource_id': data['resource_id'],
                    'project_id': data['project_id'],
                    'user_id': data['user_id']
                })
            resource_table.put(data['resource_id'], resource)

            #TODO(nprivalova): improve uniqueness
            # Rowkey consists of reversed timestamp, meter and an md5 of
            # user+resource+project for purposes of uniqueness
            m = hashlib.md5()
            m.update(
                "%s%s%s" %
                (data['user_id'], data['resource_id'], data['project_id']))

            # We use reverse timestamps in rowkeys as they are sorted
            # alphabetically.
            rts = reverse_timestamp(data['timestamp'])
            row = "%s_%d_%s" % (data['counter_name'], rts, m.hexdigest())
            # Persist the full message plus denormalized scan fields and
            # the server-side receipt time.
            record = serialize_entry(
                data, **{
                    'source': data['source'],
                    'rts': rts,
                    'message': data,
                    'recorded_at': timeutils.utcnow()
                })
            meter_table.put(row, record)
Exemplo n.º 43
0
 def presence(self, uuid, priority):
     """Accept an incoming report of presence."""
     report = PartitionIdentity(uuid, priority)
     # Our own heartbeat carries no new information.
     if report == self.this:
         return
     if report not in self.reports:
         # First time we hear from this peer: membership changed.
         self.presence_changed = True
     self._record_oldest(report)
     self.reports[report] = timeutils.utcnow()
     LOG.debug(_("%(this)s knows about %(reports)s") % dict(this=self.this, reports=self.reports))
Exemplo n.º 44
0
 def test_ordination_presence(self):
     """Presence data must arrive at the coordinator unmodified."""
     member_id = str(uuid.uuid4())
     member_priority = float(timeutils.utcnow().strftime('%s.%f'))
     self.ordination.presence(member_id, member_priority)
     # Block until the RPC server has processed the message.
     self.coordinator_server.rpc.wait()
     method, args = self.coordinator_server.notified[0]
     self.assertEqual(member_id, args['uuid'])
     self.assertEqual(member_priority, args['priority'])
     self.assertEqual('presence', method)
Exemplo n.º 45
0
 def test_ordination_presence(self):
     """A presence report goes out on the coordination topic."""
     id = uuid.uuid4()
     # seconds.microseconds since the epoch ('%s' is a glibc extension).
     priority = float(timeutils.utcnow().strftime('%s.%f'))
     self.ordination.presence(id, priority)
     topic, msg = self.notified[0]
     self.assertEqual(topic, 'alarm_partition_coordination')
     self.assertEqual(msg['args']['data']['uuid'], id)
     self.assertEqual(msg['args']['data']['priority'], priority)
     self.assertEqual(msg['method'], 'presence')
Exemplo n.º 46
0
    def test_notification_service(self, fake_publisher_cls):
        """End-to-end: a compute notification becomes published samples."""
        fake_publisher_cls.return_value = self.publisher
        self.srv.start()

        notifier = messaging.get_notifier(self.transport,
                                          "compute.vagrant-precise")
        notifier.info(context.RequestContext(), 'compute.instance.create.end',
                      TEST_NOTICE_PAYLOAD)
        # Poll (bounded by 600s of wall clock) until the expected number
        # of samples arrives; sleep(0) yields to the service greenthreads.
        start = timeutils.utcnow()
        while timeutils.delta_seconds(start, timeutils.utcnow()) < 600:
            if len(self.publisher.samples) >= self.expected_samples:
                break
            eventlet.sleep(0)

        self.srv.stop()

        # All emitted samples must refer to the instance in the payload.
        resources = list(set(s.resource_id for s in self.publisher.samples))
        self.assertEqual(self.expected_samples, len(self.publisher.samples))
        self.assertEqual(["9f9d01b9-4a58-4271-9e27-398b21ab20d1"], resources)
Exemplo n.º 47
0
        def _inner():
            # Greenthread body of the looping call: run self.f every
            # `interval` seconds until stopped or LoopingCallDone is raised.
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = timeutils.utcnow()
                    self.f(*self.args, **self.kw)
                    end = timeutils.utcnow()
                    if not self._running:
                        break
                    # Sleep only for the remainder of the interval; warn
                    # when the task itself outlasted the interval.
                    delay = interval - timeutils.delta_seconds(start, end)
                    if delay <= 0:
                        LOG.warn(_('task run outlasted interval by %s sec') %
                                 -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
            except LoopingCallDone, e:  # Python 2 except syntax
                # The task requested a clean stop with a return value.
                self.stop()
                done.send(e.retvalue)
Exemplo n.º 48
0
    def setUp(self):
        """Configure an isolated collector with canned sample payloads."""
        super(TestCollector, self).setUp()
        self.CONF = self.useFixture(fixture_config.Config()).conf
        self.CONF.import_opt("connection",
                             "ceilometer.openstack.common.db.options",
                             group="database")
        # 'log://' storage driver: samples are logged, not persisted.
        self.CONF.set_override("connection", "log://", group='database')
        self.CONF.set_override('metering_secret', 'not-so-secret',
                               group='publisher')
        self._setup_messaging()

        # A deliberately malformed sample (type 'bad') as a plain dict.
        self.counter = sample.Sample(
            name='foobar',
            type='bad',
            unit='F',
            volume=1,
            user_id='jd',
            project_id='ceilometer',
            resource_id='cat',
            timestamp=timeutils.utcnow().isoformat(),
            resource_metadata={},
        ).as_dict()

        # A well-formed metering message signed with the test secret,
        # containing unicode fields.
        self.utf8_msg = utils.meter_message_from_counter(
            sample.Sample(
                name=u'test',
                type=sample.TYPE_CUMULATIVE,
                unit=u'',
                volume=1,
                user_id=u'test',
                project_id=u'test',
                resource_id=u'test_run_tasks',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={u'name': [([u'TestPublish'])]},
                source=u'testsource',
            ),
            'not-so-secret')

        self.srv = collector.CollectorService()

        # Run would-be background threads synchronously in the test.
        self.useFixture(mockpatch.PatchObject(
            self.srv.tg, 'add_thread',
            side_effect=self._dummy_thread_group_add_thread))
Exemplo n.º 49
0
 def presence(self, uuid, priority):
     """Accept an incoming report of presence."""
     incoming = PartitionIdentity(uuid, priority)
     # Ignore our own heartbeat; only peers affect membership state.
     if incoming == self.this:
         return
     if incoming not in self.reports:
         # A previously unseen peer means the membership changed.
         self.presence_changed = True
     self._record_oldest(incoming)
     self.reports[incoming] = timeutils.utcnow()
     LOG.debug(_('%(this)s knows about %(reports)s') %
               dict(this=self.this, reports=self.reports))
Exemplo n.º 50
0
    def test_notification_service(self, fake_publisher_cls):
        """End-to-end: a compute notification becomes published samples."""
        fake_publisher_cls.return_value = self.publisher
        self.srv.start()

        notifier = messaging.get_notifier(self.transport,
                                          "compute.vagrant-precise")
        notifier.info(context.RequestContext(), 'compute.instance.create.end',
                      TEST_NOTICE_PAYLOAD)
        # Poll (bounded by 600s of wall clock) until the expected number
        # of samples arrives; sleep(0) yields to the service greenthreads.
        start = timeutils.utcnow()
        while timeutils.delta_seconds(start, timeutils.utcnow()) < 600:
            if len(self.publisher.samples) >= self.expected_samples:
                break
            eventlet.sleep(0)

        self.srv.stop()

        # All emitted samples must refer to the instance in the payload.
        resources = list(set(s.resource_id for s in self.publisher.samples))
        self.assertEqual(self.expected_samples, len(self.publisher.samples))
        self.assertEqual(["9f9d01b9-4a58-4271-9e27-398b21ab20d1"], resources)
Exemplo n.º 51
0
def notify(context, publisher_id, event_type, priority, payload):
    """Sends a notification using the specified driver

    :param publisher_id: the source worker_type.host of the message
    :param event_type:   the literal type of event (ex. Instance Creation)
    :param priority:     patterned after the enumeration of Python logging
                         levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload:       A python dictionary of attributes

    The outgoing message carries the parameters above plus a generated
    ``message_id`` (a UUID) and a ``timestamp`` (GMT time of sending),
    assembled into one dictionary and handed to every configured driver.

    Message example::

        {'message_id': str(uuid.uuid4()),
         'publisher_id': 'compute.host1',
         'timestamp': timeutils.utcnow(),
         'priority': 'WARN',
         'event_type': 'compute.create_instance',
         'payload': {'instance_id': 12, ... }}

    :raises BadPriorityException: if ``priority`` is not a known level
    """
    if priority not in log_levels:
        raise BadPriorityException(_("%s not in valid priorities") % priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = {
        'message_id': str(uuid.uuid4()),
        'publisher_id': publisher_id,
        'event_type': event_type,
        'priority': priority,
        'payload': payload,
        'timestamp': str(timeutils.utcnow()),
    }

    # Best-effort fan-out: one failing driver must not block the others.
    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            LOG.exception(
                _("Problem '%(e)s' attempting to "
                  "send to notification system. "
                  "Payload=%(payload)s") % dict(e=e, payload=payload))
Exemplo n.º 52
0
def drop_old_duplicate_entries_from_table(migrate_engine, table_name, use_soft_delete, *uc_column_names):
    """Drop all old rows having the same values for columns in uc_columns.

    This method drop (or mark ad `deleted` if use_soft_delete is True) old
    duplicate rows form table with name `table_name`.

    :param migrate_engine:  Sqlalchemy engine
    :param table_name:      Table with duplicates
    :param use_soft_delete: If True - values will be marked as `deleted`,
                            if False - values will be removed from table
    :param uc_column_names: Unique constraint columns
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    columns_for_group_by = [table.c[name] for name in uc_column_names]

    # Select one surviving id (the max) per duplicated group.
    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)

    duplicated_rows_select = select(
        columns_for_select, group_by=columns_for_group_by, having=func.count(table.c.id) > 1
    )

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        is_none = None  # workaround for pyflakes
        delete_condition &= table.c.deleted_at == is_none
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]

        # Log every row about to be removed. Use a distinct loop variable:
        # the original shadowed the outer `row`, which was a refactoring
        # hazard even though the outer value was not reused afterwards.
        rows_to_delete_select = select([table.c.id]).where(delete_condition)
        for doomed in migrate_engine.execute(rows_to_delete_select).fetchall():
            LOG.info(
                _("Deleting duplicated row with id: %(id)s from table: " "%(table)s")
                % dict(id=doomed[0], table=table_name)
            )

        if use_soft_delete:
            delete_statement = (
                table.update()
                .where(delete_condition)
                .values(
                    {
                        "deleted": literal_column("id"),
                        "updated_at": literal_column("updated_at"),
                        "deleted_at": timeutils.utcnow(),
                    }
                )
            )
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)
Exemplo n.º 53
0
        def _inner():
            # Greenthread body of the looping call: run self.f every
            # `interval` seconds until stopped or LoopingCallDone is raised.
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = timeutils.utcnow()
                    self.f(*self.args, **self.kw)
                    end = timeutils.utcnow()
                    if not self._running:
                        break
                    # Sleep only for the remainder of the interval; warn
                    # when the task itself outlasted the interval.
                    delay = interval - timeutils.delta_seconds(start, end)
                    if delay <= 0:
                        LOG.warn(
                            _('task run outlasted interval by %s sec') %
                            -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
            except LoopingCallDone, e:  # Python 2 except syntax
                # The task requested a clean stop with a return value.
                self.stop()
                done.send(e.retvalue)
Exemplo n.º 54
0
def notify(context, publisher_id, event_type, priority, payload):
    """Send a notification message through every configured driver.

    :param context:      request context passed through to each driver
    :param publisher_id: the source worker_type.host of the message
    :param event_type:   the literal type of event (ex. Instance Creation)
    :param priority:     patterned after the enumeration of Python logging
                         levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload:      A python dictionary of attributes
    :raises BadPriorityException: if `priority` is not in `log_levels`

    The outgoing message is the above parameters plus two generated fields:

    message_id
      a UUID representing the id for this notification

    timestamp
      the GMT timestamp the notification was sent at

    Example of the composite message handed to each driver::

        {'message_id': str(uuid.uuid4()),
         'publisher_id': 'compute.host1',
         'timestamp': timeutils.utcnow(),
         'priority': 'WARN',
         'event_type': 'compute.create_instance',
         'payload': {'instance_id': 12, ... }}

    A failure in one driver is logged and does not stop delivery through
    the remaining drivers.
    """
    if priority not in log_levels:
        raise BadPriorityException(
            _('%s not in valid priorities') % priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = {
        'message_id': str(uuid.uuid4()),
        'publisher_id': publisher_id,
        'event_type': event_type,
        'priority': priority,
        'payload': payload,
        'timestamp': str(timeutils.utcnow()),
    }

    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            # Best-effort delivery: log and continue with the next driver.
            LOG.exception(_("Problem '%(e)s' attempting to "
                            "send to notification system. "
                            "Payload=%(payload)s")
                          % dict(e=e, payload=payload))
Exemplo n.º 55
0
def send_sample():
    """Build a single Sample from command-line options and publish it
    through the configured ceilometer pipeline.

    Side effects: registers CLI options on the global CONF object,
    initializes the service configuration, and attaches a DEBUG-level
    console handler to the root logger.
    """
    # CLI options describing the sample; name, type and resource id are
    # mandatory.
    cfg.CONF.register_cli_opts([
        cfg.StrOpt('sample-name', short='n', help='Meter name.',
                   required=True),
        cfg.StrOpt('sample-type',
                   short='y',
                   help='Meter type (gauge, delta, cumulative).',
                   default='gauge',
                   required=True),
        cfg.StrOpt('sample-unit', short='U', help='Meter unit.', default=None),
        cfg.IntOpt('sample-volume',
                   short='l',
                   help='Meter volume value.',
                   default=1),
        cfg.StrOpt('sample-resource',
                   short='r',
                   help='Meter resource id.',
                   required=True),
        cfg.StrOpt('sample-user', short='u', help='Meter user id.'),
        cfg.StrOpt('sample-project', short='p', help='Meter project id.'),
        # NOTE: the default timestamp is captured once, when send_sample()
        # registers the options — i.e. "now" at invocation time.
        cfg.StrOpt('sample-timestamp',
                   short='i',
                   help='Meter timestamp.',
                   default=timeutils.utcnow().isoformat()),
        cfg.StrOpt('sample-metadata', short='m', help='Meter metadata.'),
    ])

    service.prepare_service()

    # Set up logging to use the console
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    root_logger = logging.getLogger('')
    root_logger.addHandler(console)
    root_logger.setLevel(logging.DEBUG)

    pipeline_manager = pipeline.setup_pipeline(
        transformer.TransformerExtensionManager('ceilometer.transformer', ), )

    # Publish the one sample under an admin context.
    with pipeline_manager.publisher(context.get_admin_context()) as p:
        p([
            sample.Sample(name=cfg.CONF.sample_name,
                          type=cfg.CONF.sample_type,
                          unit=cfg.CONF.sample_unit,
                          volume=cfg.CONF.sample_volume,
                          user_id=cfg.CONF.sample_user,
                          project_id=cfg.CONF.sample_project,
                          resource_id=cfg.CONF.sample_resource,
                          timestamp=cfg.CONF.sample_timestamp,
                          # SECURITY NOTE(review): eval() executes arbitrary
                          # code from a CLI argument; ast.literal_eval would
                          # be safer for dict-shaped metadata — flagged only,
                          # behavior left unchanged.
                          resource_metadata=cfg.CONF.sample_metadata
                          and eval(cfg.CONF.sample_metadata))
        ])
Exemplo n.º 56
0
 def test_bound_duration(self):
     """_bound_duration yields a [le "now", ge "now - window"] pair.

     Freezes timeutils.utcnow() so the evaluator's upper bound is
     deterministic; the lower bound of 10:39:00 is 6 minutes before the
     frozen 10:45:00 "now".
     """
     timeutils.utcnow.override_time = datetime.datetime(2012, 7, 2, 10, 45)
     try:
         constraint = self.evaluator._bound_duration(self.alarms[0], [])
         self.assertEqual(constraint, [
             {'field': 'timestamp',
              'op': 'le',
              'value': timeutils.utcnow().isoformat()},
             {'field': 'timestamp',
              'op': 'ge',
              'value': '2012-07-02T10:39:00'},
         ])
     finally:
         # Reset the override so the frozen clock cannot leak into other
         # tests (the original left it set for the rest of the test run).
         timeutils.utcnow.override_time = None