Example #1
    def get_one(self, message_id):
        """Return a single event with the given message id.

        :param message_id: Message ID of the Event to be returned
        """
        rbac.enforce("events:show", pecan.request)
        filters = _build_rbac_query_filters()
        t_filter = filters['t_filter']
        admin_proj = filters['admin_proj']
        event_filter = storage.EventFilter(traits_filter=t_filter,
                                           admin_proj=admin_proj,
                                           message_id=message_id)
        events = list(pecan.request.event_storage_conn.get_events(
            event_filter))
        if not events:
            raise base.EntityNotFound(_("Event"), message_id)

        if len(events) > 1:
            LOG.error(
                _("More than one event with "
                  "id %s returned from storage driver") % message_id)

        event = events[0]

        return Event(message_id=event.message_id,
                     event_type=event.event_type,
                     generated=event.generated,
                     traits=event.traits,
                     raw=event.raw)
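The examples on this page all raise base.EntityNotFound so that the missing entity is reported to the API client as an HTTP 404. A minimal sketch of what that exception class looks like, modeled on Ceilometer's v2 API base module (the exact message wording and the identity "_" helper below are assumptions, not the project's code):

from wsme.exc import ClientSideError


def _(msg):
    # stand-in for the project's i18n translation helper
    return msg


class EntityNotFound(ClientSideError):
    """Signal that a requested entity does not exist; rendered as a 404."""

    def __init__(self, entity, id):
        super(EntityNotFound, self).__init__(
            _("%(entity)s %(id)s Not Found") % {'entity': entity, 'id': id},
            status_code=404)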
Example #2
    def get_one(self, message_id):
        """Return a single event with the given message id.

        :param message_id: Message ID of the Event to be returned
        """
        event_filter = storage.EventFilter(message_id=message_id)
        events = list(pecan.request.event_storage_conn.get_events(
            event_filter))
        if not events:
            raise base.EntityNotFound(_("Event"), message_id)

        if len(events) > 1:
            LOG.error(
                _("More than one event with "
                  "id %s returned from storage driver") % message_id)

        event = events[0]

        return Event(message_id=event.message_id,
                     event_type=event.event_type,
                     generated=event.generated,
                     traits=event.traits,
                     raw=event.raw)
Example #3
    def get_one(self, pipeline_name):
        """Retrieve details about one pipeline.

        :param pipeline_name: The name of the pipeline.
        """
        # authorized_project = acl.get_limited_to_project(
        #                           pecan.request.headers)
        pipelines = self.get_pipelines()
        for p in pipelines:
            # The pipeline name HERE is the same as the sink name
            if p.get('name', None) == pipeline_name:
                return Pipeline.from_dict(p)
        raise base.EntityNotFound(_('Pipeline'), pipeline_name)
Example #4
    def get_one(self, resource_id):
        """Retrieve details about one resource.

        :param resource_id: The UUID of the resource.
        """

        rbac.enforce('get_resource', pecan.request)

        authorized_project = rbac.get_limited_to_project(pecan.request.headers)
        resources = list(pecan.request.storage_conn.get_resources(
            resource=resource_id, project=authorized_project))
        if not resources:
            raise base.EntityNotFound(_('Resource'), resource_id)
        return Resource.from_db_and_links(resources[0],
                                          self._resource_links(resource_id))
Example #5
    def get_one(self, sample_id):
        """Return a sample.

        :param sample_id: the id of the sample.
        """

        rbac.enforce('get_sample', pecan.request)

        f = storage.SampleFilter(message_id=sample_id)

        samples = list(pecan.request.storage_conn.get_samples(f))
        if not samples:
            raise base.EntityNotFound(_('Sample'), sample_id)

        return Sample.from_db_model(samples[0])
Example #6
    def get_one(self, resource_id):
        """Retrieve details about one resource.

        :param resource_id: The UUID of the resource.
        """
        rbac.enforce('get_resource', pecan.request)
        # In case we have special character in resource id, for example, swift
        # can generate samples with resource id like
        # 29f809d9-88bb-4c40-b1ba-a77a1fcf8ceb/glance
        resource_id = urllib.parse.unquote(resource_id)

        authorized_project = rbac.get_limited_to_project(pecan.request.headers)
        resources = list(pecan.request.storage_conn.get_resources(
            resource=resource_id, project=authorized_project))
        if not resources:
            raise base.EntityNotFound(_('Resource'), resource_id)
        return Resource.from_db_and_links(resources[0],
                                          self._resource_links(resource_id))
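To illustrate the unquoting step above: a swift-generated resource id that contains a slash arrives percent-encoded in the URL, and urllib.parse.unquote restores it. A quick standalone check (not part of the controller):

import urllib.parse

encoded = '29f809d9-88bb-4c40-b1ba-a77a1fcf8ceb%2Fglance'
print(urllib.parse.unquote(encoded))
# -> 29f809d9-88bb-4c40-b1ba-a77a1fcf8ceb/glance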
Example #7
    def validate_alarm(cls, alarm):
        super(MetricOfResourceRule, cls).validate_alarm(alarm)

        rule = alarm.gnocchi_resources_threshold_rule
        ks_client = keystone_client.get_client()
        gnocchi_url = cfg.CONF.alarms.gnocchi_url
        headers = {
            'Content-Type': "application/json",
            'X-Auth-Token': ks_client.auth_token
        }
        try:
            r = requests.get(
                "%s/v1/resource/%s/%s" %
                (gnocchi_url, rule.resource_type, rule.resource_id),
                headers=headers)
        except requests.ConnectionError as e:
            raise GnocchiUnavailable(e)
        if r.status_code == 404:
            raise base.EntityNotFound('gnocchi resource', rule.resource_id)
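        # Integer division: any status in 200-399 yields 1 and is treated as
        # success; 1xx, 4xx and 5xx responses are relayed to the client below.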
        elif r.status_code // 200 != 1:
            raise base.ClientSideError(r.content, status_code=r.status_code)
Example #8
    def put(self, data):
        """Modify this pipeline."""
        # authorized_project = acl.get_limited_to_project(
        #                           pecan.request.headers)
        # note(sileht): workaround for
        # https://bugs.launchpad.net/wsme/+bug/1220678
        Pipeline.validate(data)

        # Get the matching csv publisher for the pipeline.
        yaml_cfg = []
        conf = pecan.request.cfg
        cfg_file = conf.pipeline_cfg_file
        if not os.path.exists(cfg_file):
            cfg_file = conf.find_file(cfg_file)
        with open(cfg_file) as fap:
            filedata = fap.read()
            yaml_cfg = yaml.safe_load(filedata)

        # Parse and convert to Pipeline objects
        pipelines = self.get_pipelines_from_cfg(conf, yaml_cfg)

        csv = None
        for p in pipelines:
            if p.sink.name == data.name:
                for pb in p.sink.publishers:
                    if isinstance(pb, csvfile.CSVFilePublisher):
                        csv = pb
                        break

        if not csv:
            raise base.EntityNotFound(_('Pipeline'), data.name)

        # Alter the settings.
        csv.change_settings(enabled=data.enabled,
                            location=data.location,
                            max_bytes=data.max_bytes,
                            backup_count=data.backup_count,
                            compress=data.compress)
        new_csv = csv.as_yaml()

        # Set new_csv as the matching csvfile publisher for the sink
        sinks = yaml_cfg.get('sinks', [])
        for sink in sinks:
            if sink.get('name') == data.name:
                publishers = sink.get('publishers', [])
                for index, item in enumerate(publishers):
                    if item.strip().lower().startswith('csvfile:'):
                        publishers[index] = new_csv

        # Re-process in-memory version of yaml to prove it is still good
        # This should raise an exception if the syntax became invalid

        pipelines = self.get_pipelines_from_cfg(conf, yaml_cfg)

        # Must be written out as a single yaml document (i.e. use dump, not dump_all)
        with open(cfg_file, 'w') as stream:
            # Write a YAML representation of the updated config back to the file
            yaml.safe_dump(yaml_cfg, stream, default_flow_style=False)

        # Emit SIGHUP to all ceilometer processes to re-read config
        hup_list = ['ceilometer-collector', 'ceilometer-agent-notification']
        for h in hup_list:
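            # Equivalent shell pipeline: pgrep -f <name> | xargs --no-run-if-empty kill -HUP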
            p1 = subprocess.Popen(['pgrep', '-f', h], stdout=subprocess.PIPE)
            subprocess.Popen(['xargs', '--no-run-if-empty', 'kill', '-HUP'],
                             stdin=p1.stdout,
                             stdout=subprocess.PIPE)
        pecan.request.pipeline_manager = \
            pipeline_t.setup_pipeline(pecan.request.cfg)
        # Re-parse and return value from file
        # This protects us if the file was not stored properly
        pipelines = self.get_pipelines()
        for p in pipelines:
            if p.get('name', None) == data.name:
                return Pipeline.from_dict(p)
        # If we are here, the yaml got corrupted somehow
        raise wsme.exc.ClientSideError('pipeline yaml is corrupted')