Code Example #1
    def create(self, req, body):
        '''
        :request url: /v2/{tenant_id}/pssr-url/{user_id}
        :param req: the WSGI request; user_id identifies the user to calculate for
        :param body: the request body containing the "pssr_tag" configuration
        :return: a dict of the form {"pssr_tag": {"results": ...}}
        '''
        import time
        request = body["pssr_tag"]
        LOG.info("PssrController pssr_conf,req = %s" % req)
        LOG.info("PssrController pssr_conf,request = %s" % request)
        context = req.environ['nova.context']
        physerver = request["physerver"]
        node_id = self.get_node_id_from_name(context, physerver)
        physerver_name = db.compute_node_get(
            context, node_id)["host"]  # the host name includes the domain

        intf_conf = self.pssr_get_intf_conf(context, node_id, request["name"])
        intf_conf = intf_conf["pssr_tag"]["results"]

        # Reject configuring an interface that is already configured, or clearing one that is not configured.
        if intf_conf["network_type"] != "" and request[
                "network_type"] != "none":
            return {
                "pssr_tag": {
                    "results": "config fail,network have been config"
                }
            }
        if intf_conf["network_type"] == "" and request[
                "network_type"] == "none":
            return {"pssr_tag": {"results": "config fail,repeat config"}}

        request["last_network_type"] = intf_conf["network_type"]
        cctxt = rpc.get_client(
            messaging.Target(topic="compute", server=physerver_name))
        cctxt = cctxt.prepare(version='4.11')
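        # Record the current nova-compute PID so a later PID change can confirm the service restarted.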
        pid1 = cctxt.call(get_admin_context(),
                          "get_nova_compute_pid",
                          req=request)
        cctxt.cast(get_admin_context(), "nfvi_pssr_conf", req=request)
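        # Poll for up to 10 seconds: a changed nova-compute PID is treated as confirmation that the config was applied.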
        for i in range(0, 10):
            time.sleep(1)
            pid2 = cctxt.call(get_admin_context(),
                              "get_nova_compute_pid",
                              req=request)
            #LOG.info("PssrController nfvi_pssr_conf,pid1 = %s,pid2 = %s" % (pid1,pid2))
            if pid1 == pid2:
                continue
            else:
                results = {"status": "success"}
                self.pssr_update_intf_conf(context, node_id, intf_conf,
                                           request)
                LOG.info(
                    "PssrController nfvi_pssr_conf,pid2 = %s,results = %s" %
                    (pid2, results))
                return {"pssr_tag": {"results": results}}

        LOG.error("nfvi_pssr_conf get_nova_compute_pid fail,pid2 = %s", pid2)
        return {"pssr_tag": {"results": "config fail"}}
Code Example #2
 def test_admin_no_overwrite(self):
     # If there is already a context in the cache, creating an admin
     # context will not overwrite it.
     ctx1 = context.RequestContext(overwrite=True)
     context.get_admin_context()
     self.assertIs(context.get_current(), ctx1)
     self.assertFalse(ctx1.is_admin)
Code Example #3
 def setUp(self):
     super(TestImagePollster, self).setUp()
     self.context = context.get_admin_context()
     self.manager = TestManager()
     self.useFixture(mockpatch.PatchObject(
         glance._Base, 'get_glance_client',
         side_effect=self.fake_get_glance_client))
Code Example #4
    def _process_response(self, request, response=None):
        # NOTE(gordc): handle the case where an error occurred while processing the request
        if 'cadf_event' not in request.environ:
            self._create_event(request)
        event = request.environ['cadf_event']

        if response:
            if 200 <= response.status_int < 400:
                result = taxonomy.OUTCOME_SUCCESS
            else:
                result = taxonomy.OUTCOME_FAILURE
            event.reason = reason.Reason(
                reasonType='HTTP', reasonCode=str(response.status_int))
        else:
            result = taxonomy.UNKNOWN

        event.outcome = result
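        # Record this middleware as the modifier of the event before emitting the audit notification.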
        event.add_reporterstep(
            reporterstep.Reporterstep(
                role=cadftype.REPORTER_ROLE_MODIFIER,
                reporter=resource.Resource(id='target'),
                reporterTime=timestamp.get_utc_now()))

        self._emit_audit(context.get_admin_context().to_dict(),
                         'audit.http.response', event.as_dict())
Code Example #5
File: test_engine_api.py Project: weizai118/mogan
 def test_lock_by_admin(self):
     fake_server = db_utils.get_test_server(user_id=self.user_id,
                                            project_id=self.project_id)
     fake_server_obj = self._create_fake_server_obj(fake_server)
     admin_context = context.get_admin_context()
     self.engine_api.lock(admin_context, fake_server_obj)
     self.assertEqual('admin', fake_server_obj.locked_by)
Code Example #6
    def __init__(self):
        """Initialize Brocade Plugin.

        Specify switch address and db configuration.
        """

        super(BrocadePluginV2, self).__init__()
        self.supported_extension_aliases = [
            "binding", "security-group", "external-net", "router",
            "extraroute", "agent", "l3_agent_scheduler", "dhcp_agent_scheduler"
        ]

        self.physical_interface = (
            cfg.CONF.PHYSICAL_INTERFACE.physical_interface)
        self.base_binding_dict = self._get_base_binding_dict()
        portbindings_base.register_port_dict_function()
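        # Keep a plugin-wide admin context bound to its own DB session, used by the VLAN bitmap.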
        self.ctxt = oslo_context.get_admin_context()
        self.ctxt.session = db.get_session()
        self._vlan_bitmap = vbm.VlanBitmap(self.ctxt)
        self._setup_rpc()
        self.network_scheduler = importutils.import_object(
            cfg.CONF.network_scheduler_driver)
        self.router_scheduler = importutils.import_object(
            cfg.CONF.router_scheduler_driver)
        self.brocade_init()
Code Example #7
 def _insert_sample_data(self, aggregator):
     for _ in range(100):
         sample = copy.copy(self.SAMPLE)
         sample.resource_id = sample.resource_id + str(self._sample_offset)
         sample.timestamp = timeutils.isotime()
         aggregator.handle_sample(context.get_admin_context(), sample)
         self._sample_offset += 1
Code Example #8
    def start(self):
        super(RPCService, self).start()
        target = messaging.Target(topic=self.topic, server=self.host)
        endpoints = [self.manager]
        serializer = objects_base.KongmingObjectSerializer()
        self.rpcserver = rpc.get_server(target, endpoints, serializer)
        self.rpcserver.start()
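        # Run the manager's periodic tasks under an admin context on a dynamic timer.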
        admin_context = context.get_admin_context()
        self.tg.add_dynamic_timer(self.manager.periodic_tasks,
                                  periodic_interval_max=CONF.periodic_interval,
                                  context=admin_context)
        LOG.info(
            'Created RPC server for service %(service)s on host '
            '%(host)s.', {
                'service': self.topic,
                'host': self.host
            })
        if self.init_notification_listner:
            transport = messaging.get_notification_transport(CONF)
            targets = [
                messaging.Target(topic='versioned_notifications',
                                 exchange='nova')
            ]
            endpoints = [notification_handler.NotificationEndpoint()]
            self.notification_listener = messaging.get_notification_listener(
                transport,
                targets,
                endpoints,
                executor='threading',
                pool='kongming-notification-handler')

            self.notification_listener.start()
Code Example #9
    def test_size_bounded(self):
        aggregator = conversions.AggregatorTransformer(size="100")
        self._insert_sample_data(aggregator)

        samples = aggregator.flush(context.get_admin_context())

        self.assertEqual(100, len(samples))
Code Example #10
File: manage.py Project: yangyuan/oslo.prototype
 def list(self, host=None, service=None):
     """Show a list of all running services. Filter by host & service
     name
     """
     servicegroup_api = servicegroup.API()
     ctxt = context.get_admin_context()
     services = db.service_get_all(ctxt)
     services = availability_zones.set_availability_zones(ctxt, services)
     if host:
         services = [s for s in services if s['host'] == host]
     if service:
         services = [s for s in services if s['binary'] == service]
     print_format = "%-16s %-36s %-16s %-10s %-5s %-10s"
     print(print_format % (
                 _('Binary'),
                 _('Host'),
                 _('Zone'),
                 _('Status'),
                 _('State'),
                 _('Updated_At')))
     for svc in services:
         alive = servicegroup_api.service_is_up(svc)
         art = (alive and ":-)") or "XXX"
         active = 'enabled'
         if svc['disabled']:
             active = 'disabled'
         print(print_format % (svc['binary'], svc['host'],
                               svc['availability_zone'], active, art,
                               svc['updated_at']))
Code Example #11
File: notification.py Project: archerslaw/ceilometer
    def start(self):
        super(NotificationService, self).start()
        self.pipeline_manager = pipeline.setup_pipeline()
        if cfg.CONF.notification.store_events:
            self.event_pipeline_manager = pipeline.setup_event_pipeline()

        transport = messaging.get_transport()
        self.partition_coordinator = coordination.PartitionCoordinator()
        self.partition_coordinator.start()

        event_pipe_manager = None
        if cfg.CONF.notification.workload_partitioning:
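            # With workload partitioning, wrap each pipeline in a transport manager and share an admin context.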
            pipe_manager = pipeline.SamplePipelineTransportManager()
            for pipe in self.pipeline_manager.pipelines:
                pipe_manager.add_transporter(
                    (pipe.source.support_meter,
                     self._get_notifier(transport, pipe)))
            if cfg.CONF.notification.store_events:
                event_pipe_manager = pipeline.EventPipelineTransportManager()
                for pipe in self.event_pipeline_manager.pipelines:
                    event_pipe_manager.add_transporter(
                        (pipe.source.support_event,
                         self._get_notifier(transport, pipe)))

            self.ctxt = context.get_admin_context()
            self.group_id = self.NOTIFICATION_NAMESPACE
        else:
            # FIXME(sileht): the endpoint uses the notification_topics option
            # and it should not, because this is an oslo_messaging option,
            # not a ceilometer one. Until we have another way to get
            # notification_topics, we must create a transport to ensure
            # the option has been registered by oslo_messaging.
            messaging.get_notifier(transport, '')
            pipe_manager = self.pipeline_manager
            if cfg.CONF.notification.store_events:
                event_pipe_manager = self.event_pipeline_manager
            self.group_id = None

        self.listeners, self.pipeline_listeners = [], []
        self._configure_main_queue_listeners(pipe_manager, event_pipe_manager)

        if cfg.CONF.notification.workload_partitioning:
            self.partition_coordinator.join_group(self.group_id)
            self._configure_pipeline_listeners()
            self.partition_coordinator.watch_group(self.group_id,
                                                   self._refresh_agent)

            self.tg.add_timer(cfg.CONF.coordination.heartbeat,
                              self.partition_coordinator.heartbeat)
            self.tg.add_timer(cfg.CONF.coordination.check_watchers,
                              self.partition_coordinator.run_watchers)

        if not cfg.CONF.notification.disable_non_metric_meters:
            LOG.warning(
                _LW('Non-metric meters may be collected. It is highly '
                    'advisable to disable these meters using '
                    'ceilometer.conf or the pipeline.yaml'))
        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)
Code Example #12
 def _start_periodic_killer(self):
     self.old_instance_killer = OldPreemptibleKiller()
     LOG.info('Starting Periodic Killer')
     admin_context = context.get_admin_context()
     self.tg.add_dynamic_timer(
         self.old_instance_killer.periodic_tasks,
         periodic_interval_max=CONF.aardvark.killer_interval,
         context=admin_context)
Code Example #13
    def test_size_unbounded(self):
        aggregator = conversions.AggregatorTransformer(size="0",
                                                       retention_time="300")
        self._insert_sample_data(aggregator)

        samples = aggregator.flush(context.get_admin_context())

        self.assertEqual([], samples)
Code Example #14
 def setUp(self):
     super(QoSAgentRpcTestCase, self).setUp()
     self.ctxt = oslo_context.get_admin_context()
     self.fake_policy = {"fake": "qos"}
     rpc = mock.Mock()
     rpc.get_policy_for_qos.return_value = self.fake_policy
     self.agent = qos_rpc.QoSAgentRpc(self.ctxt, rpc)
     self.agent.qos = mock.Mock()
Code Example #15
File: service.py Project: yangyuan/oslo.prototype
 def kill(self):
     """Destroy the service object in the datastore."""
     self.stop()
     try:
         self.conductor_api.service_destroy(context.get_admin_context(),
                                            self.service_id)
     except exception.NotFound:
         LOG.warning(_LW('Service killed that has no database entry'))
Code Example #16
 def _start_state_calculation(self):
     self.state_calculator = SystemStateCalculator()
     LOG.info('Starting Periodic System State Calculation')
     admin_context = context.get_admin_context()
     self.tg.add_dynamic_timer(
         self.state_calculator.periodic_tasks,
         periodic_interval_max=CONF.aardvark.periodic_interval,
         context=admin_context)
Code Example #17
File: test_engine_api.py Project: weizai118/mogan
 def test_unlock_by_admin(self):
     fake_server = db_utils.get_test_server(user_id=self.user_id,
                                            project_id=self.project_id,
                                            locked_by='owner')
     fake_server_obj = self._create_fake_server_obj(fake_server)
     admin_context = context.get_admin_context()
     self.engine_api.unlock(admin_context, fake_server_obj)
     self.assertIsNone(fake_server_obj.locked_by)
Code Example #18
 def setUp(self):
     super(_BaseTestVPNPollster, self).setUp()
     self.addCleanup(mock.patch.stopall)
     self.context = context.get_admin_context()
     self.manager = manager.AgentManager()
     plugin_base._get_keystone = mock.Mock()
     plugin_base._get_keystone.service_catalog.get_endpoints = (
         mock.MagicMock(return_value={'network': mock.ANY}))
Code Example #19
 def assign(self, uuid, alarms):
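     # Fan-out cast: every RPC server listening on this topic receives the assignment.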
     cctxt = self.client.prepare(fanout=True)
     return cctxt.cast(context.get_admin_context(),
                       'assign',
                       data={
                           'uuid': uuid,
                           'alarms': alarms
                       })
Code Example #20
 def presence(self, uuid, priority):
     cctxt = self.client.prepare(fanout=True)
     return cctxt.cast(context.get_admin_context(),
                       'presence',
                       data={
                           'uuid': uuid,
                           'priority': priority
                       })
Code Example #21
 def __init__(self, manager):
     super(EventsNotificationEndpoint, self).__init__()
     LOG.debug(_('Loading event definitions'))
     self.ctxt = context.get_admin_context()
     self.event_converter = event_converter.setup_events(
         extension.ExtensionManager(
             namespace='ceilometer.event.trait_plugin'))
     self.manager = manager
Code Example #22
File: test_engine_api.py Project: weizai118/mogan
 def test_power_locked_server_with_admin(self, mock_powered):
     fake_server = db_utils.get_test_server(user_id=self.user_id,
                                            project_id=self.project_id,
                                            locked_by='owner')
     fake_server_obj = self._create_fake_server_obj(fake_server)
     admin_context = context.get_admin_context()
     self.engine_api.power(admin_context, fake_server_obj, 'reboot')
     self.assertTrue(mock_powered.called)
Code Example #23
    def start(self):
        super(NotificationService, self).start()
        self.partition_coordinator = None
        self.coord_lock = threading.Lock()
        self.listeners, self.pipeline_listeners = [], []

        self.pipeline_manager = pipeline.setup_pipeline()

        if cfg.CONF.notification.store_events:
            self.event_pipeline_manager = pipeline.setup_event_pipeline()

        self.transport = messaging.get_transport()

        if cfg.CONF.notification.workload_partitioning:
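            # Partitioned workers share an admin context and coordinate through a common group id.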
            self.ctxt = context.get_admin_context()
            self.group_id = self.NOTIFICATION_NAMESPACE
            self.partition_coordinator = coordination.PartitionCoordinator()
            self.partition_coordinator.start()
        else:
            # FIXME(sileht): endpoint uses the notification_topics option
            # and it should not because this is an oslo_messaging option
            # not a ceilometer. Until we have something to get the
            # notification_topics in another way, we must create a transport
            # to ensure the option has been registered by oslo_messaging.
            messaging.get_notifier(self.transport, '')
            self.group_id = None

        self.pipe_manager = self._get_pipe_manager(self.transport,
                                                   self.pipeline_manager)
        self.event_pipe_manager = self._get_event_pipeline_manager(
            self.transport)

        self.listeners, self.pipeline_listeners = [], []
        self._configure_main_queue_listeners(self.pipe_manager,
                                             self.event_pipe_manager)

        if cfg.CONF.notification.workload_partitioning:
            # join group after all manager set up is configured
            self.partition_coordinator.join_group(self.group_id)
            self.partition_coordinator.watch_group(self.group_id,
                                                   self._refresh_agent)
            self.tg.add_timer(cfg.CONF.coordination.heartbeat,
                              self.partition_coordinator.heartbeat)
            self.tg.add_timer(cfg.CONF.coordination.check_watchers,
                              self.partition_coordinator.run_watchers)
            # configure pipelines after all coordination is configured.
            self._configure_pipeline_listeners()

        if not cfg.CONF.notification.disable_non_metric_meters:
            LOG.warning(
                _LW('Non-metric meters may be collected. It is highly '
                    'advisable to disable these meters using '
                    'ceilometer.conf or the pipeline.yaml'))
        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)

        self.init_pipeline_refresh()
Code Example #24
 def test_notify_alarm_log_action(self):
     self.service.notify_alarm(
         context.get_admin_context(), {
             'actions': ['log://'],
             'alarm_id': 'foobar',
             'condition': {
                 'threshold': 42
             }
         })
Code Example #25
    def process_request(self, request):
        request.environ['HTTP_X_SERVICE_NAME'] = \
            self.service_name or request.host
        payload = {
            'request': self.environ_to_dict(request.environ),
        }

        self.notifier.info(context.get_admin_context(), 'http.request',
                           payload)
Code Example #26
File: manage.py Project: yangyuan/oslo.prototype
    def describe_resource(self, host):
        """Describes cpu/memory/hdd info for host.

        :param host: hostname.

        """
        try:
            result = self._show_host_resources(context.get_admin_context(),
                                               host=host)
        except exception.PrototypeException as ex:
            print(_("error: %s") % ex)
            return 2

        if not isinstance(result, dict):
            print(_('An unexpected error has occurred.'))
            print(_('[Result]'), result)
        else:
            # Print the total and used_now rows.
            # NOTE: the host name field is 16 characters wide.
            print('%(a)-25s%(b)16s%(c)8s%(d)8s%(e)8s' % {"a": _('HOST'),
                                                         "b": _('PROJECT'),
                                                         "c": _('cpu'),
                                                         "d": _('mem(mb)'),
                                                         "e": _('hdd')})
            print(('%(a)-16s(total)%(b)26s%(c)8s%(d)8s' %
                   {"a": host,
                    "b": result['resource']['vcpus'],
                    "c": result['resource']['memory_mb'],
                    "d": result['resource']['local_gb']}))

            print(('%(a)-16s(used_now)%(b)23s%(c)8s%(d)8s' %
                   {"a": host,
                    "b": result['resource']['vcpus_used'],
                    "c": result['resource']['memory_mb_used'],
                    "d": result['resource']['local_gb_used']}))

            # Printing a used_max
            cpu_sum = 0
            mem_sum = 0
            hdd_sum = 0
            for p_id, val in result['usage'].items():
                cpu_sum += val['vcpus']
                mem_sum += val['memory_mb']
                hdd_sum += val['root_gb']
                hdd_sum += val['ephemeral_gb']
            print('%(a)-16s(used_max)%(b)23s%(c)8s%(d)8s' % {"a": host,
                                                             "b": cpu_sum,
                                                             "c": mem_sum,
                                                             "d": hdd_sum})

            for p_id, val in result['usage'].items():
                print('%(a)-25s%(b)16s%(c)8s%(d)8s%(e)8s' % {
                        "a": host,
                        "b": p_id,
                        "c": val['vcpus'],
                        "d": val['memory_mb'],
                        "e": val['root_gb'] + val['ephemeral_gb']})
Code Example #27
File: plugin_base.py Project: VeinFu/ceilometer_ha
 def _process_notifications(self, priority, notifications):
     for notification in notifications:
         try:
             notification = messaging.convert_to_old_notification_format(
                 priority, notification)
             self.to_samples_and_publish(context.get_admin_context(),
                                         notification)
         except Exception:
             LOG.error(_LE('Fail to process notification'), exc_info=True)
Code Example #28
 def test_rebuild_locked_server_with_admin(self, mock_rebuild):
     fake_server = db_utils.get_test_server(user_id=self.user_id,
                                            project_id=self.project_id,
                                            locked=True,
                                            locked_by='owner')
     fake_server_obj = self._create_fake_server_obj(fake_server)
     admin_context = context.get_admin_context()
     self.engine_api.rebuild(admin_context, fake_server_obj)
     self.assertTrue(mock_rebuild.called)
Code Example #29
 def get_routers(self, router_id=None):
     """Make a remote process call to retrieve the sync data for routers."""
     router_id = [router_id] if router_id else None
     # yes the plural is intended for havana compliance
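     # The admin context is converted to a dict so it can be serialized for the RPC call.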
     retval = self._client.call(context.get_admin_context().to_dict(),
                                'sync_routers',
                                host=self.host,
                                router_ids=router_id)  # plural
     return retval
Code Example #30
def notify(context, message):
    """Sends a notification as a meter using Ceilometer pipelines."""
    if not _pipeline_manager:
        _load_pipeline_manager()
    if not _notification_manager:
        _load_notification_manager()
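    # Fall back to an admin context when the caller did not supply one.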
    _notification_manager.map_method('to_samples_and_publish',
                                     context=context
                                     or req_context.get_admin_context(),
                                     notification=message)