def __init__(self, context, instance, destination, block_migration,
             disk_over_commit):
    """Capture live-migration parameters and build the API clients used later.

    :param context: request context the migration runs under
    :param instance: instance object being migrated
    :param destination: requested destination host (may be None elsewhere)
    :param block_migration: whether to do a block (no shared storage) migration
    :param disk_over_commit: whether disk over-commit is allowed on the target
    """
    self.context = context
    self.instance = instance
    self.destination = destination
    self.block_migration = block_migration
    self.disk_over_commit = disk_over_commit
    # The migration source is wherever the instance currently lives.
    self.source = instance.host
    # Populated later by the pre-migration checks.
    self.migrate_data = None
    self.compute_rpcapi = compute_rpcapi.ComputeAPI()
    self.servicegroup_api = servicegroup.API()
    self.scheduler_client = scheduler_client.SchedulerClient()
    self.image_api = image.API()
def __init__(self, context, instance, destination, block_migration,
             disk_over_commit, select_hosts_callback):
    """Capture live-migration parameters and build the API clients used later.

    :param select_hosts_callback: callable used to pick a destination host
        when none was supplied; semantics are defined by the caller
        (not visible here -- confirm against call sites).
    """
    self.context = context
    self.instance = instance
    self.destination = destination
    self.block_migration = block_migration
    self.disk_over_commit = disk_over_commit
    self.select_hosts_callback = select_hosts_callback
    # The migration source is the host the instance currently runs on
    # (dict-style access: this variant receives a dict-like instance).
    self.source = instance['host']
    # Populated later by the pre-migration checks.
    self.migrate_data = None
    self.compute_rpcapi = compute_rpcapi.ComputeAPI()
    self.servicegroup_api = servicegroup.API()
    self.image_service = glance.get_default_image_service()
def __init__(self, context, instance, destination, block_migration,
             disk_over_commit, pclm):
    """Capture live-migration parameters and build the API clients used later.

    :param pclm: collaborator object stored for later use; its semantics
        are not visible in this block -- TODO confirm against callers.
    """
    self.context = context
    self.instance = instance
    self.destination = destination
    self.block_migration = block_migration
    self.disk_over_commit = disk_over_commit
    # The migration source is wherever the instance currently lives.
    self.source = instance.host
    # Populated later by the pre-migration checks.
    self.migrate_data = None
    self.compute_rpcapi = compute_rpcapi.ComputeAPI()
    self.servicegroup_api = servicegroup.API()
    self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
    self.image_service = glance.get_default_image_service()
    self.pclm = pclm
def test_find_destination_works_with_no_request_spec(self):
    """_find_destination builds a RequestSpec from components when given none.

    Verifies the task asks the scheduler for a destination, records the
    cell mapping on the spec, and runs compatibility/live-migration checks
    against the chosen host.
    """
    task = live_migrate.LiveMigrationTask(
        self.context, self.instance, self.destination,
        self.block_migration, self.disk_over_commit, self.migration,
        compute_rpcapi.ComputeAPI(), servicegroup.API(),
        scheduler_client.SchedulerClient(), request_spec=None)
    another_spec = objects.RequestSpec()
    self.instance.flavor = objects.Flavor()
    self.instance.numa_topology = None
    self.instance.pci_requests = None

    # NOTE: decorators apply bottom-up, so the bottom decorator maps to
    # the first parameter of do_test. Do not reorder.
    @mock.patch('nova.objects.Instance.is_volume_backed')
    @mock.patch.object(task, '_call_livem_checks_on_host')
    @mock.patch.object(task, '_check_compatible_with_source_hypervisor')
    @mock.patch.object(task.scheduler_client, 'select_destinations')
    @mock.patch.object(objects.RequestSpec, 'from_components')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(utils, 'get_image_from_system_metadata')
    def do_test(get_image, setup_ig, from_components, select_dest,
                check_compat, call_livem_checks, is_volume_backed):
        get_image.return_value = "image"
        from_components.return_value = another_spec
        # Scheduler returns one candidate host with its node and limits.
        select_dest.return_value = [{
            'host': 'host1',
            'nodename': 'node1',
            'limits': 'fake-limits'
        }]
        is_volume_backed.return_value = False
        self.assertEqual(("host1", 'fake-limits'),
                         task._find_destination())
        get_image.assert_called_once_with(self.instance.system_metadata)
        setup_ig.assert_called_once_with(self.context, another_spec)
        select_dest.assert_called_once_with(self.context, another_spec,
                                            [self.instance.uuid])
        # Make sure the request_spec was updated to include the cell
        # mapping.
        self.assertIsNotNone(another_spec.requested_destination.cell)
        check_compat.assert_called_once_with("host1")
        call_livem_checks.assert_called_once_with("host1",
                                                  limits='fake-limits')
    do_test()
def test_find_destination_works_with_no_request_spec(self):
    """_find_destination builds a RequestSpec from components when given none.

    Newer variant: the scheduler returns Selection objects
    (return_objects=True), the spec is healed for boot-from-volume and
    network metadata, and _find_destination returns (host, node).
    """
    task = live_migrate.LiveMigrationTask(
        self.context, self.instance, self.destination,
        self.block_migration, self.disk_over_commit, self.migration,
        compute_rpcapi.ComputeAPI(), servicegroup.API(),
        scheduler_client.SchedulerClient(), request_spec=None)
    another_spec = objects.RequestSpec()
    self.instance.flavor = objects.Flavor()
    self.instance.numa_topology = None
    self.instance.pci_requests = None

    # NOTE: decorators apply bottom-up; the bottom decorator maps to the
    # first parameter of do_test. Do not reorder.
    @mock.patch.object(task, '_call_livem_checks_on_host')
    @mock.patch.object(task, '_check_compatible_with_source_hypervisor')
    @mock.patch.object(task.scheduler_client, 'select_destinations')
    @mock.patch.object(objects.RequestSpec, 'from_components')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(utils, 'get_image_from_system_metadata')
    def do_test(get_image, setup_ig, from_components, select_dest,
                check_compat, call_livem_checks):
        get_image.return_value = "image"
        from_components.return_value = another_spec
        # fake_selection1 is a module-level fixture (defined elsewhere);
        # presumably a Selection with host 'host1' / node 'node1'.
        select_dest.return_value = [[fake_selection1]]
        self.assertEqual(("host1", "node1"), task._find_destination())
        get_image.assert_called_once_with(self.instance.system_metadata)
        setup_ig.assert_called_once_with(self.context, another_spec)
        self.ensure_network_metadata_mock.assert_called_once_with(
            self.instance)
        self.heal_reqspec_is_bfv_mock.assert_called_once_with(
            self.context, another_spec, self.instance)
        select_dest.assert_called_once_with(self.context, another_spec,
                                            [self.instance.uuid],
                                            return_objects=True,
                                            return_alternates=False)
        # Make sure the request_spec was updated to include the cell
        # mapping.
        self.assertIsNotNone(another_spec.requested_destination.cell)
        check_compat.assert_called_once_with("host1")
        call_livem_checks.assert_called_once_with("host1")
    do_test()
def test_zookeeper_hierarchy_structure(self):
    """Test that hierarchy created by join method contains process id."""
    from zookeeper import NoNodeException
    self.servicegroup_api = servicegroup.API()
    service_id = {'topic': 'unittest', 'host': 'serviceC'}
    # Use the session object already owned by the zookeeper driver.
    session = self.servicegroup_api._driver._session
    # Build the znode path that the join() call is expected to create;
    # it embeds the current process id as the leaf node.
    pid = os.getpid()
    path = '/servicegroups/%s/%s/%s' % (service_id['topic'],
                                        service_id['host'],
                                        pid)
    # Sanity check: the node must not exist before join().
    self.assertRaises(NoNodeException, session.get, path)
    # Join the service group; this should create the node.
    self.servicegroup_api.join(service_id['host'],
                               service_id['topic'], None)
    # The "process id" node now exists and is readable.
    self.assertTrue(session.get(path))
def check_services():
    """Report liveness and enabled state of every non-API nova service.

    Iterates all services across cells and prints any service that the
    servicegroup API reports as down, and any that is administratively
    disabled.  API-tier binaries are skipped because they keep no
    service heartbeat record.  Prints "Service is OK" when nothing is
    wrong.
    """
    objects.register_all()
    host_api = compute.HostAPI()
    servicegroup_api = servicegroup.API()
    # Stateless API endpoints: no heartbeat, so liveness can't be checked.
    api_services = ('nova-osapi_compute', 'nova-ec2', 'nova-metadata')
    is_ok = True
    # NOTE: the original used Python-2-only `print` statements; the
    # parenthesized single-argument form below is valid on both 2 and 3.
    print("============================ services check ============================")
    for s in host_api.service_get_all(cxt, set_zones=True, all_cells=True):
        if s['binary'] in api_services:
            continue
        if not servicegroup_api.service_is_up(s):
            is_ok = False
            print("%s %s is down" % (s['host'], s['binary']))
        if s['disabled']:
            is_ok = False
            print("%s %s is disabled" % (s['host'], s['binary']))
    if is_ok:
        print("Service is OK")
def test_find_destination_works_with_no_request_spec(self):
    """_find_destination builds filter properties itself when no spec given.

    Oldest variant: the scheduler returns plain host dicts, the instance
    group is set up from hand-built filter properties (ignoring the
    source host), and _find_destination returns just the host name.
    """
    task = live_migrate.LiveMigrationTask(
        self.context, self.instance, self.destination,
        self.block_migration, self.disk_over_commit, self.migration,
        compute_rpcapi.ComputeAPI(), servicegroup.API(),
        scheduler_client.SchedulerClient(), request_spec=None)
    another_spec = objects.RequestSpec()
    self.instance.flavor = objects.Flavor()
    self.instance.numa_topology = None
    self.instance.pci_requests = None

    # NOTE: decorators apply bottom-up; the bottom decorator maps to the
    # first parameter of do_test. Do not reorder.
    @mock.patch.object(task, '_call_livem_checks_on_host')
    @mock.patch.object(task, '_check_compatible_with_source_hypervisor')
    @mock.patch.object(task.scheduler_client, 'select_destinations')
    @mock.patch.object(objects.RequestSpec, 'from_components')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(utils, 'get_image_from_system_metadata')
    def do_test(get_image, setup_ig, from_components, select_dest,
                check_compat, call_livem_checks):
        get_image.return_value = "image"
        from_components.return_value = another_spec
        select_dest.return_value = [{'host': 'host1'}]
        self.assertEqual("host1", task._find_destination())
        get_image.assert_called_once_with(self.instance.system_metadata)
        # The group setup must exclude the current (source) host.
        fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
        setup_ig.assert_called_once_with(
            self.context, fake_props,
            {'ignore_hosts': [self.instance_host]})
        select_dest.assert_called_once_with(self.context, another_spec,
                                            [self.instance.uuid])
        check_compat.assert_called_once_with("host1")
        call_livem_checks.assert_called_once_with("host1")
    do_test()
def __init__(self, host, binary, topic, manager, report_interval=None,
             periodic_enable=None, periodic_fuzzy_delay=None,
             periodic_interval_max=None, db_allowed=True, *args, **kwargs):
    """Initialize the service, then block until conductor is reachable.

    :param manager: dotted path of the manager class to instantiate
    :param db_allowed: whether this service may talk to the DB directly;
        passed through to the conductor API as use_local
    """
    super(Service, self).__init__()
    self.host = host
    self.binary = binary
    self.topic = topic
    self.manager_class_name = manager
    # NOTE(review): created before the manager on purpose; a sibling
    # variant of this constructor documents that the servicegroup driver
    # must be instantiated first -- preserve this ordering.
    self.servicegroup_api = servicegroup.API()
    manager_class = importutils.import_class(self.manager_class_name)
    self.manager = manager_class(host=self.host, *args, **kwargs)
    self.rpcserver = None
    self.report_interval = report_interval
    self.periodic_enable = periodic_enable
    self.periodic_fuzzy_delay = periodic_fuzzy_delay
    self.periodic_interval_max = periodic_interval_max
    # Keep the originals so the service can be recreated on restart.
    self.saved_args, self.saved_kwargs = args, kwargs
    self.backdoor_port = None
    # Block until the conductor service is ready before proceeding.
    self.conductor_api = conductor.API(use_local=db_allowed)
    self.conductor_api.wait_until_ready(context.get_admin_context())
def test_report_state(self):
    """Exercise the servicegroup driver's _report_state success/failure paths."""
    serv = self.useFixture(
        ServiceFixture(self._host, self._binary, self._topic)).serv
    serv.start()
    # Make sure the service record exists before reporting state.
    db.service_get_by_args(self._ctx, self._host, self._binary)
    self.servicegroup_api = servicegroup.API()

    # A successful report clears a prior disconnected flag.
    serv.model_disconnected = True
    self.servicegroup_api._driver._report_state(serv)
    self.assertFalse(serv.model_disconnected)

    # Breaking the driver's client (mc -- presumably memcached) makes
    # the report raise internally; the driver flags the disconnect.
    serv.model_disconnected = True
    self.servicegroup_api._driver.mc = None
    self.servicegroup_api._driver._report_state(serv)
    self.assertTrue(serv.model_disconnected)

    # Same failure path when the flag attribute doesn't exist yet:
    # _report_state must (re)create it.
    delattr(serv, 'model_disconnected')
    self.servicegroup_api._driver.mc = None
    self.servicegroup_api._driver._report_state(serv)
    self.assertTrue(serv.model_disconnected)
def test_service_is_up(self):
    """Check service_is_up() at, inside, and outside the down-time window."""
    fts_func = datetime.datetime.fromtimestamp
    fake_now = 1000
    down_time = 15
    self.flags(service_down_time=down_time)
    self.mox.StubOutWithMock(timeutils, 'utcnow')
    self.servicegroup_api = servicegroup.API()

    # NOTE(review): the flag is set from the local down_time (15) while
    # the timestamps below use self.down_time -- presumably set to the
    # same value in setUp; confirm, otherwise the boundary drifts.

    # Up (equal): a heartbeat exactly service_down_time old counts as up.
    timeutils.utcnow().AndReturn(fts_func(fake_now))
    service = {'updated_at': fts_func(fake_now - self.down_time),
               'created_at': fts_func(fake_now - self.down_time)}
    self.mox.ReplayAll()
    result = self.servicegroup_api.service_is_up(service)
    self.assertTrue(result)

    self.mox.ResetAll()
    # Up: heartbeat just inside the window.
    timeutils.utcnow().AndReturn(fts_func(fake_now))
    service = {'updated_at': fts_func(fake_now - self.down_time + 1),
               'created_at': fts_func(fake_now - self.down_time + 1)}
    self.mox.ReplayAll()
    result = self.servicegroup_api.service_is_up(service)
    self.assertTrue(result)

    self.mox.ResetAll()
    # Down: heartbeat older than the window.
    timeutils.utcnow().AndReturn(fts_func(fake_now))
    service = {'updated_at': fts_func(fake_now - self.down_time - 3),
               'created_at': fts_func(fake_now - self.down_time - 3)}
    self.mox.ReplayAll()
    result = self.servicegroup_api.service_is_up(service)
    self.assertFalse(result)
def __init__(self, host, binary, topic, manager, report_interval=None,
             periodic_enable=None, periodic_fuzzy_delay=None,
             periodic_interval_max=None, *args, **kwargs):
    """Initialize the service and, if needed, wait for conductor first.

    :param manager: dotted path of the manager class to instantiate
    """
    super(Service, self).__init__()
    self.host = host
    self.binary = binary
    self.topic = topic
    self.manager_class_name = manager
    # NOTE(review): created before the manager on purpose; a sibling
    # variant of this constructor documents that the servicegroup driver
    # must be instantiated first -- preserve this ordering.
    self.servicegroup_api = servicegroup.API()
    manager_class = importutils.import_class(self.manager_class_name)
    # If an indirection API is configured, remote calls go through
    # conductor, so wait for it before constructing the manager.
    if objects_base.NovaObject.indirection_api:
        conductor_api = conductor.API()
        conductor_api.wait_until_ready(context.get_admin_context())
    self.manager = manager_class(host=self.host, *args, **kwargs)
    self.rpcserver = None
    self.report_interval = report_interval
    self.periodic_enable = periodic_enable
    self.periodic_fuzzy_delay = periodic_fuzzy_delay
    self.periodic_interval_max = periodic_interval_max
    # Keep the originals so the service can be recreated on restart.
    self.saved_args, self.saved_kwargs = args, kwargs
    self.backdoor_port = None
    setup_profiler(binary, self.host)
def setUp(self):
    """Stub out the compute and image services used by scheduler tests."""
    super(SchedulerTestCase, self).setUp()
    self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)

    def fake_show(meh, context, id):
        # Minimal glance 'show' stub: any truthy id yields a canned
        # active image; a falsy id raises ImageNotFound.
        if id:
            return {'id': id, 'min_disk': None, 'min_ram': None,
                    'name': 'fake_name',
                    'status': 'active',
                    'properties': {'kernel_id': 'fake_kernel_id',
                                   'ramdisk_id': 'fake_ramdisk_id',
                                   'something_else': 'meow'}}
        else:
            raise exception.ImageNotFound(image_id=id)

    fake_image.stub_out_image_service(self.stubs)
    self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
    self.image_service = glance.get_default_image_service()
    # Instantiate the driver class under test (set by subclasses).
    self.driver = self.driver_cls()
    self.context = context.RequestContext('fake_user', 'fake_project')
    self.topic = 'fake_topic'
    self.servicegroup_api = servicegroup.API()
def __init__(self, host, binary, topic, manager, report_interval=None,
             periodic_interval=None, periodic_fuzzy_delay=None,
             *args, **kwargs):
    """Initialize the service and instantiate its manager.

    :param manager: dotted path of the manager class to instantiate
    """
    self.host = host
    self.binary = binary
    self.topic = topic
    self.manager_class_name = manager
    manager_class = importutils.import_class(self.manager_class_name)
    self.manager = manager_class(host=self.host, *args, **kwargs)
    self.report_interval = report_interval
    self.periodic_interval = periodic_interval
    self.periodic_fuzzy_delay = periodic_fuzzy_delay
    # Keep the originals so the service can be recreated on restart.
    self.saved_args, self.saved_kwargs = args, kwargs
    # Timers started later (reporting / periodic tasks) are tracked here.
    self.timers = []
    self.backdoor_port = None
    self.servicegroup_api = servicegroup.API()
def list(self, host=None, service=None):
    """Print a table of all known services, optionally filtered.

    :param host: if given, only show services running on this host
    :param service: if given, only show services with this binary name
    """
    sg_api = servicegroup.API()
    admin_ctxt = context.get_admin_context()
    svc_list = availability_zones.set_availability_zones(
        admin_ctxt, db.service_get_all(admin_ctxt))
    if host:
        svc_list = [entry for entry in svc_list if entry['host'] == host]
    if service:
        svc_list = [entry for entry in svc_list
                    if entry['binary'] == service]
    fmt = "%-16s %-36s %-16s %-10s %-5s %-10s"
    print(fmt % (_('Binary'), _('Host'), _('Zone'), _('Status'),
                 _('State'), _('Updated_At')))
    for entry in svc_list:
        # Liveness comes from the servicegroup heartbeat, not the DB row.
        state_art = ":-)" if sg_api.service_is_up(entry) else "XXX"
        status = 'disabled' if entry['disabled'] else 'enabled'
        print(fmt % (entry['binary'], entry['host'],
                     entry['availability_zone'], status, state_art,
                     entry['updated_at']))
def __init__(self, host, binary, topic, manager, report_interval=None,
             periodic_enable=None, periodic_fuzzy_delay=None,
             periodic_interval_max=None, db_allowed=True, *args, **kwargs):
    """Initialize the service, then block until conductor is reachable.

    :param manager: dotted path of the manager class to instantiate
    :param db_allowed: whether this service may talk to the DB directly;
        baked into both the servicegroup driver and the conductor API
    """
    super(Service, self).__init__()
    self.host = host
    self.binary = binary
    self.topic = topic
    self.manager_class_name = manager
    # NOTE(russellb) We want to make sure to create the servicegroup API
    # instance early, before creating other things such as the manager,
    # that will also create a servicegroup API instance. Internally, the
    # servicegroup only allocates a single instance of the driver API and
    # we want to make sure that our value of db_allowed is there when it
    # gets created. For that to happen, this has to be the first instance
    # of the servicegroup API.
    self.servicegroup_api = servicegroup.API(db_allowed=db_allowed)
    manager_class = importutils.import_class(self.manager_class_name)
    self.manager = manager_class(host=self.host, *args, **kwargs)
    self.rpcserver = None
    self.report_interval = report_interval
    self.periodic_enable = periodic_enable
    self.periodic_fuzzy_delay = periodic_fuzzy_delay
    self.periodic_interval_max = periodic_interval_max
    # Keep the originals so the service can be recreated on restart.
    self.saved_args, self.saved_kwargs = args, kwargs
    self.backdoor_port = None
    # Block until the conductor service is ready before proceeding.
    self.conductor_api = conductor.API(use_local=db_allowed)
    self.conductor_api.wait_until_ready(context.get_admin_context())
def __init__(self, ext_mgr=None, *args, **kwargs):
    """Build the host and servicegroup API clients for this controller.

    :param ext_mgr: optional extension manager, stored for later use.
        NOTE(review): *args/**kwargs are accepted but ignored and super()
        is not called -- presumably intentional for this controller;
        confirm against the base class.
    """
    self.host_api = compute.HostAPI()
    self.servicegroup_api = servicegroup.API()
    self.ext_mgr = ext_mgr
def __init__(self):
    """Build API clients, then delegate to the base controller."""
    self.host_api = compute.HostAPI()
    self.servicegroup_api = servicegroup.API()
    super(HypervisorsController, self).__init__()
def __init__(self):
    """Build the host manager (from config) and the API clients it needs."""
    # The host manager class is configurable; load it dynamically.
    self.host_manager = importutils.import_object(
        CONF.scheduler_host_manager)
    self.compute_rpcapi = compute_rpcapi.ComputeAPI()
    self.servicegroup_api = servicegroup.API()
    self.image_service = glance.get_default_image_service()
def setUp(self):
    """Configure the DB servicegroup driver with a 15s down-time window."""
    super(DBServiceGroupTestCase, self).setUp()
    self.down_time = 15
    self.flags(service_down_time=self.down_time,
               servicegroup_driver='db')
    self.servicegroup_api = servicegroup.API()
def __init__(self):
    """Build the servicegroup API client used by this object."""
    self.servicegroup_api = servicegroup.API()
def setUp(self):
    """Select the DB servicegroup driver and expose it for the tests."""
    super(ServiceGroupApiTestCase, self).setUp()
    self.flags(servicegroup_driver='db')
    self.servicegroup_api = servicegroup.API()
    # Shortcut to the underlying driver instance for direct assertions.
    self.driver = self.servicegroup_api._driver
def __init__(self):
    """Delegate to the base controller, then build the API clients."""
    super(AvailabilityZoneController, self).__init__()
    self.servicegroup_api = servicegroup.API()
    self.host_api = compute.HostAPI()
def __init__(self):
    """Build the configurable host manager and the servicegroup client."""
    # The host manager class is configurable; load it dynamically.
    self.host_manager = importutils.import_object(
        CONF.scheduler_host_manager)
    self.servicegroup_api = servicegroup.API()
def __init__(self):
    """Load the configured host manager via stevedore and build clients."""
    # The host manager is loaded as a stevedore driver plugin from the
    # 'nova.scheduler.host_manager' namespace.
    self.host_manager = driver.DriverManager(
        "nova.scheduler.host_manager",
        CONF.scheduler.host_manager,
        invoke_on_load=True).driver
    self.servicegroup_api = servicegroup.API()
def _generate_task(self):
    """Build a LiveMigrationTask wired with real (unstubbed) API clients."""
    self.task = live_migrate.LiveMigrationTask(
        self.context, self.instance, self.destination,
        self.block_migration, self.disk_over_commit, self.migration,
        compute_rpcapi.ComputeAPI(), servicegroup.API(),
        scheduler_client.SchedulerClient(), self.fake_spec)
def setUp(self, mgc_mock):
    """Select the memcached servicegroup driver with a mocked client.

    :param mgc_mock: mock injected by a class-level patch decorator
        (presumably patching the memcache client factory) -- its return
        value is replaced with a MagicMock so no real memcached is used.
    """
    super(MemcachedServiceGroupTestCase, self).setUp()
    self.mc_client = mock.MagicMock()
    mgc_mock.return_value = self.mc_client
    # The server address is never dialled because the client is mocked.
    self.flags(memcached_servers='ignored',
               servicegroup_driver='mc')
    self.servicegroup_api = servicegroup.API()
def __init__(self):
    """Build the host and servicegroup API clients for this controller."""
    self.host_api = compute.HostAPI()
    self.servicegroup_api = servicegroup.API()
def setUp(self, mock_init_agg, mock_init_inst):
    """Prepare the scheduler driver under test with patched initializers.

    :param mock_init_agg: mock from a class-level patch decorator
        (aggregate initialization) -- unused here, just neutralized
    :param mock_init_inst: mock from a class-level patch decorator
        (instance-info initialization) -- unused here, just neutralized
    """
    super(SchedulerTestCase, self).setUp()
    # Instantiate the driver class under test (set by subclasses).
    self.driver = self.driver_cls()
    self.context = context.RequestContext('fake_user', 'fake_project')
    self.topic = 'fake_topic'
    self.servicegroup_api = servicegroup.API()
def __init__(self, variables, hosts, instance_uuids, request_spec,
             filter_properties):
    """Set up the servicegroup client and cache the problem dimensions.

    The host/instance counts are derived from the given hosts,
    instance_uuids and request_spec via _get_host_instance_nums.
    """
    self.servicegroup_api = servicegroup.API()
    # Helper returns a (host count, instance count) pair.
    self.num_hosts, self.num_instances = self._get_host_instance_nums(
        hosts, instance_uuids, request_spec)