Example #1
File: api.py Project: altai/nova
 def remove_from_compute(self, context, volume, instance_id, host):
     """Remove volume from specified compute host."""
     rpc.call(context,
              self.db.queue_get_for(context, FLAGS.compute_topic, host),
              {"method": "remove_volume_connection",
               "args": {'instance_id': instance_id,
                        'volume_id': volume['id']}})
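Note: every snippet on this page sends the same envelope shape, a dict with "method" and optional "args" keys. The sketch below (not nova's actual consumer code; FakeVolumeService and dispatch are made-up names) shows how such an envelope maps onto a method call on the receiving service:

    class FakeVolumeService(object):
        def remove_volume_connection(self, context, instance_id, volume_id):
            return "removed %s from instance %s" % (volume_id, instance_id)

    def dispatch(proxy, context, message):
        """Route a {"method": ..., "args": {...}} envelope to the proxy."""
        method = getattr(proxy, message["method"])
        return method(context, **message.get("args", {}))

    print(dispatch(FakeVolumeService(), None,
                   {"method": "remove_volume_connection",
                    "args": {"instance_id": "i-1", "volume_id": "v-1"}}))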
Example #2
    def test_live_migration_dest_raises_exception(self):
        """Confirm exception when pre_live_migration fails."""
        i_ref = self._get_dummy_instance()
        c = context.get_admin_context()
        topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])

        dbmock = self.mox.CreateMock(db)
        dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
        self.mox.StubOutWithMock(rpc, 'call')
        rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export",
                                         "args": {'instance_id': i_ref['id']}})
        dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
                             AndReturn(topic)
        rpc.call(c, topic, {"method": "pre_live_migration",
                            "args": {'instance_id': i_ref['id']}}).\
                            AndRaise(rpc.RemoteError('', '', ''))
        dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
                                                'state': power_state.RUNNING,
                                                'host': i_ref['host']})
        for v in i_ref['volumes']:
            dbmock.volume_update(c, v['id'], {'status': 'in-use'})

        self.compute.db = dbmock
        self.mox.ReplayAll()
        self.assertRaises(rpc.RemoteError,
                          self.compute.live_migration,
                          c, i_ref['id'], i_ref['host'])
Example #3
    def mounted_on_same_shared_storage(self, context, instance_ref, dest):
        """Check if the src and dest host mount the same shared storage.

        The dest host creates a temp file, and the src host can see it
        if both mount the same shared storage. The dest host then
        removes it.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host

        """

        src = instance_ref['host']
        dst_t = db.queue_get_for(context, FLAGS.compute_topic, dest)
        src_t = db.queue_get_for(context, FLAGS.compute_topic, src)

        filename = rpc.call(context, dst_t,
                            {"method": 'create_shared_storage_test_file'})

        try:
            # make sure the file exists at the src host.
            ret = rpc.call(context, src_t,
                           {"method": 'check_shared_storage_test_file',
                            "args": {'filename': filename}})

        finally:
            rpc.cast(context, dst_t,
                     {"method": 'cleanup_shared_storage_test_file',
                      "args": {'filename': filename}})

        return ret
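Note: the handshake above reduces to three operations on a shared directory. Below is a minimal, self-contained sketch of the same protocol with the rpc layer stripped out; the SHARED_DIR stand-in and the function bodies are assumptions for illustration only:

    import os
    import tempfile
    import uuid

    SHARED_DIR = tempfile.mkdtemp()  # stands in for the shared mount point

    def create_shared_storage_test_file():
        # would run on the dest host
        filename = str(uuid.uuid4())
        open(os.path.join(SHARED_DIR, filename), 'w').close()
        return filename

    def check_shared_storage_test_file(filename):
        # would run on the src host
        return os.path.exists(os.path.join(SHARED_DIR, filename))

    def cleanup_shared_storage_test_file(filename):
        # would run on the dest host
        os.unlink(os.path.join(SHARED_DIR, filename))

    f = create_shared_storage_test_file()
    assert check_shared_storage_test_file(f)  # same dir == "shared storage"
    cleanup_shared_storage_test_file(f)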
Example #4
    def test_call_exception(self):
        """Test that exception gets passed back properly.

        rpc.call returns a RemoteError object.  The value of the
        exception is converted to a string, so we convert it back
        to an int in the test.

        """
        value = 42
        self.assertRaises(rpc.RemoteError, rpc.call, self.context, 'test', {
            "method": "fail",
            "args": {
                "value": value
            }
        })
        try:
            rpc.call(self.context, 'test', {
                "method": "fail",
                "args": {
                    "value": value
                }
            })
            self.fail("should have thrown rpc.RemoteError")
        except rpc.RemoteError as exc:
            self.assertEqual(int(exc.value), value)
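Note: the int() round-trip in this test exists because a remote exception crosses the wire as strings. A toy RemoteError (a hypothetical stand-in, not nova's real class) makes the point:

    class RemoteError(Exception):
        """Carries the remote exception as (type name, value, traceback) strings."""
        def __init__(self, exc_type, value, traceback):
            self.exc_type = exc_type
            self.value = value
            self.traceback = traceback
            super(RemoteError, self).__init__("%s: %s" % (exc_type, value))

    try:
        raise RemoteError("Exception", str(42), "")  # value stringified on the wire
    except RemoteError as exc:
        assert int(exc.value) == 42                  # converted back, as in the test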
Example #5
    def test_live_migration_dest_raises_exception(self):
        """Confirm exception when pre_live_migration fails."""
        i_ref = self._get_dummy_instance()
        c = context.get_admin_context()
        topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])

        dbmock = self.mox.CreateMock(db)
        dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
        self.mox.StubOutWithMock(rpc, 'call')
        rpc.call(c, FLAGS.volume_topic, {
            "method": "check_for_export",
            "args": {
                'instance_id': i_ref['id']
            }
        })
        dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
                             AndReturn(topic)
        rpc.call(c, topic, {"method": "pre_live_migration",
                            "args": {'instance_id': i_ref['id']}}).\
                            AndRaise(rpc.RemoteError('', '', ''))
        dbmock.instance_update(
            c, i_ref['id'], {
                'state_description': 'running',
                'state': power_state.RUNNING,
                'host': i_ref['host']
            })
        for v in i_ref['volumes']:
            dbmock.volume_update(c, v['id'], {'status': 'in-use'})

        self.compute.db = dbmock
        self.mox.ReplayAll()
        self.assertRaises(rpc.RemoteError, self.compute.live_migration, c,
                          i_ref['id'], i_ref['host'])
Example #6
    def test_live_migration_common_check_checking_cpuinfo_fail(self):
        """Raise excetion when original host doen't have compatible cpu."""

        dest = 'dummydest'
        instance_id = self._create_instance()
        i_ref = db.instance_get(self.context, instance_id)

        # compute service for the original host
        s_ref = self._create_compute_service(host=i_ref['host'])
        # compute service for the destination
        s_ref2 = self._create_compute_service(host=dest)

        # mocks
        driver = self.scheduler.driver
        self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
        driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
        self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True)
        rpc.call(mox.IgnoreArg(), mox.IgnoreArg(),
            {"method": 'compare_cpu',
            "args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\
             AndRaise(rpc.RemoteError("doesn't have compatibility to", "", ""))

        self.mox.ReplayAll()
        try:
            self.scheduler.driver._live_migration_common_check(self.context,
                                                               i_ref,
                                                               dest)
        except rpc.RemoteError as e:
            c = (e.message.find(_("doesn't have compatibility to")) >= 0)
            self.assertTrue(c)
Example #7
    def mounted_on_same_shared_storage(self, context, instance_ref, dest):
        """Check if the src and dest host mount same shared storage.

        At first, dest host creates temp file, and src host can see
        it if they mounts same shared storage. Then src host erase it.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host

        """

        src = instance_ref['host']
        dst_t = db.queue_get_for(context, FLAGS.compute_topic, dest)
        src_t = db.queue_get_for(context, FLAGS.compute_topic, src)

        filename = rpc.call(context, dst_t,
                            {"method": 'create_shared_storage_test_file'})

        try:
            # make sure the file exists at the src host.
            ret = rpc.call(context, src_t,
                           {"method": 'check_shared_storage_test_file',
                            "args": {'filename': filename}})

        finally:
            rpc.cast(context, dst_t,
                     {"method": 'cleanup_shared_storage_test_file',
                      "args": {'filename': filename}})

        return ret
Example #8
File: api.py Project: kiall/nova
 def disassociate_floating_ip(self, context, instance, address,
                              affect_auto_assigned=False):
     """Disassociates a floating ip from fixed ip it is associated with."""
     rpc.call(context,
              FLAGS.network_topic,
              {'method': 'disassociate_floating_ip',
               'args': {'address': address}})
Example #9
 def remove_from_compute(self, context, instance_id, volume_id, host):
     """Remove volume from specified compute host."""
     rpc.call(context,
              self.db.queue_get_for(context, FLAGS.compute_topic, host),
              {"method": "remove_volume_connection",
               "args": {'instance_id': instance_id,
                        'volume_id': volume_id}})
Example #10
    def test_live_migration_common_check_checking_cpuinfo_fail(self):
        """Raise excetion when original host doen't have compatible cpu."""

        dest = 'dummydest'
        instance_id = self._create_instance()
        i_ref = db.instance_get(self.context, instance_id)

        # compute service for the original host
        s_ref = self._create_compute_service(host=i_ref['host'])
        # compute service for the destination
        s_ref2 = self._create_compute_service(host=dest)

        # mocks
        driver = self.scheduler.driver
        self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
        driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
        self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True)
        rpc.call(mox.IgnoreArg(), mox.IgnoreArg(),
            {"method": 'compare_cpu',
            "args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\
             AndRaise(rpc.RemoteError("doesn't have compatibility to", "", ""))

        self.mox.ReplayAll()
        try:
            self.scheduler.driver._live_migration_common_check(
                self.context, i_ref, dest)
        except rpc.RemoteError as e:
            c = (e.message.find(_("doesn't have compatibility to")) >= 0)
            self.assertTrue(c)
Example #11
    def test_live_migration_works_correctly_no_volume(self):
        """Confirm live_migration() works as expected correctly."""
        i_ref = self._get_dummy_instance()
        i_ref['volumes'] = []
        c = context.get_admin_context()
        topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])

        dbmock = self.mox.CreateMock(db)
        dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
        self.mox.StubOutWithMock(rpc, 'call')
        dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
                             AndReturn(topic)
        rpc.call(c, topic, {
            "method": "pre_live_migration",
            "args": {
                'instance_id': i_ref['id']
            }
        })
        self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
        self.compute.driver.live_migration(c, i_ref, i_ref['host'],
                                           self.compute.post_live_migration,
                                           self.compute.recover_live_migration)

        self.compute.db = dbmock
        self.mox.ReplayAll()
        ret = self.compute.live_migration(c, i_ref['id'], i_ref['host'])
        self.assertEqual(ret, None)
Example #12
    def assert_compute_node_has_enough_disk(self, context, instance_ref, dest,
                                            disk_over_commit):
        """Checks if destination host has enough disk for block migration.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
        :param disk_over_commit: if True, consider real(not virtual)
                                 disk size.

        """

        # Libvirt supports the qcow2 disk format, which is usually
        # compressed on compute nodes.
        # The real (compressed) disk image may grow up to the "virtual
        # disk size" specified as the maximum disk size.
        # (See qemu-img info path-to-disk)
        # The scheduler decides the destination host still has enough disk
        # space if real disk size < available disk size when
        # disk_over_commit is True, otherwise if
        # virtual disk size < available disk size.

        # Refresh compute_nodes table
        topic = db.queue_get_for(context, FLAGS.compute_topic, dest)
        rpc.call(context, topic,
                 {"method": "update_available_resource"})

        # Getting total available disk of host
        available_gb = self._get_compute_info(context,
                                              dest, 'disk_available_least')
        available = available_gb * (1024 ** 3)

        # Getting necessary disk size
        try:
            topic = db.queue_get_for(context, FLAGS.compute_topic,
                                              instance_ref['host'])
            ret = rpc.call(context, topic,
                           {"method": 'get_instance_disk_info',
                            "args": {'instance_name': instance_ref.name}})
            disk_infos = utils.loads(ret)
        except rpc.RemoteError:
            src = instance_ref['host']
            LOG.exception(_("host %(dest)s is not compatible with "
                            "original host %(src)s.") % locals())
            raise

        necessary = 0
        if disk_over_commit:
            for info in disk_infos:
                necessary += int(info['disk_size'])
        else:
            for info in disk_infos:
                necessary += int(info['virt_disk_size'])

        # Check that available disk > necessary disk
        if (available - necessary) < 0:
            instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
            reason = _("Unable to migrate %(instance_id)s to %(dest)s: "
                       "Lack of disk(host:%(available)s "
                       "<= instance:%(necessary)s)")
            raise exception.MigrationError(reason=reason % locals())
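Note: the disk check above boils down to simple arithmetic. A standalone sketch with fabricated numbers (the dict keys match the ones the method reads; everything else is made up):

    disk_infos = [{'disk_size': 10 * (1024 ** 3),        # real qcow2 size
                   'virt_disk_size': 40 * (1024 ** 3)}]  # maximum virtual size

    def necessary_bytes(disk_infos, disk_over_commit):
        key = 'disk_size' if disk_over_commit else 'virt_disk_size'
        return sum(int(info[key]) for info in disk_infos)

    available = 20 * (1024 ** 3)  # disk_available_least in GB, as bytes
    print(available - necessary_bytes(disk_infos, True) >= 0)   # True: fits
    print(available - necessary_bytes(disk_infos, False) >= 0)  # False: too big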
Example #13
    def test_live_migration_dest_raises_exception_no_volume(self):
        """Same as above test(input pattern is different) """
        i_ref = self._get_dummy_instance()
        i_ref['volumes'] = []
        c = context.get_admin_context()
        topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])

        dbmock = self.mox.CreateMock(db)
        dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
        dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
                             AndReturn(topic)
        self.mox.StubOutWithMock(rpc, 'call')
        rpc.call(c, topic, {"method": "pre_live_migration",
                            "args": {'instance_id': i_ref['id']}}).\
                            AndRaise(rpc.RemoteError('', '', ''))
        dbmock.instance_update(
            c, i_ref['id'], {
                'state_description': 'running',
                'state': power_state.RUNNING,
                'host': i_ref['host']
            })

        self.compute.db = dbmock
        self.mox.ReplayAll()
        self.assertRaises(rpc.RemoteError, self.compute.live_migration, c,
                          i_ref['id'], i_ref['host'])
Example #14
    def mounted_on_same_shared_storage(self, context, instance_ref, dest):
        """Check if the src and dest host mount same shared storage.

        At first, dest host creates temp file, and src host can see
        it if they mounts same shared storage. Then src host erase it.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host

        """

        src = instance_ref["host"]
        dst_t = db.queue_get_for(context, FLAGS.compute_topic, dest)
        src_t = db.queue_get_for(context, FLAGS.compute_topic, src)

        filename = None

        try:
            # create tmpfile at dest host
            filename = rpc.call(context, dst_t,
                                {"method": "create_shared_storage_test_file"})

            # make sure existence at src host.
            ret = rpc.call(context, src_t,
                           {"method": "check_shared_storage_test_file",
                            "args": {"filename": filename}})
            if not ret:
                raise exception.FileNotFound(file_path=filename)

        except exception.FileNotFound:
            raise

        finally:
            # Should only be None for tests?
            if filename is not None:
                rpc.call(context, dst_t,
                         {"method": "cleanup_shared_storage_test_file",
                          "args": {"filename": filename}})
Example #15
    def assert_compute_node_has_enough_disk(self, context, instance_ref, dest,
                                            disk_over_commit):
        """Checks if destination host has enough disk for block migration.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
        :param disk_over_commit: if True, consider real(not virtual)
                                 disk size.

        """

        # Libvirt supports the qcow2 disk format, which is usually
        # compressed on compute nodes.
        # The real (compressed) disk image may grow up to the "virtual
        # disk size" specified as the maximum disk size.
        # (See qemu-img info path-to-disk)
        # The scheduler decides the destination host still has enough disk
        # space if real disk size < available disk size when
        # disk_over_commit is True, otherwise if
        # virtual disk size < available disk size.

        # Refresh compute_nodes table
        topic = db.queue_get_for(context, FLAGS.compute_topic, dest)
        rpc.call(context, topic,
                 {"method": "update_available_resource"})

        # Getting total available disk of host
        available_gb = self._get_compute_info(context,
                                              dest, 'disk_available_least')
        available = available_gb * (1024 ** 3)

        # Getting necessary disk size
        try:
            topic = db.queue_get_for(context, FLAGS.compute_topic,
                                              instance_ref['host'])
            ret = rpc.call(context, topic,
                           {"method": 'get_instance_disk_info',
                            "args": {'instance_name': instance_ref['name']}})
            disk_infos = utils.loads(ret)
        except rpc.RemoteError:
            src = instance_ref['host']
            LOG.exception(_("host %(dest)s is not compatible with "
                            "original host %(src)s.") % locals())
            raise

        necessary = 0
        if disk_over_commit:
            for info in disk_infos:
                necessary += int(info['disk_size'])
        else:
            for info in disk_infos:
                necessary += int(info['virt_disk_size'])

        # Check that available disk > necessary disk
        if (available - necessary) < 0:
            instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
            reason = _("Unable to migrate %(instance_id)s to %(dest)s: "
                       "Lack of disk(host:%(available)s "
                       "<= instance:%(necessary)s)")
            raise exception.MigrationError(reason=reason % locals())
Example #16
 def add_fixed_ip_to_instance(self, context, instance, network_id):
     """Adds a fixed ip to instance from specified network."""
     args = {'instance_id': instance['id'],
             'host': instance['host'],
             'network_id': network_id}
     rpc.call(context, FLAGS.network_topic,
              {'method': 'add_fixed_ip_to_instance',
               'args': args})
Example #17
 def release_floating_ip(self, context, address,
                         affect_auto_assigned=False):
     """Removes floating ip with address from a project. (deallocates)"""
     rpc.call(context,
              FLAGS.network_topic,
              {'method': 'deallocate_floating_ip',
               'args': {'address': address,
                        'affect_auto_assigned': affect_auto_assigned}})
Example #18
File: api.py Project: altai/nova
 def add_network_to_project(self, context, project_id):
     """Force adds another network to a project."""
     rpc.call(context, FLAGS.network_topic, {
         'method': 'add_network_to_project',
         'args': {
             'project_id': project_id
         }
     })
Example #19
    def show_host_resources(self, context, host):
        """Shows the physical/usage resource given by hosts.

        :param context: security context
        :param host: hostname
        :returns:
            example format is below.
            {'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
            D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048,
                'vcpus_used': 12, 'memory_mb_used': 10240,
                'local_gb_used': 64}

        """
        # Update latest compute_node table
        topic = db.queue_get_for(context, FLAGS.compute_topic, host)
        rpc.call(context, topic, {"method": "update_available_resource"})

        # Getting compute node info and related instances info
        compute_ref = db.service_get_all_compute_by_host(context, host)
        compute_ref = compute_ref[0]
        instance_refs = db.instance_get_all_by_host(context,
                                                    compute_ref['host'])

        # Getting total available/used resource
        compute_ref = compute_ref['compute_node'][0]
        resource = {
            'vcpus': compute_ref['vcpus'],
            'memory_mb': compute_ref['memory_mb'],
            'local_gb': compute_ref['local_gb'],
            'vcpus_used': compute_ref['vcpus_used'],
            'memory_mb_used': compute_ref['memory_mb_used'],
            'local_gb_used': compute_ref['local_gb_used']
        }
        usage = dict()
        if not instance_refs:
            return {'resource': resource, 'usage': usage}

        # Getting usage resource per project
        project_ids = [i['project_id'] for i in instance_refs]
        project_ids = list(set(project_ids))
        for project_id in project_ids:
            vcpus = [i['vcpus'] for i in instance_refs
                     if i['project_id'] == project_id]

            mem = [i['memory_mb'] for i in instance_refs
                   if i['project_id'] == project_id]

            disk = [i['local_gb'] for i in instance_refs
                    if i['project_id'] == project_id]

            usage[project_id] = {
                'vcpus': sum(vcpus),
                'memory_mb': sum(mem),
                'local_gb': sum(disk)
            }

        return {'resource': resource, 'usage': usage}
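Note: the per-project aggregation above can be exercised standalone. The sketch below uses fabricated instance rows; only the dict keys mirror the ones the method reads:

    instance_refs = [
        {'project_id': 'p1', 'vcpus': 2, 'memory_mb': 2048, 'local_gb': 20},
        {'project_id': 'p1', 'vcpus': 1, 'memory_mb': 1024, 'local_gb': 10},
        {'project_id': 'p2', 'vcpus': 4, 'memory_mb': 4096, 'local_gb': 40},
    ]

    usage = {}
    for project_id in set(i['project_id'] for i in instance_refs):
        rows = [i for i in instance_refs if i['project_id'] == project_id]
        usage[project_id] = {'vcpus': sum(i['vcpus'] for i in rows),
                             'memory_mb': sum(i['memory_mb'] for i in rows),
                             'local_gb': sum(i['local_gb'] for i in rows)}
    print(usage)  # {'p1': {'vcpus': 3, ...}, 'p2': {'vcpus': 4, ...}}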
Example #20
 def deallocate_for_instance(self, context, instance, **kwargs):
     """Deallocates all network structures related to instance."""
     args = kwargs
     args['instance_id'] = instance['id']
     args['project_id'] = instance['project_id']
     args['host'] = instance['host']
     rpc.call(context, FLAGS.network_topic,
              {'method': 'deallocate_for_instance',
               'args': args})
Example #21
    def show_host_resources(self, context, host):
        """Shows the physical/usage resource given by hosts.

        :param context: security context
        :param host: hostname
        :returns:
            example format is below.
            {'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
            D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048,
                'vcpus_used': 12, 'memory_mb_used': 10240,
                'local_gb_used': 64}

        """
        # Update latest compute_node table
        topic = db.queue_get_for(context, FLAGS.compute_topic, host)
        rpc.call(context, topic, {"method": "update_available_resource"})

        # Getting compute node info and related instances info
        compute_ref = db.service_get_all_compute_by_host(context, host)
        compute_ref = compute_ref[0]
        instance_refs = db.instance_get_all_by_host(context,
                                                    compute_ref['host'])

        # Getting total available/used resource
        compute_ref = compute_ref['compute_node'][0]
        resource = {'vcpus': compute_ref['vcpus'],
                    'memory_mb': compute_ref['memory_mb'],
                    'local_gb': compute_ref['local_gb'],
                    'vcpus_used': compute_ref['vcpus_used'],
                    'memory_mb_used': compute_ref['memory_mb_used'],
                    'local_gb_used': compute_ref['local_gb_used']}
        usage = dict()
        if not instance_refs:
            return {'resource': resource, 'usage': usage}

        # Getting usage resource per project
        project_ids = [i['project_id'] for i in instance_refs]
        project_ids = list(set(project_ids))
        for project_id in project_ids:
            vcpus = [i['vcpus'] for i in instance_refs
                     if i['project_id'] == project_id]

            mem = [i['memory_mb'] for i in instance_refs
                   if i['project_id'] == project_id]

            root = [i['root_gb'] for i in instance_refs
                    if i['project_id'] == project_id]

            ephemeral = [i['ephemeral_gb'] for i in instance_refs
                         if i['project_id'] == project_id]

            usage[project_id] = {'vcpus': sum(vcpus),
                                 'memory_mb': sum(mem),
                                 'root_gb': sum(root),
                                 'ephemeral_gb': sum(ephemeral)}

        return {'resource': resource, 'usage': usage}
Example #22
    def remove_fixed_ip_from_instance(self, context, instance, address):
        """Removes a fixed ip from instance from specified network."""

        args = {'instance_id': instance['id'],
                'host': instance['host'],
                'address': address}
        rpc.call(context, FLAGS.network_topic,
                 {'method': 'remove_fixed_ip_from_instance',
                  'args': args})
Example #23
    def _live_migration_common_check(self, context, instance_ref, dest):
        """Live migration common check routine.

        The checks below follow
        http://wiki.libvirt.org/page/TodoPreMigrationChecks

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host

        """

        # Checking shared storage connectivity
        self.mounted_on_same_shared_storage(context, instance_ref, dest)

        # Checking dest exists.
        dservice_refs = db.service_get_all_compute_by_host(context, dest)
        dservice_ref = dservice_refs[0]['compute_node'][0]

        # Checking original host (where the instance was launched) exists.
        try:
            oservice_refs = db.service_get_all_compute_by_host(context,
                                           instance_ref['launched_on'])
        except exception.NotFound:
            raise exception.Invalid(_("host %s where instance was launched "
                                      "does not exist.")
                                       % instance_ref['launched_on'])
        oservice_ref = oservice_refs[0]['compute_node'][0]

        # Checking hypervisor is same.
        orig_hypervisor = oservice_ref['hypervisor_type']
        dest_hypervisor = dservice_ref['hypervisor_type']
        if orig_hypervisor != dest_hypervisor:
            raise exception.Invalid(_("Different hypervisor type"
                                      "(%(orig_hypervisor)s->"
                                      "%(dest_hypervisor)s)')" % locals()))

        # Checking hypervisor version.
        orig_hypervisor = oservice_ref['hypervisor_version']
        dest_hypervisor = dservice_ref['hypervisor_version']
        if orig_hypervisor > dest_hypervisor:
            raise exception.Invalid(_("Older hypervisor version "
                                      "(%(orig_hypervisor)s->"
                                      "%(dest_hypervisor)s)") % locals())

        # Checking cpuinfo.
        try:
            rpc.call(context,
                     db.queue_get_for(context, FLAGS.compute_topic, dest),
                     {"method": 'compare_cpu',
                      "args": {'cpu_info': oservice_ref['cpu_info']}})

        except rpc.RemoteError:
            src = instance_ref['host']
            logging.exception(_("host %(dest)s is not compatible with "
                                "original host %(src)s.") % locals())
            raise
Example #24
    def _live_migration_common_check(self, context, instance_ref, dest):
        """Live migration common check routine.

        The checks below follow
        http://wiki.libvirt.org/page/TodoPreMigrationChecks

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host

        """

        # Checking shared storage connectivity
        self.mounted_on_same_shared_storage(context, instance_ref, dest)

        # Checking dest exists.
        dservice_refs = db.service_get_all_compute_by_host(context, dest)
        dservice_ref = dservice_refs[0]['compute_node'][0]

        # Checking original host (where the instance was launched) exists.
        try:
            oservice_refs = db.service_get_all_compute_by_host(
                context, instance_ref['launched_on'])
        except exception.NotFound:
            raise exception.SourceHostUnavailable()
        oservice_ref = oservice_refs[0]['compute_node'][0]

        # Checking hypervisor is same.
        orig_hypervisor = oservice_ref['hypervisor_type']
        dest_hypervisor = dservice_ref['hypervisor_type']
        if orig_hypervisor != dest_hypervisor:
            raise exception.InvalidHypervisorType()

        # Checking hypervisor version.
        orig_hypervisor = oservice_ref['hypervisor_version']
        dest_hypervisor = dservice_ref['hypervisor_version']
        if orig_hypervisor > dest_hypervisor:
            raise exception.DestinationHypervisorTooOld()

        # Checking cpuinfo.
        try:
            rpc.call(
                context, db.queue_get_for(context, FLAGS.compute_topic, dest),
                {
                    "method": 'compare_cpu',
                    "args": {
                        'cpu_info': oservice_ref['cpu_info']
                    }
                })

        except rpc.RemoteError:
            src = instance_ref['host']
            logging.exception(
                _("host %(dest)s is not compatible with "
                  "original host %(src)s.") % locals())
            raise
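Note: the hypervisor checks in both variants reduce to two comparisons. A minimal sketch with placeholder exception classes and made-up service records:

    class InvalidHypervisorType(Exception):
        pass

    class DestinationHypervisorTooOld(Exception):
        pass

    def check_hypervisor_compat(orig, dest):
        # type must match exactly; dest version must be >= orig version
        if orig['hypervisor_type'] != dest['hypervisor_type']:
            raise InvalidHypervisorType()
        if orig['hypervisor_version'] > dest['hypervisor_version']:
            raise DestinationHypervisorTooOld()

    check_hypervisor_compat(
        {'hypervisor_type': 'QEMU', 'hypervisor_version': 1006000},
        {'hypervisor_type': 'QEMU', 'hypervisor_version': 1007000})  # passes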
Example #25
File: api.py Project: altai/nova
 def deallocate_for_instance(self, context, instance, **kwargs):
     """Deallocates all network structures related to instance."""
     args = kwargs
     args['instance_id'] = instance['id']
     args['project_id'] = instance['project_id']
     args['host'] = instance['host']
     rpc.call(context, FLAGS.network_topic, {
         'method': 'deallocate_for_instance',
         'args': args
     })
Example #26
File: api.py Project: altai/nova
 def add_fixed_ip_to_instance(self, context, instance, network_id):
     """Adds a fixed ip to instance from specified network."""
     args = {
         'instance_id': instance['id'],
         'host': instance['host'],
         'network_id': network_id
     }
     rpc.call(context, FLAGS.network_topic, {
         'method': 'add_fixed_ip_to_instance',
         'args': args
     })
Example #27
    def delete_network(self, context, fixed_range, uuid):
        """Lookup network by uuid, delete both the IPAM
           subnet and the corresponding Quantum network.

           The fixed_range parameter is kept here for interface compatibility
           but is not used.
        """
        net_ref = db.network_get_by_uuid(context.elevated(), uuid)
        project_id = net_ref['project_id']
        q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
        net_uuid = net_ref['uuid']

        # Check for any attached ports on the network and fail the deletion if
        # there is anything but the gateway port attached.  If it is only the
        # gateway port, detach and delete it.
        ports = self.q_conn.get_attached_ports(q_tenant_id, net_uuid)
        num_ports = len(ports)
        gw_interface_id = self.driver.get_dev(net_ref)
        gw_port_uuid = None
        if gw_interface_id is not None:
            gw_port_uuid = self.q_conn.get_port_by_attachment(
                q_tenant_id, net_uuid, gw_interface_id)

        if gw_port_uuid:
            num_ports -= 1

        if num_ports > 0:
            raise exception.NetworkBusy(network=net_uuid)

        # only delete gw ports if we are going to finish deleting network
        if gw_port_uuid:
            self.q_conn.detach_and_delete_port(q_tenant_id, net_uuid,
                                               gw_port_uuid)
            self.l3driver.remove_gateway(net_ref)

        # Now we can delete the network
        self.q_conn.delete_network(q_tenant_id, net_uuid)
        LOG.debug("Deleting network %s for tenant: %s" %
                  (net_uuid, q_tenant_id))
        self.ipam.delete_subnets_by_net_id(context, net_uuid, project_id)
        # Get rid of dnsmasq
        if FLAGS.quantum_use_dhcp:
            if net_ref['host'] == self.host:
                self.kill_dhcp(net_ref)
            else:
                topic = rpc.queue_get_for(context, FLAGS.network_topic,
                                          net_ref['host'])

                rpc.call(context, topic, {
                    'method': 'kill_dhcp',
                    'args': {
                        'net_ref': net_ref
                    }
                })
Example #28
    def associate_floating_ip(self, context, floating_address, fixed_address,
                                                 affect_auto_assigned=False):
        """Associates a floating ip with a fixed ip.

        ensures floating ip is allocated to the project in context
        """
        rpc.call(context,
                 FLAGS.network_topic,
                 {'method': 'associate_floating_ip',
                  'args': {'floating_address': floating_address,
                           'fixed_address': fixed_address,
                           'affect_auto_assigned': affect_auto_assigned}})
Example #29
File: api.py Project: altai/nova
    def remove_fixed_ip_from_instance(self, context, instance, address):
        """Removes a fixed ip from instance from specified network."""

        args = {
            'instance_id': instance['id'],
            'host': instance['host'],
            'address': address
        }
        rpc.call(context, FLAGS.network_topic, {
            'method': 'remove_fixed_ip_from_instance',
            'args': args
        })
Example #30
    def test_block_migration_dest_check_service_lack_disk(self):
        """Confirms exception raises when dest doesn't have enough disk."""

        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        self.mox.StubOutWithMock(self.driver,
                'assert_compute_node_has_enough_memory')
        self.mox.StubOutWithMock(self.driver, '_get_compute_info')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')

        dest = 'fake_host2'
        block_migration = True
        disk_over_commit = True
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)

        self.driver._live_migration_src_check(self.context, instance)
        db.service_get_all_compute_by_host(self.context,
                dest).AndReturn(['fake_service3'])
        utils.service_is_up('fake_service3').AndReturn(True)

        # Enough memory
        self.driver.assert_compute_node_has_enough_memory(self.context,
                instance, dest)

        # Not enough disk
        self.driver._get_compute_info(self.context, dest,
                'disk_available_least').AndReturn(1023)
        rpc.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue')
        instance_disk_info_msg = {
            'method': 'get_instance_disk_info',
            'args': {
                'instance_name': instance['name'],
            },
            'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION,
        }
        instance_disk_info = [{'disk_size': 1024 * (1024 ** 3)}]
        rpc.call(self.context,
                 'src_queue',
                 instance_disk_info_msg,
                 None).AndReturn(jsonutils.dumps(instance_disk_info))

        self.mox.ReplayAll()
        self.assertRaises(exception.MigrationError,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)
Example #31
    def test_block_migration_dest_check_service_lack_disk(self):
        """Confirms exception raises when dest doesn't have enough disk."""

        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        self.mox.StubOutWithMock(self.driver,
                'assert_compute_node_has_enough_memory')
        self.mox.StubOutWithMock(self.driver, '_get_compute_info')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')

        dest = 'fake_host2'
        block_migration = True
        disk_over_commit = True
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)

        self.driver._live_migration_src_check(self.context, instance)
        db.service_get_all_compute_by_host(self.context,
                dest).AndReturn(['fake_service3'])
        utils.service_is_up('fake_service3').AndReturn(True)

        # Enough memory
        self.driver.assert_compute_node_has_enough_memory(self.context,
                instance, dest)

        # Not enough disk
        self.driver._get_compute_info(self.context, dest,
                'disk_available_least').AndReturn(1023)
        rpc.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue')
        instance_disk_info_msg = {
            'method': 'get_instance_disk_info',
            'args': {
                'instance_name': instance['name'],
            },
            'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION,
        }
        instance_disk_info = [{'disk_size': 1024 * (1024 ** 3)}]
        rpc.call(self.context,
                 'src_queue',
                 instance_disk_info_msg,
                 None).AndReturn(jsonutils.dumps(instance_disk_info))

        self.mox.ReplayAll()
        self.assertRaises(exception.MigrationError,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)
Example #32
 def disassociate_floating_ip(self,
                              context,
                              instance,
                              address,
                              affect_auto_assigned=False):
     """Disassociates a floating ip from fixed ip it is associated with."""
     rpc.call(context, FLAGS.network_topic, {
         'method': 'disassociate_floating_ip',
         'args': {
             'address': address
         }
     })
Example #33
    def associate_floating_ip(self, context, floating_address, fixed_address,
                                                 affect_auto_assigned=False):
        """Associates a floating ip with a fixed ip.

        ensures floating ip is allocated to the project in context
        """
        rpc.call(context,
                 FLAGS.network_topic,
                 {'method': 'associate_floating_ip',
                  'args': {'floating_address': floating_address,
                           'fixed_address': fixed_address,
                           'affect_auto_assigned': affect_auto_assigned}})
Example #34
    def delete_network(self, context, fixed_range, uuid):
        """Lookup network by uuid, delete both the IPAM
           subnet and the corresponding Quantum network.

           The fixed_range parameter is kept here for interface compatibility
           but is not used.
        """
        net_ref = db.network_get_by_uuid(context.elevated(), uuid)
        project_id = net_ref['project_id']
        q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
        net_uuid = net_ref['uuid']

        # Check for any attached ports on the network and fail the deletion if
        # there is anything but the gateway port attached.  If it is only the
        # gateway port, detach and delete it.
        ports = self.q_conn.get_attached_ports(q_tenant_id, net_uuid)
        num_ports = len(ports)
        gw_interface_id = self.driver.get_dev(net_ref)
        gw_port_uuid = None
        if gw_interface_id is not None:
            gw_port_uuid = self.q_conn.get_port_by_attachment(q_tenant_id,
                                        net_uuid, gw_interface_id)

        if gw_port_uuid:
            num_ports -= 1

        if num_ports > 0:
            raise exception.NetworkBusy(network=net_uuid)

        # only delete gw ports if we are going to finish deleting network
        if gw_port_uuid:
            self.q_conn.detach_and_delete_port(q_tenant_id,
                                                   net_uuid,
                                                   gw_port_uuid)
            self.l3driver.remove_gateway(net_ref)

        # Now we can delete the network
        self.q_conn.delete_network(q_tenant_id, net_uuid)
        LOG.debug("Deleting network %s for tenant: %s" %
                  (net_uuid, q_tenant_id))
        self.ipam.delete_subnets_by_net_id(context, net_uuid, project_id)
        # Get rid of dnsmasq
        if FLAGS.quantum_use_dhcp:
            if net_ref['host'] == self.host:
                self.kill_dhcp(net_ref)
            else:
                topic = rpc.queue_get_for(context,
                        FLAGS.network_topic,
                        net_ref['host'])

                rpc.call(context, topic, {'method': 'kill_dhcp',
                        'args': {'net_ref': net_ref}})
Example #35
 def disassociate_floating_ip(self, context, address, affect_auto_assigned=False):
     """Disassociates a floating ip from fixed ip it is associated with."""
     floating_ip = self.db.floating_ip_get_by_address(context, address)
     if not affect_auto_assigned and floating_ip.get("auto_assigned"):
         return
     if not floating_ip.get("fixed_ip"):
         raise exception.ApiError("Address is not associated.")
     host = floating_ip["fixed_ip"]["network"]["host"]
     rpc.call(
         context,
         self.db.queue_get_for(context, FLAGS.network_topic, host),
         {"method": "disassociate_floating_ip", "args": {"floating_address": floating_ip["address"]}},
     )
Example #36
File: api.py Project: altai/nova
 def release_floating_ip(self,
                         context,
                         address,
                         affect_auto_assigned=False):
     """Removes floating ip with address from a project. (deallocates)"""
     rpc.call(
         context, FLAGS.network_topic, {
             'method': 'deallocate_floating_ip',
             'args': {
                 'address': address,
                 'affect_auto_assigned': affect_auto_assigned
             }
         })
Example #37
 def _really_run_instance(self, user, launchstate, idx):
     launchstate['instance_id'] = generate_uid('i')
     launchstate['ami_launch_index'] = idx
     network = self.network.get_users_network(str(user.id))
     launchstate['network_str'] = network.to_dict()
     launchstate['bridge_name'] = network.bridge_name
     logging.debug(
         "Casting to node for %s's instance with IP of %s in the %s network"
         % (user.name, launchstate['private_dns_name'],
            launchstate['network_name']))
     rpc.call(FLAGS.compute_topic, {
         "method": "run_instance",
         "args": launchstate
     })
     return launchstate
Example #38
 def _really_run_instance(self, user, launchstate, idx):
     launchstate['instance_id'] = generate_uid('i')
     launchstate['ami_launch_index'] = idx 
     network = self.network.get_users_network(str(user.id))
     launchstate['network_str'] = network.to_dict()
     launchstate['bridge_name'] = network.bridge_name
     logging.debug("Casting to node for %s's instance with IP of %s in the %s network" %
                   (user.name,
                    launchstate['private_dns_name'],
                    launchstate['network_name']))
     rpc.call(FLAGS.compute_topic,
              {"method": "run_instance",
               "args": launchstate})
     return launchstate
Example #39
    def test_live_migration_different_hypervisor_type_raises(self):
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(rpc, 'cast')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')

        dest = 'fake_host2'
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)

        self.driver._live_migration_src_check(self.context, instance)
        self.driver._live_migration_dest_check(self.context, instance,
                dest, block_migration, disk_over_commit)

        db.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        db.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue')
        tmp_filename = 'test-filename'
        rpc.call(self.context, 'dest_queue',
                {'method': 'create_shared_storage_test_file'}
                ).AndReturn(tmp_filename)
        rpc.call(self.context, 'src_queue',
                {'method': 'check_shared_storage_test_file',
                 'args': {'filename': tmp_filename}}).AndReturn(True)
        rpc.cast(self.context, 'dest_queue',
                {'method': 'cleanup_shared_storage_test_file',
                 'args': {'filename': tmp_filename}})
        db.service_get_all_compute_by_host(self.context, dest).AndReturn(
                [{'compute_node': [{'hypervisor_type': 'xen',
                                    'hypervisor_version': 1}]}])
        # different hypervisor type
        db.service_get_all_compute_by_host(self.context,
            instance['host']).AndReturn(
                    [{'compute_node': [{'hypervisor_type': 'not-xen',
                                        'hypervisor_version': 1}]}])

        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidHypervisorType,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)
Example #40
 def get_instance_nw_info(self, context, instance):
     """Returns all network info related to an instance."""
     args = {'instance_id': instance['id'],
             'instance_uuid': instance['uuid'],
             'instance_type_id': instance['instance_type_id'],
             'host': instance['host']}
     try:
         return rpc.call(context, FLAGS.network_topic,
                 {'method': 'get_instance_nw_info',
                 'args': args})
     # FIXME(comstud) rpc calls raise RemoteError if the remote raises
     # an exception.  In the case here, because of a race condition,
      # it's possible the remote will raise an InstanceNotFound when
      # someone deletes the instance while this call is in progress.
      #
      # Unfortunately, we don't have access to the original exception
      # class now, but we do have the exception class's name.  So,
     # we're checking it here and raising a new exception.
     #
     # Ultimately we need RPC to be able to serialize more things like
     # classes.
     except rpc_common.RemoteError as err:
         if err.exc_type == 'InstanceNotFound':
             raise exception.InstanceNotFound(instance_id=instance['id'])
         raise
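Note: the except clause above works by name because the rpc layer only preserves the remote exception's class name. A self-contained sketch of the same remapping (RemoteError and InstanceNotFound here are hypothetical stand-ins for the real classes):

    class RemoteError(Exception):
        def __init__(self, exc_type, value=''):
            self.exc_type = exc_type
            self.value = value
            super(RemoteError, self).__init__(exc_type)

    class InstanceNotFound(Exception):
        pass

    def remap_remote_error(err, instance_id):
        # string-match the remote class name, re-raise as a local class
        if err.exc_type == 'InstanceNotFound':
            raise InstanceNotFound(instance_id)
        raise err

    try:
        remap_remote_error(RemoteError('InstanceNotFound'), 'i-1')
    except InstanceNotFound as e:
        print('remapped: %s' % e)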
Example #41
    def test_nested_calls(self):
        """Test that we can do an rpc.call inside another call"""
        class Nested(object):
            @staticmethod
            def echo(context, queue, value):
                """Calls echo in the passed queue"""
                LOG.debug(_("Nested received %(queue)s, %(value)s")
                        % locals())
                ret = rpc.call(context,
                               queue,
                               {"method": "echo",
                                "args": {"value": value}})
                LOG.debug(_("Nested return %s"), ret)
                return value

        nested = Nested()
        conn = rpc.Connection.instance(True)
        consumer = rpc.AdapterConsumer(connection=conn,
                                       topic='nested',
                                       proxy=nested)
        consumer.attach_to_eventlet()
        value = 42
        result = rpc.call(self.context,
                          'nested', {"method": "echo",
                                     "args": {"queue": "test",
                                              "value": value}})
        self.assertEqual(value, result)
Example #42
    def __call__(self, environ, start_response):
        try:
            req = webob.Request(environ)
            LOG.audit(_("Request: %s"), req)
            token = req.params.get('token')
            if not token:
                LOG.audit(_("Request made with missing token: %s"), req)
                start_response('400 Invalid Request',
                               [('content-type', 'text/html')])
                return "Invalid Request"

            ctxt = context.get_admin_context()
            connect_info = rpc.call(ctxt, FLAGS.consoleauth_topic,
                                    {'method': 'check_token',
                                     'args': {'token': token}})

            if not connect_info:
                LOG.audit(_("Request made with invalid token: %s"), req)
                start_response('401 Not Authorized',
                               [('content-type', 'text/html')])
                return "Not Authorized"

            return self.proxy_connection(req, connect_info, start_response)
        except Exception as e:
            LOG.audit(_("Unexpected error: %s"), e)
Example #43
File: api.py Project: xtoddx/nova
 def get_dns_entries_by_name(self, context, name, domain):
     """Get entries for name and domain"""
     args = {'name': name, 'domain': domain}
     return rpc.call(context, FLAGS.network_topic, {
         'method': 'get_dns_entries_by_name',
         'args': args
     })
Example #44
 def test_context_passed(self):
     """Makes sure a context is passed through rpc call"""
     value = 42
     result = rpc.call(self.context,
                       'test', {"method": "context",
                                "args": {"value": value}})
     self.assertEqual(self.context.to_dict(), result)
Example #45
File: api.py Project: xtoddx/nova
 def get_floating_ip_by_address(self, context, address):
     return rpc.call(context, FLAGS.network_topic, {
         'method': 'get_floating_ip_by_address',
         'args': {
             'address': address
         }
     })
Example #46
File: api.py Project: xtoddx/nova
 def get_floating_ip(self, context, id):
     return rpc.call(context, FLAGS.network_topic, {
         'method': 'get_floating_ip',
         'args': {
             'id': id
         }
     })
Example #47
 def get_pool_for_instance_host(self, context, instance_host):
     """Gets console pool info for the instance."""
     context = context.elevated()
     console_type = self.driver.console_type
     try:
         pool = self.db.console_pool_get_by_host_type(
             context, instance_host, self.host, console_type)
     except exception.NotFound:
         pool_info = rpc.call(
             context,
             self.db.queue_get_for(context, FLAGS.compute_topic,
                                   instance_host), {
                                       'method': 'get_console_pool_info',
                                       'args': {
                                           'console_type': console_type
                                       }
                                   })
         pool_info['password'] = self.driver.fix_pool_password(
             pool_info['password'])
         pool_info['host'] = self.host
         # ESX Address or Proxy Address
         public_host_name = pool_info['address']
         if FLAGS.console_public_hostname:
             public_host_name = FLAGS.console_public_hostname
         pool_info['public_hostname'] = public_host_name
         pool_info['console_type'] = console_type
         pool_info['compute_host'] = instance_host
         pool = self.db.console_pool_create(context, pool_info)
     return pool
Example #48
File: api.py Project: xtoddx/nova
 def create_public_dns_domain(self, context, domain, project=None):
     """Create a private DNS domain with optional nova project."""
     args = {'domain': domain, 'project': project}
     return rpc.call(context, FLAGS.network_topic, {
         'method': 'create_public_dns_domain',
         'args': args
     })
Example #49
File: api.py Project: kiall/nova
 def get_vifs_by_instance(self, context, instance):
     # NOTE(vish): When the db calls are converted to store network
     #             data by instance_uuid, this should pass uuid instead.
     return rpc.call(context,
                     FLAGS.network_topic,
                     {'method': 'get_vifs_by_instance',
                      'args': {'instance_id': instance['id']}})
Example #50
File: api.py Project: kiall/nova
    def setup_networks_on_host(self, context, instance, host=None,
                                                        teardown=False):
        """Setup or teardown the network structures on hosts related to
           instance"""
        host = host or instance['host']
        # NOTE(tr3buchet): host is passed in cases where we need to set up
        # or tear down the networks on a host which has been migrated to/from,
        # and instance['host'] is not yet or is no longer equal to it
        args = {'instance_id': instance['id'],
                'host': host,
                'teardown': teardown}

        # NOTE(tr3buchet): the call is just to wait for completion
        rpc.call(context, FLAGS.network_topic,
                 {'method': 'setup_networks_on_host',
                  'args': args})
Example #51
File: api.py Project: xtoddx/nova
 def create_private_dns_domain(self, context, domain, availability_zone):
     """Create a private DNS domain with nova availability zone."""
     args = {'domain': domain, 'av_zone': availability_zone}
     return rpc.call(context, FLAGS.network_topic, {
         'method': 'create_private_dns_domain',
         'args': args
     })
Example #52
 def get_pool_for_instance_host(self, context, instance_host):
     """Gets console pool info for the instance."""
     context = context.elevated()
     console_type = self.driver.console_type
     try:
         pool = self.db.console_pool_get_by_host_type(context,
                                                      instance_host,
                                                      self.host,
                                                      console_type)
     except exception.NotFound:
         pool_info = rpc.call(context,
                              self.db.queue_get_for(context,
                                                    FLAGS.compute_topic,
                                                    instance_host),
                              {'method': 'get_console_pool_info',
                               'args': {'console_type': console_type}})
         pool_info['password'] = self.driver.fix_pool_password(
                                                 pool_info['password'])
         pool_info['host'] = self.host
         # ESX Address or Proxy Address
         public_host_name = pool_info['address']
         if FLAGS.console_public_hostname:
             public_host_name = FLAGS.console_public_hostname
         pool_info['public_hostname'] = public_host_name
         pool_info['console_type'] = console_type
         pool_info['compute_host'] = instance_host
         pool = self.db.console_pool_create(context, pool_info)
     return pool
Example #53
 def get_dns_zones(self, context):
     """Returns a list of available dns zones.
     These can be used to create DNS entries for floating ips.
     """
     return rpc.call(context,
                     FLAGS.network_topic,
                     {'method': 'get_dns_zones'})
Example #54
 def initialize_connection(self, context, volume, connector):
     host = volume['host']
     queue = rpc.queue_get_for(context, FLAGS.volume_topic, host)
     return rpc.call(context, queue,
                     {"method": "initialize_connection",
                      "args": {"volume_id": volume['id'],
                               "connector": connector}})
Example #55
 def get_pool_for_instance_host(self, context, instance_host):
     context = context.elevated()
     console_type = self.driver.console_type
     try:
         pool = self.db.console_pool_get_by_host_type(context,
                                                      instance_host,
                                                      self.host,
                                                      console_type)
     except exception.NotFound:
          # NOTE(mdragon): Right now, the only place this info exists is the
          #                compute worker's flagfile, at least for
          #                xenserver. Thus we need to ask.
         if FLAGS.stub_compute:
             pool_info = {'address': '127.0.0.1',
                          'username': '******',
                          'password': '******'}
         else:
             pool_info = rpc.call(context,
                              self.db.queue_get_for(context,
                                                FLAGS.compute_topic,
                                                instance_host),
                    {'method': 'get_console_pool_info',
                     'args': {'console_type': console_type}})
         pool_info['password'] = self.driver.fix_pool_password(
                                                 pool_info['password'])
         pool_info['host'] = self.host
         pool_info['public_hostname'] = FLAGS.console_public_hostname
         pool_info['console_type'] = self.driver.console_type
         pool_info['compute_host'] = instance_host
         pool = self.db.console_pool_create(context, pool_info)
     return pool
Example #56
    def _allocate_fixed_ips(self, context, instance_id, host, networks,
                            **kwargs):
        """Calls allocate_fixed_ip once for each network."""
        green_pool = greenpool.GreenPool()

        vpn = kwargs.pop('vpn')
        for network in networks:
            # NOTE(vish): if we are not multi_host, pass to the network host
            if not network['multi_host']:
                host = network['host']
            # NOTE(vish): if there is no network host, set one
            if host is None:
                host = rpc.call(context, FLAGS.network_topic,
                                {'method': 'set_network_host',
                                 'args': {'network_ref': network}})
            if host != self.host:
                # need to call allocate_fixed_ip to correct network host
                topic = self.db.queue_get_for(context,
                                              FLAGS.network_topic,
                                              host)
                args = {}
                args['instance_id'] = instance_id
                args['network_id'] = network['id']
                args['vpn'] = vpn

                green_pool.spawn_n(rpc.call, context, topic,
                                   {'method': '_rpc_allocate_fixed_ip',
                                    'args': args})
            else:
                # i am the correct host, run here
                self.allocate_fixed_ip(context, instance_id, network, vpn=vpn)

        # wait for all of the allocates (if any) to finish
        green_pool.waitall()
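Note: the fan-out above relies on eventlet green threads. A runnable sketch of the same pattern with the rpc call replaced by a stub (requires the eventlet package; fake_rpc_call is a made-up name):

    import eventlet
    eventlet.monkey_patch()
    from eventlet import greenpool

    def fake_rpc_call(topic, msg):
        eventlet.sleep(0.01)  # stands in for network latency
        print('called %s with %s' % (topic, msg['method']))

    pool = greenpool.GreenPool()
    for network_id in (1, 2, 3):
        pool.spawn_n(fake_rpc_call, 'network.host%d' % network_id,
                     {'method': '_rpc_allocate_fixed_ip',
                      'args': {'network_id': network_id}})
    pool.waitall()  # block until every spawned call has finished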
Example #57
File: api.py Project: xtoddx/nova
 def get(self, context, fixed_range, network_uuid):
     return rpc.call(context, FLAGS.network_topic, {
         'method': 'get_network',
         'args': {
             'network_uuid': network_uuid
         }
     })