Example #1
 def test_public_network_association(self):
     """Makes sure that we can allocaate a public ip"""
     # TODO(vish): better way of adding floating ips
     self.context._project = self.projects[0]
     self.context.project_id = self.projects[0].id
     pubnet = IPy.IP(flags.FLAGS.floating_range)
     address = str(pubnet[0])
     try:
         db.floating_ip_get_by_address(context.get_admin_context(), address)
     except exception.NotFound:
         db.floating_ip_create(context.get_admin_context(),
                               {'address': address,
                                'host': FLAGS.host})
     float_addr = self.network.allocate_floating_ip(self.context,
                                                    self.projects[0].id)
     fix_addr = self._create_address(0)
     lease_ip(fix_addr)
     self.assertEqual(float_addr, str(pubnet[0]))
     self.network.associate_floating_ip(self.context, float_addr, fix_addr)
     address = db.instance_get_floating_address(context.get_admin_context(),
                                                self.instance_id)
     self.assertEqual(address, float_addr)
     self.network.disassociate_floating_ip(self.context, float_addr)
     address = db.instance_get_floating_address(context.get_admin_context(),
                                                self.instance_id)
     self.assertEqual(address, None)
     self.network.deallocate_floating_ip(self.context, float_addr)
     self.network.deallocate_fixed_ip(self.context, fix_addr)
     release_ip(fix_addr)
     db.floating_ip_destroy(context.get_admin_context(), float_addr)
Example #2
 def _set_machine_id(self, client_factory, instance):
     """
     Set the machine id of the VM for guest tools to pick up and change
     the IP.
     """
     vm_ref = self._get_vm_ref_from_the_name(instance.name)
     if vm_ref is None:
         raise exception.InstanceNotFound(instance_id=instance.id)
     network = db.network_get_by_instance(context.get_admin_context(),
                                         instance['id'])
     mac_addr = instance.mac_address
     net_mask = network["netmask"]
     gateway = network["gateway"]
     ip_addr = db.instance_get_fixed_address(context.get_admin_context(),
                                         instance['id'])
     machine_id_change_spec = \
         vm_util.get_machine_id_change_spec(client_factory, mac_addr,
                                            ip_addr, net_mask, gateway)
     LOG.debug(_("Reconfiguring VM instance %(name)s to set the machine id "
               "with ip - %(ip_addr)s") %
               ({'name': instance.name,
                'ip_addr': ip_addr}))
     reconfig_task = self._session._call_method(self._session._get_vim(),
                                                "ReconfigVM_Task", vm_ref,
                                                spec=machine_id_change_spec)
     self._session._wait_for_task(instance.id, reconfig_task)
     LOG.debug(_("Reconfigured VM instance %(name)s to set the machine id "
               "with ip - %(ip_addr)s") %
               ({'name': instance.name,
                'ip_addr': ip_addr}))
Example #3
def list_vms(host=None):
    """
    Make a list of VMs and expand out their fixed and floating IPs sensibly.
    """
    flags.parse_args([])
    my_instances = []
    if host is None:
        instances = db.instance_get_all(context.get_admin_context())
    else:
        instances = db.instance_get_all_by_host(
                      context.get_admin_context(), host)

    for instance in instances:
        my_inst = dict(instance).copy()
        for (k, v) in my_inst.items():
            try:
                # Probe JSON serializability; coerce to str() if it fails.
                json.dumps(v)
            except TypeError:
                my_inst[k] = str(v)

        ec2_id = db.get_ec2_instance_id_by_uuid(context.get_admin_context(),
                                                instance.uuid)
        ec2_id = 'i-' + hex(int(ec2_id)).replace('0x', '').zfill(8)
        my_inst['ec2_id'] = ec2_id
        fixed_ips = []
        try:
            fixed_ips = db.fixed_ip_get_by_instance(
                context.get_admin_context(), instance.uuid)
        except Exception:
            # The instance may have no fixed IPs; leave the list empty.
            pass
        my_inst['fixed_ips'] = [ip.address for ip in fixed_ips]
        my_inst['floating_ips'] = []
        for ip in fixed_ips:
            my_inst['floating_ips'].extend(
                [f_ip.address
                 for f_ip in db.floating_ip_get_by_fixed_address(
                     context.get_admin_context(), ip.address)])

        my_instances.append(my_inst)

    return my_instances
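
A minimal usage sketch (the host name is illustrative), assuming list_vms() returns the my_instances list it builds:

if __name__ == '__main__':
    # Print one line per instance: EC2 id plus its fixed and floating IPs.
    for inst in list_vms(host='compute-01'):
        print('%s %s %s' % (inst['ec2_id'], inst['fixed_ips'],
                            inst['floating_ips']))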
Example #4
def set_instance_error_state_and_notify(instance):
    """
    Set an instance to ERROR state and send out a notification
    """
    # Set the instance to ERROR state. The instance could be a
    # dictionary when this method is called during the virtual
    # machine delete process.
    instance['vm_state'] = vm_states.ERROR
    conductor.API().instance_update(
        context.get_admin_context(), instance['uuid'],
        vm_state=vm_states.ERROR,
        task_state=None)
    instance_name = instance['name']
    host_name = instance['host']
    LOG.warn(_('Unable to find virtual machine %(inst_name)s '
               'on host %(host)s. Set state to ERROR')
             % {'inst_name': instance_name,
                'host': host_name})
    # Send event notification
    note = {'event_type': 'compute.instance.log',
            'msg': _('Unable to find virtual machine {instance_name} on '
                     'host {host_name}. An operation might have been '
                     'performed on the virtual machine outside of PowerVC or'
                     ' the deploy of the virtual machine failed. '
                     'The virtual machine is now set to Error state in the '
                     'database.'),
            'instance_name': instance_name,
            'host_name': host_name}
    notifier = rpc.get_notifier(service='compute', host=host_name)
    notifier.warn(context.get_admin_context(), 'compute.instance.log',
                  note)
Example #5
    def test_too_many_addresses(self):
        """Test for a NoMoreAddresses exception when all fixed ips are used.
        """
        admin_context = context.get_admin_context()
        network = db.project_get_network(admin_context, self.projects[0].id)
        num_available_ips = db.network_count_available_ips(admin_context,
                                                           network['id'])
        addresses = []
        instance_ids = []
        for i in range(num_available_ips):
            instance_ref = self._create_instance(0)
            instance_ids.append(instance_ref['id'])
            address = self._create_address(0, instance_ref['id'])
            addresses.append(address)
            lease_ip(address)

        ip_count = db.network_count_available_ips(context.get_admin_context(),
                                                  network['id'])
        self.assertEqual(ip_count, 0)
        self.assertRaises(db.NoMoreAddresses,
                          self.network.allocate_fixed_ip,
                          self.context,
                          'foo')

        for i in range(num_available_ips):
            self.network.deallocate_fixed_ip(self.context, addresses[i])
            release_ip(addresses[i])
            db.instance_destroy(context.get_admin_context(), instance_ids[i])
        ip_count = db.network_count_available_ips(context.get_admin_context(),
                                                  network['id'])
        self.assertEqual(ip_count, num_available_ips)
Example #6
    def setUp(self):
        super(FloatingIpsBulkTest, self).setUp()
        pool = CONF.default_floating_pool
        interface = CONF.public_interface

        self.ip_pool = [
            {
                'address': "10.10.10.1",
                'pool': pool,
                'interface': interface,
                'host': None
                },
            {
                'address': "10.10.10.2",
                'pool': pool,
                'interface': interface,
                'host': None
                },
            {
                'address': "10.10.10.3",
                'pool': pool,
                'interface': interface,
                'host': "testHost"
                },
            ]
        self.compute.db.floating_ip_bulk_create(
            context.get_admin_context(), self.ip_pool)

        self.addCleanup(self.compute.db.floating_ip_bulk_destroy,
            context.get_admin_context(), self.ip_pool)
Example #7
 def tearDown(self):
     # Remove the volume types from the database
     db.api.volume_type_purge(context.get_admin_context(),
                              self.vol_type1['name'])
     db.api.volume_type_purge(context.get_admin_context(),
                              self.vol_type2_noextra['name'])
     super(VolumeTypeExtraSpecsTestCase, self).tearDown()
Example #8
 def setUp(self):
     super(CloudpipeTest, self).setUp()
     self.flags(allow_admin_api=True)
     self.app = fakes.wsgi_app()
     inner_app = v2.APIRouter()
     adm_ctxt = context.get_admin_context()
     self.app = auth.InjectContext(adm_ctxt, inner_app)
     route = inner_app.map.match('/1234/os-cloudpipe')
     self.controller = route['controller'].controller
     fakes.stub_out_networking(self.stubs)
     fakes.stub_out_rate_limiting(self.stubs)
     self.stubs.Set(db, "instance_get_all_by_project",
                    db_instance_get_all_by_project)
     self.stubs.Set(db, "security_group_exists",
                    db_security_group_exists)
     self.stubs.SmartSet(self.controller.cloudpipe, "launch_vpn_instance",
                         pipelib_launch_vpn_instance)
     #self.stubs.SmartSet(self.controller.auth_manager, "get_project",
     #                    auth_manager_get_project)
     #self.stubs.SmartSet(self.controller.auth_manager, "get_projects",
     #                    auth_manager_get_projects)
     # NOTE(todd): The above code (just setting the stub, not invoking it)
     # causes failures in AuthManagerLdapTestCase.  So use a fake object.
     self.controller.auth_manager = FakeAuthManager()
     self.stubs.Set(utils, 'vpn_ping', utils_vpn_ping)
     self.context = context.get_admin_context()
     global EMPTY_INSTANCE_LIST
     EMPTY_INSTANCE_LIST = True
Example #9
 def test_timestamp_columns(self):
     """
         Test the time stamp columns createEpoch,
         modifiedEpoch and deletedEpoch
     """
     portGrp = PortGroup()
     portGrp.set_id('portGrp-01')
     # Check for createEpoch
     epoch_before = utils.get_current_epoch_ms()
     api.port_group_save(get_admin_context(), portGrp)
     epoch_after = utils.get_current_epoch_ms()
     portGrp_queried = api.port_group_get_by_ids(
         get_admin_context(), [portGrp.get_id()])[0]
     self.assert_(test_utils.is_timestamp_between(
         epoch_before, epoch_after, portGrp_queried.get_createEpoch()))
     # Check for lastModifiedEpoch
     portGrp_modified = portGrp_queried
     test_utils.unset_timestamp_fields(portGrp_modified)
     portGrp_modified.set_name('changed_name')
     epoch_before = utils.get_current_epoch_ms()
     api.port_group_save(get_admin_context(), portGrp_modified)
     epoch_after = utils.get_current_epoch_ms()
     portGrp_queried = api.port_group_get_by_ids(
         get_admin_context(), [portGrp.get_id()])[0]
     self.assert_(portGrp_modified.get_createEpoch() ==
                  portGrp_queried.get_createEpoch())
     self.assert_(test_utils.is_timestamp_between(
         epoch_before,
         epoch_after,
         portGrp_queried.get_lastModifiedEpoch()))
Example #10
    def test_vm_netadapter_save(self):
        vm = Vm()
        vm.id = 'VM1'
        vmNetAdapter = VmNetAdapter()
        vmNetAdapter.set_id('netAdapter-01')
        vmNetAdapter.set_name('netAdapter-01')
        vmNetAdapter.set_addressType('assigned')
        vmNetAdapter.set_adapterType('E1000')
        vmNetAdapter.set_switchType('vSwitch')
        vmNetAdapter.set_macAddress('00:50:56:81:1c:d0')
        vmNetAdapter.add_ipAddresses('1.1.1.1')
        vmNetAdapter.set_networkName('br100')
        vmNetAdapter.set_vlanId(0)

        vm.add_vmNetAdapters(vmNetAdapter)
        healthnmon_db_api.vm_save(get_admin_context(), vm)
        virtual_machines = \
            healthnmon_db_api.vm_get_by_ids(get_admin_context(), ['VM1'])
        vm_from_db = virtual_machines[0]
        netAdapters = vm_from_db.get_vmNetAdapters()
        netAdapter = netAdapters[0]
        self.assertTrue(vmNetAdapter.get_id() == netAdapter.get_id())
        healthnmon_db_api.vm_delete_by_ids(get_admin_context(), [vm.id])

        vms = healthnmon_db_api.vm_get_by_ids(get_admin_context(),
                                              [vm.id])
        self.assertTrue(vms is None or len(vms) == 0, 'VM not deleted')
Example #11
    def _initCache(self):

        # Read from DB all the vmHost objects and populate
        # the cache for each IP if cache is empty

        LOG.info(_('Entering into initCache'))
        computes = db.compute_node_get_all(get_admin_context())
        for compute in computes:
            compute_id = str(compute['id'])
            service = compute['service']
            self._add_compute_to_inventory(compute['hypervisor_type'],
                                           compute_id, service['host'])

        vmhosts = api.vm_host_get_all(get_admin_context())
        vms = api.vm_get_all(get_admin_context())
        storageVolumes = api.storage_volume_get_all(get_admin_context())
        subNets = api.subnet_get_all(get_admin_context())
        self._updateInventory(vmhosts)
        self._updateInventory(vms)
        self._updateInventory(storageVolumes)
        self._updateInventory(subNets)

        LOG.info(_('Hosts obtained from db: %s') % str(len(vmhosts)))
        LOG.info(_('Vms obtained from db: %s') % str(len(vms)))
        LOG.info(_('Storage volumes obtained from db: %s') %
                 str(len(storageVolumes)))
        LOG.info(_('Subnets obtained from db: %s') % str(len(subNets)))

        LOG.info(_('Completed the initCache method'))
Example #12
 def remove_role(self, uid, role, project_id=None):
     """Remove role for user (or user and project)"""
     if not project_id:
         db.user_remove_role(context.get_admin_context(), uid, role)
         return
     db.user_remove_project_role(context.get_admin_context(),
                                 uid, project_id, role)
Example #13
 def add_role(self, uid, role, project_id=None):
     """Add role for user (or user and project)"""
     if not project_id:
         db.user_add_role(context.get_admin_context(), uid, role)
         return
     db.user_add_project_role(context.get_admin_context(),
                              uid, project_id, role)
Example #14
 def get_projects(self, uid=None):
     """Retrieve list of projects"""
     if uid:
         result = db.project_get_by_user(context.get_admin_context(), uid)
     else:
         result = db.project_get_all(context.get_admin_context())
     return [self._db_project_to_auth_projectuser(proj) for proj in result]
Example #15
    def test_run_kill_vm(self):
        """Detect when a vm is terminated behind the scenes"""
        self.stubs = stubout.StubOutForTesting()
        self.stubs.Set(compute_manager.ComputeManager,
                '_report_driver_status', nop_report_driver_status)

        instance_id = self._create_instance()

        self.compute.run_instance(self.context, instance_id)

        instances = db.instance_get_all(context.get_admin_context())
        LOG.info(_("Running instances: %s"), instances)
        self.assertEqual(len(instances), 1)

        instance_name = instances[0].name
        self.compute.driver.test_remove_vm(instance_name)

        # Force the compute manager to do its periodic poll
        error_list = self.compute.periodic_tasks(context.get_admin_context())
        self.assertFalse(error_list)

        instances = db.instance_get_all(context.get_admin_context())
        LOG.info(_("After force-killing instances: %s"), instances)
        self.assertEqual(len(instances), 1)
        self.assertEqual(power_state.SHUTOFF, instances[0]['state'])
Example #16
 def test_vm_host_get_all_for_vm(self):
     host_id = 'VH1'
     vmhost = VmHost()
     vmhost.id = host_id
     healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
     vm = Vm()
     vm.id = 'VM11'
     vm.set_vmHostId(host_id)
     healthnmon_db_api.vm_save(get_admin_context(), vm)
     vmhosts = \
         healthnmon_db_api.vm_host_get_all(get_admin_context())
     self.assertFalse(vmhosts is None,
                      'Host get by id returned a none list')
     self.assertTrue(len(vmhosts) > 0,
                     'Host get by id returned invalid number of list'
                     )
     self.assertTrue(vmhosts[0].id == host_id)
     vmids = vmhosts[0].get_virtualMachineIds()
     self.assert_(vmids is not None)
     self.assert_(len(vmids) == 1)
     self.assert_(vm.id in vmids)
     healthnmon_db_api.vm_delete_by_ids(get_admin_context(), [vm.id])
     vmhosts = \
         healthnmon_db_api.vm_host_get_all(get_admin_context())
     self.assertTrue(vmhosts[0].id == host_id)
     vmids = vmhosts[0].get_virtualMachineIds()
     self.assert_((vmids is None) or (len(vmids) == 0))
Example #17
    def test_vm_host_get_by_id(self):
        host_id = 'VH1'
        vmhost = VmHost()
        vmhost.id = host_id
        healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
        vm = Vm()
        vm.id = 'VM11'
        vm.set_vmHostId(host_id)
        healthnmon_db_api.vm_save(get_admin_context(), vm)
        mntPnt = HostMountPoint()
        mntPnt.set_vmHostId(host_id)
        mntPnt.set_path('/path')
        volume = StorageVolume()
        volume.set_id('SV11')
        volume.add_mountPoints(mntPnt)
        healthnmon_db_api.storage_volume_save(get_admin_context(),
                                              volume)

        vmhosts = \
            healthnmon_db_api.vm_host_get_by_ids(get_admin_context(),
                                                 [host_id])
        self.assertFalse(vmhosts is None,
                         'Host get by id returned a none list')
        self.assertTrue(len(vmhosts) > 0,
                        'Host get by id returned invalid number of list'
                        )
        self.assertTrue(vmhosts[0].id == host_id)
Example #18
    def test_port_group_delete(self):
        portgroup = PortGroup()
        portgroup.id = 'PG1'
        portgroup.name = 'test'
        portgroup.note = 'note'
        from decimal import Decimal
        portgroup.value = Decimal('123.00')
        portgroup.units = 'uni'
        portgroup.resourceManagerId = 'rm1'
        portgroup.type = 'port'
        portgroup.virtualSwitchId = 'VS1'
        portgroup.vmHostId = 'VM1'
        api.port_group_save(get_admin_context(), portgroup)

        pgs = api.port_group_get_by_ids(get_admin_context(),
                                        [portgroup.id])
        self.assertFalse(len(pgs) == 0, 'Portgroup could not be saved')

        api.port_group_delete_by_ids(get_admin_context(),
                                     [portgroup.id])
        portgroups = api.port_group_get_by_ids(get_admin_context(),
                                               [portgroup.id])

        self.assertTrue(portgroups is None or len(portgroups) == 0,
                        'port group not deleted')
Example #19
    def test_vm_host_get_all_for_sv(self):
        host_id = 'VH1'
        vmhost = VmHost()
        vmhost.id = host_id
        healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
        mntPnt = HostMountPoint()
        mntPnt.set_vmHostId(host_id)
        mntPnt.set_path('/path')
        volume = StorageVolume()
        volume.set_id('SV11')
        volume.add_mountPoints(mntPnt)
        healthnmon_db_api.storage_volume_save(get_admin_context(),
                                              volume)

        vmhosts = \
            healthnmon_db_api.vm_host_get_all(get_admin_context())
        self.assertFalse(vmhosts is None,
                         'Host get by id returned a none list')
        self.assertTrue(len(vmhosts) > 0,
                        'Host get by id returned invalid number of list'
                        )
        self.assertTrue(vmhosts[0].id == host_id)
        svlist = vmhosts[0].get_storageVolumeIds()
        self.assert_(svlist is not None)
        self.assert_(len(svlist) == 1)
        self.assert_(volume.get_id() in svlist)

        healthnmon_db_api.storage_volume_delete_by_ids(
            get_admin_context(), [volume.get_id()])
        vmhosts = \
            healthnmon_db_api.vm_host_get_all(get_admin_context())
        self.assertTrue(vmhosts[0].id == host_id)
        svids = vmhosts[0].get_storageVolumeIds()
        self.assert_((svids is None) or (len(svids) == 0))
Example #20
    def test_vm_save(self):
        '''
        Insert a vm object into db and check
        whether we are getting proper values after retrieval
        '''
        vm = Vm()
        vm.id = 'VM1-id'
        vm.name = 'VM1-Name'
        vmScsiController = VmScsiController()
        vmScsiController.set_id('VM_CTRL_1')
        vmScsiController.set_type('some_type')
        vm.add_vmScsiControllers(vmScsiController)
        healthnmon_db_api.vm_save(get_admin_context(), vm)

        vms = healthnmon_db_api.vm_get_by_ids(get_admin_context(), [vm.id])
        self.assertTrue(vms is not None)
        self.assertTrue(len(vms) == 1)
        self.assertEqual(vms[0].get_id(), 'VM1-id', "VM id is not same")
        self.assertEqual(vms[0].get_name(), 'VM1-Name', "VM name is not same")
        self.assert_(len(vms[0].get_vmScsiControllers()) == 1,
                     "vmScsiController len mismatch")
        self.assert_(vms[0].get_vmScsiControllers()[0].get_id() ==
                     vmScsiController.get_id(),
                     "vmScsiController id mismatch")
        self.assert_(vms[0].get_vmScsiControllers()[0].get_type() ==
                     vmScsiController.get_type(),
                     "vmScsiController type mismatch")
Example #21
    def test_vm_save_update(self):
        '''
        Update an existing object in db
        '''
        vm = Vm()
        vm.id = 'VM1-id'
        healthnmon_db_api.vm_save(get_admin_context(), vm)

        vmGlobalSettings = VmGlobalSettings()
        vmGlobalSettings.set_id(vm.id)
        vmGlobalSettings.set_autoStartAction('autoStartAction')
        vmGlobalSettings.set_autoStopAction('autoStopAction')
        vm.set_vmGlobalSettings(vmGlobalSettings)
        healthnmon_db_api.vm_save(get_admin_context(), vm)

        vms = healthnmon_db_api.vm_get_by_ids(get_admin_context(), [vm.id])
        self.assertTrue(vms is not None)
        self.assertTrue(len(vms) == 1)
        vm = vms[0]
        self.assertEqual(vm.get_id(), 'VM1-id', "VM id is not same")
        vmGlobalSets = vm.get_vmGlobalSettings()
        self.assertTrue(vmGlobalSets is not None)
        self.assertEqual(vmGlobalSets.get_id(), 'VM1-id', "VM id is not same")
        self.assertEqual(vmGlobalSets.get_autoStartAction(),
                         'autoStartAction', "autoStartAction is not same")
        self.assertEqual(vmGlobalSets.get_autoStopAction(),
                         'autoStopAction', "autoStopAction is not same")
Example #22
 def init_host(self):
     # Initialize general L3 networking
     self.l3driver.initialize()
     super(QuantumManager, self).init_host()
     # Initialize floating ip support (only works for nova ipam currently)
     if FLAGS.quantum_ipam_lib == 'nova.network.quantum.nova_ipam_lib':
         LOG.debug("Initializing FloatingIP support")
         self.init_host_floating_ips()
     # Set up all the forwarding rules for any network that has a
     # gateway set.
     networks = self.get_all_networks(context.get_admin_context())
     cidrs = []
     for net in networks:
         # Don't update host information for network that does not
         # belong to you
         if net['host'] != self.host:
             continue
         if net['gateway']:
             LOG.debug("Initializing NAT: %s (cidr: %s, gw: %s)" % (
                 net['label'], net['cidr'], net['gateway']))
             cidrs.append(net['cidr'])
         self._update_network_host(context.get_admin_context(),
                                   net['uuid'])
     # .. and for each network
     for c in cidrs:
         self.l3driver.initialize_network(c)
Example #23
 def test_unreserve(self):
     db.fixed_ip_update(context.get_admin_context(), '10.0.0.100',
                        {'reserved': True})
     self.commands.unreserve('10.0.0.100')
     address = db.fixed_ip_get_by_address(context.get_admin_context(),
                                          '10.0.0.100')
     self.assertEqual(address['reserved'], False)
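
The reserve direction is symmetric; a sketch under the assumption that the same commands object also exposes a reserve() call (hypothetical here, not shown in the source):

 def test_reserve_sketch(self):
     # Hypothetical mirror of test_unreserve; assumes commands.reserve().
     self.commands.reserve('10.0.0.100')
     address = db.fixed_ip_get_by_address(context.get_admin_context(),
                                          '10.0.0.100')
     self.assertEqual(address['reserved'], True)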
Example #24
    def list(self, host=None):
        """Lists all fixed ips (optionally by host)."""
        ctxt = context.get_admin_context()

        try:
            if host is None:
                fixed_ips = db.fixed_ip_get_all(ctxt)
            else:
                fixed_ips = db.fixed_ip_get_by_host(ctxt, host)

        except exception.NotFound as ex:
            print(_("error: %s") % ex)
            return 2

        instances = db.instance_get_all(context.get_admin_context())
        instances_by_uuid = {}
        for instance in instances:
            instances_by_uuid[instance['uuid']] = instance

        print("%-18s\t%-15s\t%-15s\t%s" % (_('network'),
                                              _('IP address'),
                                              _('hostname'),
                                              _('host')))

        all_networks = {}
        try:
            # use network_get_all to retrieve all existing networks
            # this is to ensure that IPs associated with deleted networks
            # will not throw exceptions.
            for network in db.network_get_all(context.get_admin_context()):
                all_networks[network.id] = network
        except exception.NoNetworksFound:
            # do not have any networks, so even if there are IPs, these
            # IPs should have been deleted ones, so return.
            print(_('No fixed IP found.'))
            return

        has_ip = False
        for fixed_ip in fixed_ips:
            hostname = None
            host = None
            network = all_networks.get(fixed_ip['network_id'])
            if network:
                has_ip = True
                if fixed_ip.get('instance_uuid'):
                    instance = instances_by_uuid.get(fixed_ip['instance_uuid'])
                    if instance:
                        hostname = instance['hostname']
                        host = instance['host']
                    else:
                        print(_('WARNING: fixed ip %s allocated to missing'
                                ' instance') % str(fixed_ip['address']))
                print("%-18s\t%-15s\t%-15s\t%s" % (
                        network['cidr'],
                        fixed_ip['address'],
                        hostname, host))

        if not has_ip:
            print(_('No fixed IP found.'))
Example #25
def is_allocated_in_project(address, project_id):
    """Returns true if address is in specified project"""
    project_net = db.project_get_network(context.get_admin_context(),
                                         project_id)
    network = db.fixed_ip_get_network(context.get_admin_context(), address)
    instance = db.fixed_ip_get_instance(context.get_admin_context(), address)
    # instance exists until release
    return instance is not None and network['id'] == project_net['id']
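
A short usage sketch (the address and project id are illustrative):

# True only while an instance still holds the fixed IP inside the
# project's network; after release the instance lookup returns None.
if is_allocated_in_project('10.0.0.5', 'my-project'):
    print('address is still allocated in my-project')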
Example #26
 def test_admin_no_overwrite(self):
     # If there is already a context in the cache, creating an admin
     # context will not overwrite it.
     ctx1 = context.RequestContext('111',
                                   '222',
                                   overwrite=True)
     context.get_admin_context()
     self.assertIs(o_context.get_current(), ctx1)
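
The test depends on get_admin_context() leaving the thread-local context alone; a sketch of the expected shape, assuming it passes overwrite=False through to RequestContext (the helper name below is illustrative):

def get_admin_context_sketch(read_deleted="no"):
    # Sketch: an is_admin context with no user/project, created with
    # overwrite=False so any cached request context stays current.
    return context.RequestContext(user_id=None,
                                  project_id=None,
                                  is_admin=True,
                                  read_deleted=read_deleted,
                                  overwrite=False)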
Example #27
 def _validate_user_and_project(self, user_id, project_id):
     user = db.user_get(context.get_admin_context(), user_id)
     if not user:
         raise exception.UserNotFound(user_id=user_id)
     project = db.project_get(context.get_admin_context(), project_id)
     if not project:
         raise exception.ProjectNotFound(project_id=project_id)
     return user, project
Example #28
 def _validate_user_and_project(self, user_id, project_id):
     user = db.user_get(context.get_admin_context(), user_id)
     if not user:
         raise exception.NotFound(_('User "%s" not found') % user_id)
     project = db.project_get(context.get_admin_context(), project_id)
     if not project:
         raise exception.NotFound(_('Project "%s" not found') % project_id)
     return user, project
Example #29
    def test_vm_host_save_update_with_new_vSwitch(self):
        host_id = 'VH1'
        vmhost = VmHost()
        vmhost.id = host_id

        vSwitch = VirtualSwitch()
        vSwitch.set_id('vSwitch-01')
        vSwitch.set_name('vSwitch-01')
        vSwitch.set_resourceManagerId('rmId')
        vSwitch.set_switchType('vSwitch')

        cost1 = Cost()
        cost1.set_value(100)
        cost1.set_units('USD')
        vSwitch.set_cost(cost1)

        portGroup = PortGroup()
        portGroup.set_id('pg-01')
        portGroup.set_name('pg-01')
        portGroup.set_resourceManagerId('rmId')
        portGroup.set_type('portgroup_type')
        portGroup.set_cost(cost1)
        vSwitch.add_portGroups(portGroup)
        vmhost.add_virtualSwitches(vSwitch)
        vmhost.add_portGroups(portGroup)
        healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)

        vSwitch_new = VirtualSwitch()
        vSwitch_new.set_id('vSwitch-02')
        vSwitch_new.set_name('vSwitch-02')
        vSwitch_new.set_resourceManagerId('rmId')
        vSwitch_new.set_switchType('vSwitch')

        portGroup_new = PortGroup()
        portGroup_new.set_id('pg-02')
        portGroup_new.set_name('pg-02')
        portGroup_new.set_resourceManagerId('rmId')
        portGroup_new.set_type('portgroup_type')
        vSwitch.add_portGroups(portGroup_new)
        vmhost.add_virtualSwitches(vSwitch_new)
        vmhost.add_portGroups(portGroup_new)
        healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)

        vmhosts = \
            healthnmon_db_api.vm_host_get_by_ids(get_admin_context(),
                                                 [host_id])
        self.assertFalse(vmhosts is None,
                         'Host get by id returned a none list')
        self.assertTrue(len(vmhosts) > 0,
                        'Host get by id returned invalid number of list'
                        )
        self.assertTrue(
            len(vmhosts[0].get_virtualSwitches()) > 0,
            'Host get by virtual switch returned invalid number of list')
        self.assertTrue(
            len(vmhosts[0].get_portGroups()) > 0,
            'Host get by port group returned invalid number of list')
        self.assertTrue(vmhosts[0].id == host_id)
Example #30
 def get_user_roles(self, uid, project_id=None):
     """Retrieve list of roles for user (or user and project)"""
     if project_id is None:
         roles = db.user_get_roles(context.get_admin_context(), uid)
         return roles
     else:
         roles = db.user_get_roles_for_project(context.get_admin_context(),
                                               uid, project_id)
         return roles
Example #31
 def get_user(self, uid):
     """Retrieve user by id"""
     user = db.user_get(context.get_admin_context(), uid)
     return self._db_user_to_auth_user(user)
Example #32
 def get_project(self, pid):
     """Retrieve project by id"""
     project = db.project_get(context.get_admin_context(), pid)
     return self._db_project_to_auth_projectuser(project)
Example #33
 def setUp(self):
     super(VersionsTest, self).setUp()
     self.context = context.get_admin_context()
Example #34
def stub_instance(id=1, user_id=None, project_id=None, host=None,
                  node=None, vm_state=None, task_state=None,
                  reservation_id="", uuid=FAKE_UUID, image_ref=FAKE_UUID,
                  flavor_id="1", name=None, key_name='',
                  access_ipv4=None, access_ipv6=None, progress=0,
                  auto_disk_config=False, display_name=None,
                  display_description=None,
                  include_fake_metadata=True, config_drive=None,
                  power_state=None, nw_cache=None, metadata=None,
                  security_groups=None, root_device_name=None,
                  limit=None, marker=None,
                  launched_at=timeutils.utcnow(),
                  terminated_at=timeutils.utcnow(),
                  availability_zone='', locked_by=None, cleaned=False,
                  memory_mb=0, vcpus=0, root_gb=0, ephemeral_gb=0,
                  flavor=None, launch_index=0, kernel_id="",
                  ramdisk_id="", user_data=None, system_metadata=None,
                  services=None, trusted_certs=None, hidden=False):
    if user_id is None:
        user_id = 'fake_user'
    if project_id is None:
        project_id = 'fake_project'

    if metadata:
        metadata = [{'key': k, 'value': v} for k, v in metadata.items()]
    elif include_fake_metadata:
        metadata = [models.InstanceMetadata(key='seq', value=str(id))]
    else:
        metadata = []

    sys_meta = flavors.save_flavor_info(
        {}, flavors.get_flavor_by_flavor_id(int(flavor_id)))
    sys_meta.update(system_metadata or {})

    if host is not None:
        host = str(host)

    if key_name:
        key_data = 'FAKE'
    else:
        key_data = ''

    if security_groups is None:
        security_groups = [{"id": 1, "name": "test", "description": "Foo:",
                            "project_id": "project", "user_id": "user",
                            "created_at": None, "updated_at": None,
                            "deleted_at": None, "deleted": False}]

    # ReservationID isn't sent back, hack it in there.
    server_name = name or "server%s" % id
    if reservation_id != "":
        server_name = "reservation_%s" % (reservation_id, )

    info_cache = create_info_cache(nw_cache)

    if flavor is None:
        flavor = objects.Flavor.get_by_name(
            context.get_admin_context(), 'm1.small')
    flavorinfo = jsonutils.dumps({
        'cur': flavor.obj_to_primitive(),
        'old': None,
        'new': None,
    })

    instance = {
        "id": int(id),
        "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
        "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
        "deleted_at": datetime.datetime(2010, 12, 12, 10, 0, 0),
        "deleted": None,
        "user_id": user_id,
        "project_id": project_id,
        "image_ref": image_ref,
        "kernel_id": kernel_id,
        "ramdisk_id": ramdisk_id,
        "hostname": display_name or server_name,
        "launch_index": launch_index,
        "key_name": key_name,
        "key_data": key_data,
        "power_state": power_state,
        "vm_state": vm_state or vm_states.ACTIVE,
        "task_state": task_state,
        "services": services,
        "memory_mb": memory_mb,
        "vcpus": vcpus,
        "root_gb": root_gb,
        "ephemeral_gb": ephemeral_gb,
        "ephemeral_key_uuid": None,
        "host": host,
        "node": node,
        "instance_type_id": flavor.id,
        "user_data": user_data,
        "reservation_id": reservation_id,
        "launched_at": launched_at,
        "terminated_at": terminated_at,
        "availability_zone": availability_zone,
        "display_name": display_name or server_name,
        "display_description": display_description,
        "launched_on": "",
        "locked": locked_by is not None,
        "locked_by": locked_by,
        "os_type": "",
        "architecture": "",
        "vm_mode": "",
        "uuid": uuid,
        "root_device_name": root_device_name,
        "default_ephemeral_device": "",
        "default_swap_device": "",
        "config_drive": config_drive,
        "access_ip_v4": access_ipv4,
        "access_ip_v6": access_ipv6,
        "auto_disk_config": auto_disk_config,
        "progress": progress,
        "shutdown_terminate": True,
        "disable_terminate": False,
        "cell_name": "",
        "metadata": metadata,
        "system_metadata": utils.dict_to_metadata(sys_meta),
        "security_groups": security_groups,
        "cleaned": cleaned,
        "pci_devices": [],
        "extra": {"numa_topology": None,
                  "pci_requests": None,
                  "flavor": flavorinfo,
                  "trusted_certs": trusted_certs,
                  },
        "tags": [],
        "hidden": hidden,
        "name": "instance-%s" % id,
    }

    instance.update(info_cache)
    instance['info_cache']['instance_uuid'] = instance['uuid']

    return instance
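
A usage sketch; the overridden fields are illustrative:

# Build a fake instance dict for a test, overriding a few fields.
inst = stub_instance(id=42, host='fake-host', display_name='my-server')
assert inst['name'] == 'instance-42'
assert inst['display_name'] == 'my-server'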
Example #35
def get_flavor_access_by_flavor_id(flavorid, ctxt=None):
    """Retrieve flavor access list by flavor id."""
    if ctxt is None:
        ctxt = context.get_admin_context()

    return db.flavor_access_get_by_flavor_id(ctxt, flavorid)
Example #36
    def test_run_with_snapshot(self):
        # Makes sure run/stop/start instance with snapshot works.
        availability_zone = 'zone1:host1'
        vol1 = self.cloud.create_volume(self.context,
                                        size=1,
                                        availability_zone=availability_zone)

        snap1 = self.cloud.create_snapshot(self.context,
                                           vol1['volumeId'],
                                           name='snap-1',
                                           description='test snap of vol %s' %
                                           vol1['volumeId'])
        snap1_uuid = ec2utils.ec2_snap_id_to_uuid(snap1['snapshotId'])

        snap2 = self.cloud.create_snapshot(self.context,
                                           vol1['volumeId'],
                                           name='snap-2',
                                           description='test snap of vol %s' %
                                           vol1['volumeId'])
        snap2_uuid = ec2utils.ec2_snap_id_to_uuid(snap2['snapshotId'])

        kwargs = {
            'image_id': 'ami-1',
            'instance_type': CONF.default_instance_type,
            'max_count': 1,
            'block_device_mapping': [{
                'device_name': '/dev/vdb',
                'snapshot_id': snap1_uuid,
                'delete_on_termination': False,
            }, {
                'device_name': '/dev/vdc',
                'snapshot_id': snap2_uuid,
                'delete_on_termination': True
            }]
        }
        ec2_instance_id = self._run_instance(**kwargs)
        instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
                                                     ec2_instance_id)

        vols = self.volume_api.get_all(self.context)
        vols = [v for v in vols if v['instance_uuid'] == instance_uuid]

        self.assertEqual(len(vols), 2)

        vol1_id = None
        vol2_id = None
        for vol in vols:
            snapshot_uuid = vol['snapshot_id']
            if snapshot_uuid == snap1_uuid:
                vol1_id = vol['id']
                mountpoint = '/dev/vdb'
            elif snapshot_uuid == snap2_uuid:
                vol2_id = vol['id']
                mountpoint = '/dev/vdc'
            else:
                self.fail()

            self._assert_volume_attached(vol, instance_uuid, mountpoint)

        # Just make sure we found them
        self.assertTrue(vol1_id)
        self.assertTrue(vol2_id)

        self.cloud.terminate_instances(self.context, [ec2_instance_id])

        admin_ctxt = context.get_admin_context(read_deleted="no")
        vol = self.volume_api.get(admin_ctxt, vol1_id)
        self._assert_volume_detached(vol)
        self.assertFalse(vol['deleted'])
Example #37
class FakeRequest(object):
    environ = {"nova.context": context.get_admin_context()}

    def get_db_flavor(self, flavor_id):
        return INSTANCE_TYPES[flavor_id]
Example #38
    def test_availability_zone_detail(self):
        def _formatZone(zone_dict):
            result = []

            # Zone tree view item
            result.append({
                'zone_name': zone_dict['zone_name'],
                'zone_state': (u'available'
                               if zone_dict['zone_state']['available']
                               else u'not available')
            })

            if zone_dict['hosts'] is not None:
                for (host, services) in zone_dict['hosts'].items():
                    # Host tree view item
                    result.append({
                        'zone_name': u'|- %s' % host,
                        'zone_state': u''
                    })
                    for (svc, state) in services.items():
                        # Service tree view item
                        result.append({
                            'zone_name': u'| |- %s' % svc,
                            'zone_state': u'%s %s %s' % (
                                'enabled' if state['active'] else 'disabled',
                                ':-)' if state['available'] else 'XXX',
                                jsonutils.to_primitive(state['updated_at']))
                        })
            return result

        def _assertZone(zone, name, status):
            self.assertEqual(zone['zone_name'], name)
            self.assertEqual(zone['zone_state'], status)

        availabilityZone = availability_zone.AvailabilityZoneController()

        req = webob.Request.blank('/v3/os-availability-zone/detail')
        req.method = 'GET'
        req.environ['nova.context'] = context.get_admin_context()
        resp_dict = availabilityZone.detail(req)

        self.assertIn('availability_zone_info', resp_dict)
        zones = resp_dict['availability_zone_info']
        self.assertEqual(len(zones), 3)
        ''' availabilityZoneInfo field content in response body:
        [{'zone_name': 'zone-1',
          'zone_state': {'available': True},
          'hosts': {'fake_host-1': {
                        'nova-compute': {'active': True, 'available': True,
                          'updated_at': datetime(2012, 12, 26, 14, 45, 25)}}}},
         {'zone_name': 'internal',
          'zone_state': {'available': True},
          'hosts': {'fake_host-1': {
                        'nova-sched': {'active': True, 'available': True,
                          'updated_at': datetime(2012, 12, 26, 14, 45, 25)}},
                    'fake_host-2': {
                        'nova-network': {'active': True, 'available': False,
                          'updated_at': datetime(2012, 12, 26, 14, 45, 24)}}}},
         {'zone_name': 'zone-2',
          'zone_state': {'available': False},
          'hosts': None}]
        '''

        l0 = [u'zone-1', u'available']
        l1 = [u'|- fake_host-1', u'']
        l2 = [u'| |- nova-compute', u'enabled :-) 2012-12-26T14:45:25.000000']
        l3 = [u'internal', u'available']
        l4 = [u'|- fake_host-1', u'']
        l5 = [u'| |- nova-sched', u'enabled :-) 2012-12-26T14:45:25.000000']
        l6 = [u'|- fake_host-2', u'']
        l7 = [u'| |- nova-network', u'enabled XXX 2012-12-26T14:45:24.000000']
        l8 = [u'zone-2', u'not available']

        z0 = _formatZone(zones[0])
        z1 = _formatZone(zones[1])
        z2 = _formatZone(zones[2])

        self.assertEqual(len(z0), 3)
        self.assertEqual(len(z1), 5)
        self.assertEqual(len(z2), 1)

        _assertZone(z0[0], l0[0], l0[1])
        _assertZone(z0[1], l1[0], l1[1])
        _assertZone(z0[2], l2[0], l2[1])
        _assertZone(z1[0], l3[0], l3[1])
        _assertZone(z1[1], l4[0], l4[1])
        _assertZone(z1[2], l5[0], l5[1])
        _assertZone(z1[3], l6[0], l6[1])
        _assertZone(z1[4], l7[0], l7[1])
        _assertZone(z2[0], l8[0], l8[1])
Example #39
    def test_stop_start_with_volume(self):
        # Make sure run instance with block device mapping works.
        availability_zone = 'zone1:host1'
        vol1 = self.cloud.create_volume(self.context,
                                        size=1,
                                        availability_zone=availability_zone)
        vol2 = self.cloud.create_volume(self.context,
                                        size=1,
                                        availability_zone=availability_zone)
        vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
        vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
        # Enforce a short periodic-task interval to avoid waiting for 60s.
        self._restart_compute_service(periodic_interval_max=0.3)

        kwargs = {
            'image_id': 'ami-1',
            'instance_type': CONF.default_instance_type,
            'max_count': 1,
            'block_device_mapping': [
                {
                    'device_name': '/dev/sdb',
                    'volume_id': vol1_uuid,
                    'delete_on_termination': False
                },
                {
                    'device_name': '/dev/sdc',
                    'volume_id': vol2_uuid,
                    'delete_on_termination': True
                },
            ]
        }
        ec2_instance_id = self._run_instance(**kwargs)
        instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
                                                     ec2_instance_id)
        vols = self.volume_api.get_all(self.context)
        vols = [v for v in vols if v['instance_uuid'] == instance_uuid]

        self.assertEqual(len(vols), 2)
        for vol in vols:
            self.assertTrue(
                str(vol['id']) == str(vol1_uuid)
                or str(vol['id']) == str(vol2_uuid))
            if str(vol['id']) == str(vol1_uuid):
                self.volume_api.attach(self.context, vol['id'], instance_uuid,
                                       '/dev/sdb')
            elif str(vol['id']) == str(vol2_uuid):
                self.volume_api.attach(self.context, vol['id'], instance_uuid,
                                       '/dev/sdc')

        vol = self.volume_api.get(self.context, vol1_uuid)
        self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')

        vol = self.volume_api.get(self.context, vol2_uuid)
        self._assert_volume_attached(vol, instance_uuid, '/dev/sdc')

        result = self.cloud.stop_instances(self.context, [ec2_instance_id])
        self.assertTrue(result)

        vol = self.volume_api.get(self.context, vol1_uuid)
        self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')

        vol = self.volume_api.get(self.context, vol2_uuid)
        self._assert_volume_attached(vol, instance_uuid, '/dev/sdc')

        self.cloud.start_instances(self.context, [ec2_instance_id])
        vols = self.volume_api.get_all(self.context)
        vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
        self.assertEqual(len(vols), 2)
        for vol in vols:
            self.assertTrue(
                str(vol['id']) == str(vol1_uuid)
                or str(vol['id']) == str(vol2_uuid))
            self.assertTrue(vol['mountpoint'] == '/dev/sdb'
                            or vol['mountpoint'] == '/dev/sdc')
            self.assertEqual(vol['instance_uuid'], instance_uuid)
            self.assertEqual(vol['status'], "in-use")
            self.assertEqual(vol['attach_status'], "attached")

        # Here we puke...
        self.cloud.terminate_instances(self.context, [ec2_instance_id])

        admin_ctxt = context.get_admin_context(read_deleted="no")
        vol = self.volume_api.get(admin_ctxt, vol2_uuid)
        self.assertFalse(vol['deleted'])
        self.cloud.delete_volume(self.context, vol1['volumeId'])
        self._restart_compute_service()
Example #40
File: manager.py Project: bgh/nova
 def get_all_networks(self):
     networks = []
     admin_context = context.get_admin_context()
     networks.extend(self.ipam.get_global_networks(admin_context))
     networks.extend(self.ipam.get_project_networks(admin_context))
     return networks
Example #41
from nova import context
from egodocker.pod import driver
import uuid

name = str(uuid.uuid4())
docker_driver = driver.DockerDriver()
admin_context = context.get_admin_context()
tenant_id = 'ffd9435ec24d4d11b6e1a97c2ff8e64c'
container = {
    'hostname': 'sshd',
    'mem_limit': '200m',
    'command': '/usr/sbin/sshd -D',
    'image': 'sshd',
    'name': name
}

net_opts = {
    'network_id': 'c3dd16d9-c201-46f1-b231-03b1f6cc3238',
    'zone': 'compute:nova'
}
container_id = docker_driver.spawn(admin_context,
                                   container,
                                   tenant_id,
                                   'prsdemo3',
                                   network_mode='neutron',
                                   network_opts=net_opts)
print container_id
print 'container successfully spawned'

docker_driver.destroy(admin_context,
                      name)
Example #42
 def delete_user(self, id):
     """Delete a user"""
     user = db.user_get(context.get_admin_context(), id)
     db.user_delete(context.get_admin_context(), user['id'])
Example #43
    def host_maintenance_mode(self, host, mode):
        """Start/Stop host maintenance window. On start, it triggers
        guest VMs evacuation.
        """
        if not mode:
            return 'off_maintenance'
        host_list = [host_ref for host_ref in
                     self._session.host.get_all()
                     if host_ref != self._session.host_ref]
        migrations_counter = vm_counter = 0
        ctxt = context.get_admin_context()
        for vm_ref, vm_rec in vm_utils.list_vms(self._session):
            for host_ref in host_list:
                try:
                    # Ensure only guest instances are migrated
                    uuid = vm_rec['other_config'].get('nova_uuid')
                    if not uuid:
                        name = vm_rec['name_label']
                        uuid = _uuid_find(ctxt, host, name)
                        if not uuid:
                            LOG.info('Instance %(name)s running on '
                                     '%(host)s could not be found in '
                                     'the database: assuming it is a '
                                     'worker VM and skipping migration '
                                     'to a new host',
                                     {'name': name, 'host': host})
                            continue
                    instance = objects.Instance.get_by_uuid(ctxt, uuid)
                    vm_counter = vm_counter + 1

                    aggregate = objects.AggregateList.get_by_host(
                        ctxt, host, key=pool_states.POOL_FLAG)
                    if not aggregate:
                        msg = _('Aggregate for host %(host)s could not be'
                                ' found.') % dict(host=host)
                        raise exception.NotFound(msg)

                    dest = _host_find(ctxt, self._session, aggregate[0],
                                      host_ref)
                    instance.host = dest
                    instance.task_state = task_states.MIGRATING
                    instance.save()

                    self._session.VM.pool_migrate(vm_ref, host_ref,
                                                  {"live": "true"})
                    migrations_counter = migrations_counter + 1

                    instance.vm_state = vm_states.ACTIVE
                    instance.save()

                    break
                except XenAPI.Failure:
                    LOG.exception(_('Unable to migrate VM %(vm_ref)s '
                                    'from %(host)s'),
                                  {'vm_ref': vm_ref, 'host': host})
                    instance.host = host
                    instance.vm_state = vm_states.ACTIVE
                    instance.save()

        if vm_counter == migrations_counter:
            return 'on_maintenance'
        else:
            raise exception.NoValidHost(reason=_('Unable to find suitable '
                                                 'host for VMs evacuation'))
Example #44
def add_flavor_access(flavorid, projectid, ctxt=None):
    """Add flavor access for project."""
    if ctxt is None:
        ctxt = context.get_admin_context()

    return db.flavor_access_add(ctxt, flavorid, projectid)
Example #45
 def assertInstanceHasNoSecret(self, server):
     ctx = nova_context.get_admin_context()
     instance = objects.Instance.get_by_uuid(ctx, server['id'])
     self.assertNotIn('vtpm_secret_uuid', instance.system_metadata)
     self.assertEqual(0, len(self.key_mgr._passphrases))
Example #46
def remove_flavor_access(flavorid, projectid, ctxt=None):
    """Remove flavor access for project."""
    if ctxt is None:
        ctxt = context.get_admin_context()

    return db.flavor_access_remove(ctxt, flavorid, projectid)
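
As a usage sketch (the flavor and project ids are illustrative), the add and remove helpers mirror each other and both default to an admin context when none is supplied:

ctxt = context.get_admin_context()
add_flavor_access('42', 'my-project', ctxt=ctxt)
# ... the grant is visible via get_flavor_access_by_flavor_id ...
remove_flavor_access('42', 'my-project', ctxt=ctxt)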
Example #47
def create(name,
           memory,
           vcpus,
           root_gb,
           ephemeral_gb=0,
           flavorid=None,
           swap=0,
           rxtx_factor=1.0,
           is_public=True):
    """Creates flavors."""
    if not flavorid:
        flavorid = uuid.uuid4()

    kwargs = {
        'memory_mb': memory,
        'vcpus': vcpus,
        'root_gb': root_gb,
        'ephemeral_gb': ephemeral_gb,
        'swap': swap,
        'rxtx_factor': rxtx_factor,
    }

    # ensure name does not exceed 255 characters
    utils.check_string_length(name, 'name', min_length=1, max_length=255)

    # ensure name does not contain any special characters
    invalid_name = INVALID_NAME_REGEX.search(name)
    if invalid_name:
        msg = _("names can only contain [a-zA-Z0-9_.- ]")
        raise exception.InvalidInput(reason=msg)

    # Some attributes are positive ( > 0) integers
    for option in ['memory_mb', 'vcpus']:
        try:
            assert int(str(kwargs[option])) > 0
            kwargs[option] = int(kwargs[option])
        except (ValueError, AssertionError, TypeError):
            msg = _("'%s' argument must be a positive integer") % option
            raise exception.InvalidInput(reason=msg)

    # Some attributes are non-negative ( >= 0) integers
    for option in ['root_gb', 'ephemeral_gb', 'swap']:
        try:
            assert int(str(kwargs[option])) >= 0
            kwargs[option] = int(kwargs[option])
        except (ValueError, AssertionError, TypeError):
            msg = _("'%s' argument must be an integer greater than or"
                    " equal to 0") % option
            raise exception.InvalidInput(reason=msg)

    # rxtx_factor should be a positive float
    try:
        kwargs['rxtx_factor'] = float(kwargs['rxtx_factor'])
        assert kwargs['rxtx_factor'] > 0
    except (ValueError, AssertionError):
        msg = _("'rxtx_factor' argument must be a positive float")
        raise exception.InvalidInput(reason=msg)

    kwargs['name'] = name
    # NOTE(vish): Internally, flavorid is stored as a string but it comes
    #             in through json as an integer, so we convert it here.
    kwargs['flavorid'] = unicode(flavorid)

    # ensure is_public attribute is boolean
    try:
        kwargs['is_public'] = strutils.bool_from_string(is_public, strict=True)
    except ValueError:
        raise exception.InvalidInput(reason=_("is_public must be a boolean"))

    try:
        return db.flavor_create(context.get_admin_context(), kwargs)
    except db_exc.DBError as e:
        LOG.exception(_('DB error: %s') % e)
        raise exception.InstanceTypeCreateFailed()
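
A usage sketch (the name and sizes are illustrative); invalid input surfaces as InvalidInput rather than being silently coerced:

# flavorid is auto-generated (uuid4) when omitted.
flavor = create('m1.sketch', memory=512, vcpus=1, root_gb=1)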
Example #48
    def start_fixture(self):
        super(AllocationFixture, self).start_fixture()
        self.context = context.get_admin_context()

        # For use creating and querying allocations/usages
        os.environ['ALT_USER_ID'] = uuidutils.generate_uuid()
        project_id = os.environ['PROJECT_ID']
        user_id = os.environ['USER_ID']
        alt_user_id = os.environ['ALT_USER_ID']

        # Stealing from the super
        rp_name = os.environ['RP_NAME']
        rp_uuid = os.environ['RP_UUID']
        rp = rp_obj.ResourceProvider(
            self.context, name=rp_name, uuid=rp_uuid)
        rp.create()

        # Create some DISK_GB inventory and allocations.
        consumer_id = uuidutils.generate_uuid()
        inventory = rp_obj.Inventory(
            self.context, resource_provider=rp,
            resource_class='DISK_GB', total=2048,
            step_size=10, min_unit=10, max_unit=600)
        inventory.obj_set_defaults()
        rp.add_inventory(inventory)
        alloc1 = rp_obj.Allocation(
            self.context, resource_provider=rp,
            resource_class='DISK_GB',
            consumer_id=consumer_id,
            project_id=project_id,
            user_id=user_id,
            used=500)
        alloc2 = rp_obj.Allocation(
            self.context, resource_provider=rp,
            resource_class='DISK_GB',
            consumer_id=consumer_id,
            project_id=project_id,
            user_id=user_id,
            used=500)
        alloc_list = rp_obj.AllocationList(
            self.context,
            objects=[alloc1, alloc2]
        )
        alloc_list.create_all()

        # Create some VCPU inventory and allocations.
        consumer_id = uuidutils.generate_uuid()
        os.environ['CONSUMER_ID'] = consumer_id
        inventory = rp_obj.Inventory(
            self.context, resource_provider=rp,
            resource_class='VCPU', total=10,
            max_unit=4)
        inventory.obj_set_defaults()
        rp.add_inventory(inventory)
        alloc1 = rp_obj.Allocation(
            self.context, resource_provider=rp,
            resource_class='VCPU',
            consumer_id=consumer_id,
            project_id=project_id,
            user_id=user_id,
            used=2)
        alloc2 = rp_obj.Allocation(
            self.context, resource_provider=rp,
            resource_class='VCPU',
            consumer_id=consumer_id,
            project_id=project_id,
            user_id=user_id,
            used=4)
        alloc_list = rp_obj.AllocationList(
                self.context,
                objects=[alloc1, alloc2])
        alloc_list.create_all()

        # Create a couple of allocations for a different user.
        consumer_id = uuidutils.generate_uuid()
        alloc1 = rp_obj.Allocation(
            self.context, resource_provider=rp,
            resource_class='DISK_GB',
            consumer_id=consumer_id,
            project_id=project_id,
            user_id=alt_user_id,
            used=20)
        alloc2 = rp_obj.Allocation(
            self.context, resource_provider=rp,
            resource_class='VCPU',
            consumer_id=consumer_id,
            project_id=project_id,
            user_id=alt_user_id,
            used=1)
        alloc_list = rp_obj.AllocationList(
                self.context,
                objects=[alloc1, alloc2])
        alloc_list.create_all()

        # The ALT_RP_XXX variables are for a resource provider that has
        # not been created in the Allocation fixture
        os.environ['ALT_RP_UUID'] = uuidutils.generate_uuid()
        os.environ['ALT_RP_NAME'] = uuidutils.generate_uuid()
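A quick sanity check on what this fixture leaves behind (sums taken
directly from the allocations above):

# DISK_GB usage: 500 + 500 + 20 = 1020 of 2048 total
# VCPU usage:    2 + 4 + 1 = 7 of 10 total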
Example #49
0
File: base.py Project: xishian/nova
    def __init__(self,
                 instance,
                 address=None,
                 content=None,
                 extra_md=None,
                 network_info=None,
                 vd_driver=None,
                 network_metadata=None,
                 request_context=None):
        """Creation of this object should basically cover all time consuming
        collection.  Methods after that should not cause time delays due to
        network operations or lengthy cpu operations.

        The user should then get a single instance and make multiple method
        calls on it.
        """
        if not content:
            content = []

        ctxt = context.get_admin_context()

        # The default value of mimeType is set to MIME_TYPE_TEXT_PLAIN
        self.set_mimetype(MIME_TYPE_TEXT_PLAIN)
        self.instance = instance
        self.extra_md = extra_md

        self.availability_zone = az.get_instance_availability_zone(
            ctxt, instance)

        secgroup_api = openstack_driver.get_openstack_security_group_driver()
        self.security_groups = secgroup_api.get_instance_security_groups(
            ctxt, instance)

        self.mappings = _format_instance_mapping(ctxt, instance)

        if instance.user_data is not None:
            self.userdata_raw = base64.decode_as_bytes(instance.user_data)
        else:
            self.userdata_raw = None

        self.address = address

        # expose instance metadata.
        self.launch_metadata = utils.instance_meta(instance)

        self.password = password.extract_password(instance)

        self.uuid = instance.uuid

        self.content = {}
        self.files = []

        # get network info, and the rendered network template
        if network_info is None:
            network_info = instance.info_cache.network_info

        # expose network metadata
        if network_metadata is None:
            self.network_metadata = netutils.get_network_metadata(network_info)
        else:
            self.network_metadata = network_metadata

        self.ip_info = \
                ec2utils.get_ip_info_for_instance_from_nw_info(network_info)

        self.network_config = None
        cfg = netutils.get_injected_network_template(network_info)

        if cfg:
            key = "%04i" % len(self.content)
            self.content[key] = cfg
            self.network_config = {
                "name": "network_config",
                'content_path': "/%s/%s" % (CONTENT_DIR, key)
            }

        # 'content' is passed in from the configdrive code in
        # nova/virt/libvirt/driver.py.  That's how we get the injected files
        # (personalities) in. AFAIK they're not stored in the db at all,
        # so are not available later (web service metadata time).
        for (path, contents) in content:
            key = "%04i" % len(self.content)
            self.files.append({
                'path': path,
                'content_path': "/%s/%s" % (CONTENT_DIR, key)
            })
            self.content[key] = contents

        if vd_driver is None:
            vdclass = importutils.import_class(CONF.vendordata_driver)
        else:
            vdclass = vd_driver

        self.vddriver = vdclass(instance=instance,
                                address=address,
                                extra_md=extra_md,
                                network_info=network_info)

        self.route_configuration = None

        # NOTE(mikal): the decision to not pass extra_md here like we
        # do to the StaticJSON driver is deliberate. extra_md will
        # contain the admin password for the instance, and we shouldn't
        # pass that to external services.
        self.vendordata_providers = {
            'StaticJSON':
            vendordata_json.JsonFileVendorData(instance=instance,
                                               address=address,
                                               extra_md=extra_md,
                                               network_info=network_info),
            'DynamicJSON':
            vendordata_dynamic.DynamicVendorData(instance=instance,
                                                 address=address,
                                                 network_info=network_info,
                                                 context=request_context)
        }
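A sketch of how injected files land in the content tree, following the
key-assignment loop above (hedged: CONTENT_DIR is assumed to be "content"):

# content=[('/etc/motd', 'hello')] and no network template gives:
#   self.files   == [{'path': '/etc/motd',
#                     'content_path': '/content/0000'}]
#   self.content == {'0000': 'hello'}
# If a network template was rendered, network_config claims key '0000'
# and the first injected file gets '/content/0001' instead.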
Example #50
0
    def start_fixture(self):
        super(SharedStorageFixture, self).start_fixture()
        self.context = context.get_admin_context()

        cn1_uuid = uuidutils.generate_uuid()
        cn2_uuid = uuidutils.generate_uuid()
        ss_uuid = uuidutils.generate_uuid()
        agg_uuid = uuidutils.generate_uuid()
        os.environ['CN1_UUID'] = cn1_uuid
        os.environ['CN2_UUID'] = cn2_uuid
        os.environ['SS_UUID'] = ss_uuid
        os.environ['AGG_UUID'] = agg_uuid

        cn1 = rp_obj.ResourceProvider(
            self.context,
            name='cn1',
            uuid=cn1_uuid)
        cn1.create()

        cn2 = rp_obj.ResourceProvider(
            self.context,
            name='cn2',
            uuid=cn2_uuid)
        cn2.create()

        ss = rp_obj.ResourceProvider(
            self.context,
            name='ss',
            uuid=ss_uuid)
        ss.create()

        # Populate compute node inventory for VCPU and RAM
        for cn in (cn1, cn2):
            vcpu_inv = rp_obj.Inventory(
                self.context,
                resource_provider=cn,
                resource_class='VCPU',
                total=24,
                reserved=0,
                max_unit=24,
                min_unit=1,
                step_size=1,
                allocation_ratio=16.0)
            vcpu_inv.obj_set_defaults()
            ram_inv = rp_obj.Inventory(
                self.context,
                resource_provider=cn,
                resource_class='MEMORY_MB',
                total=128 * 1024,
                reserved=0,
                max_unit=128 * 1024,
                min_unit=256,
                step_size=256,
                allocation_ratio=1.5)
            ram_inv.obj_set_defaults()
            inv_list = rp_obj.InventoryList(objects=[vcpu_inv, ram_inv])
            cn.set_inventory(inv_list)

        t_avx_sse = rp_obj.Trait.get_by_name(self.context, "HW_CPU_X86_SSE")
        t_avx_sse2 = rp_obj.Trait.get_by_name(self.context, "HW_CPU_X86_SSE2")
        cn1.set_traits(rp_obj.TraitList(objects=[t_avx_sse, t_avx_sse2]))

        # Populate shared storage provider with DISK_GB inventory
        disk_inv = rp_obj.Inventory(
            self.context,
            resource_provider=ss,
            resource_class='DISK_GB',
            total=2000,
            reserved=100,
            max_unit=2000,
            min_unit=10,
            step_size=10,
            allocation_ratio=1.0)
        disk_inv.obj_set_defaults()
        inv_list = rp_obj.InventoryList(objects=[disk_inv])
        ss.set_inventory(inv_list)

        # Mark the shared storage pool as having inventory shared among any
        # provider associated via aggregate
        t = rp_obj.Trait.get_by_name(
            self.context,
            "MISC_SHARES_VIA_AGGREGATE",
        )
        ss.set_traits(rp_obj.TraitList(objects=[t]))

        # Now associate the shared storage pool and both compute nodes with the
        # same aggregate
        cn1.set_aggregates([agg_uuid])
        cn2.set_aggregates([agg_uuid])
        ss.set_aggregates([agg_uuid])
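The resulting topology, summarized from the code above:

# cn1: VCPU + MEMORY_MB inventory, HW_CPU_X86_SSE and HW_CPU_X86_SSE2 traits
# cn2: VCPU + MEMORY_MB inventory, no traits
# ss:  DISK_GB inventory, MISC_SHARES_VIA_AGGREGATE trait
# All three providers share one aggregate, so ss's DISK_GB can be
# consumed alongside resources from either compute node.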
Example #51
0
 def setUpClass(cls):
     cls._c = context.get_admin_context()
     cls._ac = admin.AdminController()
Example #52
0
    def start_fixture(self):
        super(NonSharedStorageFixture, self).start_fixture()
        self.context = context.get_admin_context()

        cn1_uuid = uuidutils.generate_uuid()
        cn2_uuid = uuidutils.generate_uuid()
        aggA_uuid = uuidutils.generate_uuid()
        aggB_uuid = uuidutils.generate_uuid()
        aggC_uuid = uuidutils.generate_uuid()
        os.environ['CN1_UUID'] = cn1_uuid
        os.environ['CN2_UUID'] = cn2_uuid
        os.environ['AGGA_UUID'] = aggA_uuid
        os.environ['AGGB_UUID'] = aggB_uuid
        os.environ['AGGC_UUID'] = aggC_uuid

        cn1 = rp_obj.ResourceProvider(
            self.context,
            name='cn1',
            uuid=cn1_uuid)
        cn1.create()

        cn2 = rp_obj.ResourceProvider(
            self.context,
            name='cn2',
            uuid=cn2_uuid)
        cn2.create()

        # Populate compute node inventory for VCPU and RAM
        for cn in (cn1, cn2):
            vcpu_inv = rp_obj.Inventory(
                self.context,
                resource_provider=cn,
                resource_class='VCPU',
                total=24,
                reserved=0,
                max_unit=24,
                min_unit=1,
                step_size=1,
                allocation_ratio=16.0)
            vcpu_inv.obj_set_defaults()
            ram_inv = rp_obj.Inventory(
                self.context,
                resource_provider=cn,
                resource_class='MEMORY_MB',
                total=128 * 1024,
                reserved=0,
                max_unit=128 * 1024,
                min_unit=256,
                step_size=256,
                allocation_ratio=1.5)
            ram_inv.obj_set_defaults()
            disk_inv = rp_obj.Inventory(
                self.context,
                resource_provider=cn,
                resource_class='DISK_GB',
                total=2000,
                reserved=100,
                max_unit=2000,
                min_unit=10,
                step_size=10,
                allocation_ratio=1.0)
            disk_inv.obj_set_defaults()
            inv_list = rp_obj.InventoryList(objects=[vcpu_inv, ram_inv,
                                                     disk_inv])
            cn.set_inventory(inv_list)
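Unlike SharedStorageFixture above, each compute node here carries its own
disk:

# cn1 and cn2 each get VCPU, MEMORY_MB and DISK_GB inventory.
# The AGGA/AGGB/AGGC uuids are exported to the environment but are not
# associated with any provider by this fixture.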
Example #53
0
 def get_users(self):
     """Retrieve list of users"""
     return [
         self._db_user_to_auth_user(user)
         for user in db.user_get_all(context.get_admin_context())
     ]
Example #54
0
 def init_host(self):
     """Start up any config'ed consoles on start."""
     ctxt = context.get_admin_context()
     self._rebuild_xvp_conf(ctxt)
Example #55
0
 def get_user_from_access_key(self, access):
     """Retrieve user by access key"""
     user = db.user_get_by_access_key(context.get_admin_context(), access)
     return self._db_user_to_auth_user(user)
Example #56
0
    def start(self):
        verstr = version.version_string_with_package()
        LOG.audit(_('Starting %(topic)s node (version %(version)s)'), {
            'topic': self.topic,
            'version': verstr
        })
        self.basic_config_check()
        self.manager.init_host()
        self.model_disconnected = False
        ctxt = context.get_admin_context()
        try:
            self.service_ref = self.conductor_api.service_get_by_args(
                ctxt, self.host, self.binary)
            self.service_id = self.service_ref['id']
        except exception.NotFound:
            try:
                self.service_ref = self._create_service_ref(ctxt)
            except (exception.ServiceTopicExists,
                    exception.ServiceBinaryExists):
                # NOTE(danms): If we race to create a record with a sibling
                # worker, don't fail here.
                self.service_ref = self.conductor_api.service_get_by_args(
                    ctxt, self.host, self.binary)

        self.manager.pre_start_hook()

        if self.backdoor_port is not None:
            self.manager.backdoor_port = self.backdoor_port

        LOG.debug("Creating RPC server for service %s", self.topic)

        target = messaging.Target(topic=self.topic, server=self.host)

        endpoints = [
            self.manager,
            baserpc.BaseRPCAPI(self.manager.service_name, self.backdoor_port)
        ]
        endpoints.extend(self.manager.additional_endpoints)

        serializer = objects_base.NovaObjectSerializer()

        self.rpcserver = rpc.get_server(target, endpoints, serializer)
        self.rpcserver.start()

        self.manager.post_start_hook()

        LOG.debug("Join ServiceGroup membership for this service %s",
                  self.topic)
        # Add service to the ServiceGroup membership group.
        self.servicegroup_api.join(self.host, self.topic, self)

        if self.periodic_enable:
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            self.tg.add_dynamic_timer(
                self.periodic_tasks,
                initial_delay=initial_delay,
                periodic_interval_max=self.periodic_interval_max)
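The fuzzy delay above is simple jitter; a sketch of its effect (hedged:
periodic_fuzzy_delay=60 is an illustrative value):

# initial_delay = random.randint(0, 60) means each worker waits a
# different 0-60s before its first periodic_tasks run, so services
# restarted together do not all fire at the same instant.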
Example #57
0
 def delete_project(self, project_id):
     """Delete a project"""
     db.project_delete(context.get_admin_context(), project_id)
Example #58
0
 def periodic_tasks(self, raise_on_error=False):
     """Tasks to be run at a periodic interval."""
     ctxt = context.get_admin_context()
     return self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
Example #59
0
    def new_websocket_client(self):
        """Called after a new WebSocket connection has been established."""
        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        from eventlet import hubs
        hubs.use_hub()

        # Nova expects the token to be passed in the query string
        # of the GET request
        parse = urlparse.urlparse(self.path)
        if parse.scheme not in ('http', 'https'):
            # Due to a bug in urlparse in Python < 2.7.4 we cannot support
            # special schemes (cf: http://bugs.python.org/issue9374)
            if sys.version_info < (2, 7, 4):
                raise exception.NovaException(
                    _("We do not support scheme '%s' under Python < 2.7.4, "
                      "please use http or https") % parse.scheme)

        query = parse.query
        token = urlparse.parse_qs(query).get("token", [""]).pop()
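        # e.g. a path of '/?token=abc123' gives parse_qs(query) ==
        # {'token': ['abc123']} and token == 'abc123'; when the
        # parameter is absent the [""] default leaves token == ''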
        if not token:
            # NoVNC uses its own convention of forwarding the token
            # from the request in a cookie header, so we should also
            # check for that behavior
            hcookie = self.headers.get('cookie')
            if hcookie:
                cookie = Cookie.SimpleCookie()
                for hcookie_part in hcookie.split(';'):
                    hcookie_part = hcookie_part.lstrip()
                    try:
                        cookie.load(hcookie_part)
                    except Cookie.CookieError:
                        # NOTE(stgleb): Do not print out cookie content
                        # for security reasons.
                        LOG.warning('Found malformed cookie')
                    else:
                        if 'token' in cookie:
                            token = cookie['token'].value

        ctxt = context.get_admin_context()
        rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
        connect_info = rpcapi.check_token(ctxt, token=token)

        if not connect_info:
            raise exception.InvalidToken(token=token)

        # Verify Origin
        expected_origin_hostname = self.headers.get('Host')
        if expected_origin_hostname and ':' in expected_origin_hostname:
            e = expected_origin_hostname
            if '[' in e and ']' in e:
                expected_origin_hostname = e.split(']')[0][1:]
            else:
                expected_origin_hostname = e.split(':')[0]
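        # e.g. Host 'example.com:6080'   -> 'example.com'
        #      Host '[2001:db8::1]:6080' -> '2001:db8::1'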
        # Copy so we do not mutate the allowed_origins config list
        expected_origin_hostnames = list(CONF.console.allowed_origins)
        expected_origin_hostnames.append(expected_origin_hostname)
        origin_url = self.headers.get('Origin')
        # A missing Origin header indicates a non-browser client, which is OK
        if origin_url is not None:
            origin = urlparse.urlparse(origin_url)
            origin_hostname = origin.hostname
            origin_scheme = origin.scheme
            if origin_hostname == '' or origin_scheme == '':
                detail = _("Origin header not valid.")
                raise exception.ValidationError(detail=detail)
            if origin_hostname not in expected_origin_hostnames:
                detail = _("Origin header does not match this host.")
                raise exception.ValidationError(detail=detail)
            if not self.verify_origin_proto(connect_info, origin_scheme):
                detail = _("Origin header protocol does not match this host.")
                raise exception.ValidationError(detail=detail)

        self.msg(_('connect info: %s'), str(connect_info))
        host = connect_info['host']
        port = int(connect_info['port'])

        # Connect to the target
        self.msg(_("connecting to: %(host)s:%(port)s") % {'host': host,
                                                          'port': port})
        tsock = self.socket(host, port, connect=True)

        # Handshake as necessary
        if connect_info.get('internal_access_path'):
            tsock.send(encodeutils.safe_encode(
                "CONNECT %s HTTP/1.1\r\n\r\n" %
                connect_info['internal_access_path']))
            end_token = "\r\n\r\n"
            while True:
                data = tsock.recv(4096, socket.MSG_PEEK)
                token_loc = data.find(end_token)
                if token_loc != -1:
                    if data.split("\r\n")[0].find("200") == -1:
                        raise exception.InvalidConnectionInfo()
                    # remove the response from recv buffer
                    tsock.recv(token_loc + len(end_token))
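                    # (the earlier recv used MSG_PEEK, so only this
                    # recv consumes bytes, draining exactly the HTTP
                    # response and leaving the VNC stream intact)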
                    break

        if self.server.security_proxy is not None:
            tenant_sock = TenantSock(self)

            try:
                tsock = self.server.security_proxy.connect(tenant_sock, tsock)
            except exception.SecurityProxyNegotiationFailed:
                LOG.exception("Unable to perform security proxying, shutting "
                              "down connection")
                tenant_sock.close()
                tsock.shutdown(socket.SHUT_RDWR)
                tsock.close()
                raise

            tenant_sock.finish_up()

        # Start proxying
        try:
            self.do_proxy(tsock)
        except Exception:
            if tsock:
                tsock.shutdown(socket.SHUT_RDWR)
                tsock.close()
                self.vmsg(_("%(host)s:%(port)s: "
                          "Websocket client or target closed") %
                          {'host': host, 'port': port})
            raise
Example #60
0
File: vmops.py Project: termie/nova
    def spawn(self, context, instance, network_info):
        """
        Creates a VM instance.

        Steps followed are:
        1. Create a VM with no disk and the specifics in the instance object
            like RAM size.
        2. Create a dummy vmdk of the size of the disk file that is to be
            uploaded. This is required just to create the metadata file.
        3. Delete the -flat.vmdk file created in the above step and retain
            the metadata .vmdk file.
        4. Upload the disk file.
        5. Attach the disk to the VM by reconfiguring it.
        6. Power on the VM.
        """
        vm_ref = self._get_vm_ref_from_the_name(instance.name)
        if vm_ref:
            raise exception.InstanceExists(name=instance.name)

        client_factory = self._session._get_vim().client.factory
        service_content = self._session._get_vim().get_service_content()

        network = db.network_get_by_instance(nova_context.get_admin_context(),
                                             instance['id'])

        net_name = network['bridge']

        def _check_if_network_bridge_exists():
            network_ref = \
                network_utils.get_network_with_the_name(self._session,
                                                        net_name)
            if network_ref is None:
                raise exception.NetworkNotFoundForBridge(bridge=net_name)
            return network_ref

        self.plug_vifs(instance, network_info)
        network_obj = _check_if_network_bridge_exists()

        def _get_datastore_ref():
            """Get the datastore list and choose the first local storage."""
            data_stores = self._session._call_method(
                vim_util, "get_objects", "Datastore",
                ["summary.type", "summary.name"])
            for elem in data_stores:
                ds_name = None
                ds_type = None
                for prop in elem.propSet:
                    if prop.name == "summary.type":
                        ds_type = prop.val
                    elif prop.name == "summary.name":
                        ds_name = prop.val
                # Local storage identifier
                if ds_type == "VMFS":
                    return ds_name

            # No VMFS datastore was found
            msg = _("Couldn't get a local Datastore reference")
            LOG.exception(msg)
            raise exception.Error(msg)

        data_store_name = _get_datastore_ref()

        def _get_image_properties():
            """
            Get the size of the flat vmdk file that is on the storage
            repository.
            """
            image_size, image_properties = \
                    vmware_images.get_vmdk_size_and_properties(
                                       instance.image_ref, instance)
            vmdk_file_size_in_kb = int(image_size) / 1024
            os_type = image_properties.get("vmware_ostype", "otherGuest")
            adapter_type = image_properties.get("vmware_adaptertype",
                                                "lsiLogic")
            return vmdk_file_size_in_kb, os_type, adapter_type

        vmdk_file_size_in_kb, os_type, adapter_type = _get_image_properties()

        def _get_vmfolder_and_res_pool_mors():
            """Get the Vm folder ref from the datacenter."""
            dc_objs = self._session._call_method(vim_util, "get_objects",
                                                 "Datacenter", ["vmFolder"])
            # There is only one default datacenter in a standalone ESX host
            vm_folder_mor = dc_objs[0].propSet[0].val

            # Get the resource pool. Take the first resource pool we find,
            # assuming it is the default resource pool.
            res_pool_mor = self._session._call_method(vim_util, "get_objects",
                                                      "ResourcePool")[0].obj
            return vm_folder_mor, res_pool_mor

        vm_folder_mor, res_pool_mor = _get_vmfolder_and_res_pool_mors()

        # Get the create vm config spec
        config_spec = vm_util.get_vm_create_spec(client_factory, instance,
                                                 data_store_name, net_name,
                                                 os_type, network_obj)

        def _execute_create_vm():
            """Create VM on ESX host."""
            LOG.debug(
                _("Creating VM with the name %s on the ESX host") %
                instance.name)
            # Create the VM on the ESX host
            vm_create_task = self._session._call_method(
                self._session._get_vim(),
                "CreateVM_Task",
                vm_folder_mor,
                config=config_spec,
                pool=res_pool_mor)
            self._session._wait_for_task(instance.id, vm_create_task)

            LOG.debug(
                _("Created VM with the name %s on the ESX host") %
                instance.name)

        _execute_create_vm()

        # Set the machine id for the VM for setting the IP
        self._set_machine_id(client_factory, instance)

        # Name the VM files after the VM instance name
        # The flat vmdk file name
        flat_uploaded_vmdk_name = "%s/%s-flat.vmdk" % (instance.name,
                                                       instance.name)
        # The vmdk meta-data file
        uploaded_vmdk_name = "%s/%s.vmdk" % (instance.name, instance.name)
        flat_uploaded_vmdk_path = vm_util.build_datastore_path(
            data_store_name, flat_uploaded_vmdk_name)
        uploaded_vmdk_path = vm_util.build_datastore_path(
            data_store_name, uploaded_vmdk_name)
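        # A sketch of the resulting paths (hedged: assumes
        # build_datastore_path renders the usual '[datastore] path' form
        # and an instance named 'instance-1'):
        #   uploaded_vmdk_path      -> '[vmfs0] instance-1/instance-1.vmdk'
        #   flat_uploaded_vmdk_path -> '[vmfs0] instance-1/instance-1-flat.vmdk'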

        def _create_virtual_disk():
            """Create a virtual disk of the size of flat vmdk file."""
            # Create a Virtual Disk of the size of the flat vmdk file. This is
            # done just to generate the meta-data file whose specifics
            # depend on the size of the disk, thin/thick provisioning and the
            # storage adapter type.
            # Here we assume thick provisioning and lsiLogic for the adapter
            # type
            LOG.debug(
                _("Creating Virtual Disk of size "
                  "%(vmdk_file_size_in_kb)s KB and adapter type "
                  "%(adapter_type)s on the ESX host local store"
                  " %(data_store_name)s") % {
                      "vmdk_file_size_in_kb": vmdk_file_size_in_kb,
                      "adapter_type": adapter_type,
                      "data_store_name": data_store_name
                  })
            vmdk_create_spec = vm_util.get_vmdk_create_spec(
                client_factory, vmdk_file_size_in_kb, adapter_type)
            vmdk_create_task = self._session._call_method(
                self._session._get_vim(),
                "CreateVirtualDisk_Task",
                service_content.virtualDiskManager,
                name=uploaded_vmdk_path,
                datacenter=self._get_datacenter_name_and_ref()[0],
                spec=vmdk_create_spec)
            self._session._wait_for_task(instance.id, vmdk_create_task)
            LOG.debug(
                _("Created Virtual Disk of size %(vmdk_file_size_in_kb)s"
                  " KB on the ESX host local store "
                  "%(data_store_name)s") % {
                      "vmdk_file_size_in_kb": vmdk_file_size_in_kb,
                      "data_store_name": data_store_name
                  })

        _create_virtual_disk()

        def _delete_disk_file():
            LOG.debug(
                _("Deleting the file %(flat_uploaded_vmdk_path)s "
                  "on the ESX host local "
                  "store %(data_store_name)s") % {
                      "flat_uploaded_vmdk_path": flat_uploaded_vmdk_path,
                      "data_store_name": data_store_name
                  })
            # Delete the -flat.vmdk file created. .vmdk file is retained.
            vmdk_delete_task = self._session._call_method(
                self._session._get_vim(),
                "DeleteDatastoreFile_Task",
                service_content.fileManager,
                name=flat_uploaded_vmdk_path)
            self._session._wait_for_task(instance.id, vmdk_delete_task)
            LOG.debug(
                _("Deleted the file %(flat_uploaded_vmdk_path)s on the "
                  "ESX host local store %(data_store_name)s") % {
                      "flat_uploaded_vmdk_path": flat_uploaded_vmdk_path,
                      "data_store_name": data_store_name
                  })

        _delete_disk_file()

        cookies = self._session._get_vim().client.options.transport.cookiejar

        def _fetch_image_on_esx_datastore():
            """Fetch image from Glance to ESX datastore."""
            LOG.debug(
                _("Downloading image file data %(image_ref)s to the ESX "
                  "data store %(data_store_name)s") %
                ({
                    'image_ref': instance.image_ref,
                    'data_store_name': data_store_name
                }))
            # Upload the -flat.vmdk file whose meta-data file we just created
            # above
            vmware_images.fetch_image(
                instance.image_ref,
                instance,
                host=self._session._host_ip,
                data_center_name=self._get_datacenter_name_and_ref()[1],
                datastore_name=data_store_name,
                cookies=cookies,
                file_path=flat_uploaded_vmdk_name)
            LOG.debug(
                _("Downloaded image file data %(image_ref)s to the ESX "
                  "data store %(data_store_name)s") %
                ({
                    'image_ref': instance.image_ref,
                    'data_store_name': data_store_name
                }))

        _fetch_image_on_esx_datastore()

        vm_ref = self._get_vm_ref_from_the_name(instance.name)

        def _attach_vmdk_to_the_vm():
            """
            Attach the vmdk uploaded to the VM. VM reconfigure is done
            to do so.
            """
            vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec(
                client_factory, vmdk_file_size_in_kb, uploaded_vmdk_path,
                adapter_type)
            LOG.debug(
                _("Reconfiguring VM instance %s to attach the image "
                  "disk") % instance.name)
            reconfig_task = self._session._call_method(
                self._session._get_vim(),
                "ReconfigVM_Task",
                vm_ref,
                spec=vmdk_attach_config_spec)
            self._session._wait_for_task(instance.id, reconfig_task)
            LOG.debug(
                _("Reconfigured VM instance %s to attach the image "
                  "disk") % instance.name)

        _attach_vmdk_to_the_vm()

        def _power_on_vm():
            """Power on the VM."""
            LOG.debug(_("Powering on the VM instance %s") % instance.name)
            # Power On the VM
            power_on_task = self._session._call_method(
                self._session._get_vim(), "PowerOnVM_Task", vm_ref)
            self._session._wait_for_task(instance.id, power_on_task)
            LOG.debug(_("Powered on the VM instance %s") % instance.name)

        _power_on_vm()