Example #1
    def _get_disk_size_mb(self, instance):
        inst_type_id = instance['instance_type_id']
        inst_type = instance_types.get_instance_type(inst_type_id)
        if inst_type["local_gb"] == 0:
            return 10 * 1024

        return inst_type["local_gb"] * 1024
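The conversion above is easy to check in isolation. A minimal standalone sketch, with a plain dict standing in for the flavor returned by instance_types.get_instance_type (flavors with local_gb == 0 fall back to a 10 GB disk):

    def disk_size_mb(flavor):
        # Flavors that declare no local disk get a default 10 GB.
        if flavor["local_gb"] == 0:
            return 10 * 1024
        return flavor["local_gb"] * 1024

    assert disk_size_mb({"local_gb": 0}) == 10240
    assert disk_size_mb({"local_gb": 20}) == 20480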
Example #2
    def _create_migration(self, context, instance, instance_type):
        """Create a migration record for the upcoming resize.  This should
        be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource
        claim will not be lost if the audit process starts.
        """
        # TODO(russellb): no-db-compute: Send the old instance type
        # info that is needed via rpc so db access isn't required
        # here.
        old_instance_type_id = instance["instance_type_id"]
        old_instance_type = instance_types.get_instance_type(old_instance_type_id)

        return db.migration_create(
            context.elevated(),
            {
                "instance_uuid": instance["uuid"],
                "source_compute": instance["host"],
                "source_node": instance["node"],
                "dest_compute": self.host,
                "dest_node": self.nodename,
                "dest_host": self.driver.get_host_ip_addr(),
                "old_instance_type_id": old_instance_type["id"],
                "new_instance_type_id": instance_type["id"],
                "status": "pre-migrating",
            },
        )
Example #3
    def _start_vcs(self, context, vsa, drives=None):
        """Start VCs for VSA."""
        # Avoid a mutable default argument for drives.
        drives = drives or []

        vsa_id = vsa['id']
        if vsa['status'] == VsaState.CREATING:
            self.vsa_api.update_vsa_status(context, vsa_id, VsaState.LAUNCHING)
        else:
            return

        # In a _separate_ loop, go over all volumes and mark them as "attached".
        has_failed_volumes = False
        for drive in drives:
            vol_name = drive['name']
            vol_disp_name = drive['display_name']
            status = drive['status']
            LOG.info(_("VSA ID %(vsa_id)d: Drive %(vol_name)s "\
                        "(%(vol_disp_name)s) is in %(status)s state"),
                        locals())
            if status == 'available':
                try:
                    # self.volume_api.update(context, volume['id'],
                    #                   dict(attach_status="attached"))
                    pass
                except Exception as ex:
                    msg = _("Failed to update attach status for volume "
                            "%(vol_name)s. %(ex)s") % locals()
                    LOG.exception(msg)
            else:
                has_failed_volumes = True

        if has_failed_volumes:
            LOG.info(_("VSA ID %(vsa_id)d: Delete all BE volumes"), locals())
            self.vsa_api.delete_vsa_volumes(context, vsa_id, "BE", True)
            self.vsa_api.update_vsa_status(context, vsa_id, VsaState.FAILED)
            return

        # create user-data record for VC
        storage_data = vsa_utils.generate_user_data(vsa, drives)

        instance_type = instance_types.get_instance_type(
            vsa['instance_type_id'])

        # now start the VC instance

        vc_count = vsa['vc_count']
        LOG.info(_("VSA ID %(vsa_id)d: Start %(vc_count)d instances"),
                 locals())
        vc_instances = self.compute_api.create(
            context,
            instance_type,  # vsa['vsa_instance_type'],
            vsa['image_ref'],
            min_count=1,
            max_count=vc_count,
            display_name='vc-' + vsa['display_name'],
            display_description='VC for VSA ' + vsa['display_name'],
            availability_zone=vsa['availability_zone'],
            user_data=storage_data,
            metadata=dict(vsa_id=str(vsa_id)))

        self.vsa_api.update_vsa_status(context, vsa_id, VsaState.CREATED)
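Reading the method as a whole, the VSA status flow is CREATING -> LAUNCHING -> (CREATED | FAILED). A hedged sketch of that implied transition table, with the VsaState constants stubbed out (the real values live elsewhere in the module):

    class VsaState(object):
        # Hypothetical stand-ins; the real constants are defined elsewhere.
        CREATING = "creating"
        LAUNCHING = "launching"
        CREATED = "created"
        FAILED = "failed"

    # Transitions implied by _start_vcs: anything not CREATING returns early.
    TRANSITIONS = {
        VsaState.CREATING: (VsaState.LAUNCHING,),
        VsaState.LAUNCHING: (VsaState.CREATED, VsaState.FAILED),
    }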
Example #5
    def _select_machine(self, context, instance):
        inst_type = instance_types.get_instance_type(instance['instance_type_id'])

        bmm_found = None
        reuse = False

        # create a non autocommit session
        session = get_session_dodai(False)
        session.begin()
        try:
            bmms = db.bmm_get_all_by_instance_type(context, inst_type["name"], session)
            if instance["availability_zone"] == "resource_pool": #Add a machine to resource pool.
                for bmm in bmms:
                    if bmm["availability_zone"] != "resource_pool":
                        continue

                    if bmm["status"] != "inactive":
                        continue

                    bmm_found = bmm
                    break
            else:
                for bmm in bmms:
                    if bmm["availability_zone"] != "resource_pool":
                        continue

                    if bmm["status"] != "active":
                        continue

                    instance_ref = db.instance_get(context, bmm["instance_id"])
                    if instance_ref["image_ref"] != instance["image_ref"]:
                        continue

                    bmm_found = bmm
                    reuse = True
                    break

                if not bmm_found:
                    for bmm in bmms:
                        if bmm["status"] == "used" or bmm["status"] == "processing":
                            continue
    
                        bmm_found = bmm
                        reuse = False
                        break

            if bmm_found:
                db.bmm_update(context, bmm_found["id"], {"status": "processing"}, session)
        except Exception as ex:
            LOG.exception(ex)
            session.rollback()
            raise exception.BareMetalMachineUnavailable() 

        session.commit()

        if bmm_found:
            return bmm_found, reuse

        raise exception.BareMetalMachineUnavailable()
Example #6
 def ensure_free_mem(cls, session, instance):
     inst_type_id = instance.instance_type_id
     instance_type = instance_types.get_instance_type(inst_type_id)
     mem = long(instance_type["memory_mb"]) * 1024 * 1024
     # get free memory from host
     host = session.get_xenapi_host()
     host_free_mem = long(session.get_xenapi().host.compute_free_memory(host))
     return host_free_mem >= mem
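The comparison itself is simple arithmetic. A standalone sketch, assuming only that the flavor carries memory_mb and that the host's free memory is known in bytes:

    def has_free_mem(host_free_bytes, flavor):
        # Convert the flavor's memory_mb to bytes before comparing.
        required = int(flavor["memory_mb"]) * 1024 * 1024
        return host_free_bytes >= required

    assert has_free_mem(2 * 1024 ** 3, {"memory_mb": 1024})
    assert not has_free_mem(512 * 1024 ** 2, {"memory_mb": 1024})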
Example #7
 def activate_bootloader(self, var, context, node, instance):
     image_path = var['image_path']
     inst_type_id = instance['instance_type_id']
     inst_type = instance_types.get_instance_type(inst_type_id)
     network_info = var['network_info']
     nets = self._find_MAC_Addresses(network_info)
     self._getregion(image_path, node, nets)
     LOG.debug("successfully bypassing activate_bootloader")
Example #8
    def _setUpBlockDeviceMapping(self):
        image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
        sys_meta = instance_types.save_instance_type_info(
            {}, instance_types.get_instance_type(1))
        inst1 = db.instance_create(self.context,
                                  {'image_ref': image_uuid,
                                   'instance_type_id': 1,
                                   'root_device_name': '/dev/sdb1',
                                   'system_metadata': sys_meta})
        inst2 = db.instance_create(self.context,
                                  {'image_ref': image_uuid,
                                   'instance_type_id': 1,
                                   'root_device_name': '/dev/sdc1',
                                   'system_metadata': sys_meta})

        instance_uuid = inst1['uuid']
        mappings0 = [
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb1',
             'snapshot_id': '1',
             'volume_id': '2'},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb2',
             'volume_id': '3',
             'volume_size': 1},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb3',
             'delete_on_termination': True,
             'snapshot_id': '4',
             'volume_id': '5'},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb4',
             'delete_on_termination': False,
             'snapshot_id': '6',
             'volume_id': '7'},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb5',
             'snapshot_id': '8',
             'volume_id': '9',
             'volume_size': 0},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb6',
             'snapshot_id': '10',
             'volume_id': '11',
             'volume_size': 1},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb7',
             'no_device': True},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb8',
             'virtual_name': 'swap'},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb9',
             'virtual_name': 'ephemeral3'}]

        volumes = self._block_device_mapping_create(instance_uuid, mappings0)
        return (inst1, inst2, volumes)
Example #10
 def ensure_free_mem(cls, session, instance):
     inst_type_id = instance.instance_type_id
     instance_type = instance_types.get_instance_type(inst_type_id)
     mem = long(instance_type['memory_mb']) * 1024 * 1024
     #get free memory from host
     host = session.get_xenapi_host()
     host_free_mem = long(session.get_xenapi().host.
                          compute_free_memory(host))
     return host_free_mem >= mem
Example #11
 def _instance_to_router(self, context, instance, image_meta):
     inst_type = \
         instance_types.get_instance_type(instance["instance_type_id"])
     chassis = self._chassis_for_flavor(inst_type['name'])
     C = self._class_for_instance(image_meta)
     r = C(self.dynamips, name=instance["id"], chassis=chassis)
     r.os_name = instance["name"]
     r.ram = inst_type["memory_mb"]
     r.os_prototype = instance
     return r
Example #13
def get_device_name_for_instance(context, instance, device):
    """Validates (or generates) a device name for instance.

    If device is not set, it will generate a unique device appropriate
    for the instance. It uses the block device mapping table to find
    valid device names. If the device name is valid but applicable to
    a different backend (for example /dev/vdc is specified but the
    backend uses /dev/xvdc), the device name will be converted to the
    appropriate format.
    """
    req_prefix = None
    req_letters = None
    if device:
        try:
            req_prefix, req_letters = block_device.match_device(device)
        except (TypeError, AttributeError, ValueError):
            raise exception.InvalidDevicePath(path=device)
    bdms = db.block_device_mapping_get_all_by_instance(context,
                instance['uuid'])
    mappings = block_device.instance_block_mapping(instance, bdms)
    try:
        prefix = block_device.match_device(mappings['root'])[0]
    except (TypeError, AttributeError, ValueError):
        raise exception.InvalidDevicePath(path=mappings['root'])
    # NOTE(vish): remove this when xenapi is setting default_root_device
    if (FLAGS.connection_type == 'xenapi' or
        FLAGS.compute_driver.endswith('xenapi.XenAPIDriver')):
        prefix = '/dev/xvd'
    if req_prefix != prefix:
        LOG.debug(_("Using %(prefix)s instead of %(req_prefix)s") % locals())
    letters_list = []
    for _name, device in mappings.iteritems():
        letter = block_device.strip_prefix(device)
        # NOTE(vish): delete numbers in case we have something like
        #             /dev/sda1
        letter = re.sub(r"\d+", "", letter)
        letters_list.append(letter)
    used_letters = set(letters_list)

    # NOTE(vish): remove this when xenapi is properly setting
    #             default_ephemeral_device and default_swap_device
    if (FLAGS.connection_type == 'xenapi' or
        FLAGS.compute_driver.endswith('xenapi.XenAPIDriver')):
        instance_type_id = instance['instance_type_id']
        instance_type = instance_types.get_instance_type(instance_type_id)
        if instance_type['ephemeral_gb']:
            used_letters.update('b')
        if instance_type['swap']:
            used_letters.update('c')

    if not req_letters:
        req_letters = _get_unused_letters(used_letters)
    if req_letters in used_letters:
        raise exception.DevicePathInUse(path=device)
    return prefix + req_letters
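The letter-extraction step in the middle of the function can be illustrated on its own; this sketch mirrors the strip_prefix-plus-re.sub combination, assuming a fixed /dev/sd prefix:

    import re

    def device_letter(device, prefix="/dev/sd"):
        # "/dev/sda1" -> "a": drop the prefix, then any partition number.
        letter = device[len(prefix):]
        return re.sub(r"\d+", "", letter)

    assert device_letter("/dev/sda1") == "a"
    assert device_letter("/dev/sdb") == "b"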
Example #14
 def get_info(self, instance):
     n = self._router_by_name(instance["name"])
     mem_mb = instance_types.get_instance_type(
         n.os_prototype['instance_type_id']).get("memory_mb")
     return {
         'state': n.os_state,
         'max_mem': int(mem_mb) * 1024,
         'mem': n.ram * 1024,
         'num_cpu': 1,
         'cpu_time': 0  # cpuinfo?
     }
Example #15
 def test_instance_type_create_then_delete(self):
     """Ensure instance types can be created"""
     starting_inst_list = instance_types.get_all_types()
     instance_types.create(self.name, 256, 1, 120, self.flavorid)
     new = instance_types.get_all_types()
     self.assertNotEqual(len(starting_inst_list), len(new), "instance type was not created")
     instance_types.destroy(self.name)
     self.assertEqual(1, instance_types.get_instance_type(self.id)["deleted"])
     self.assertEqual(starting_inst_list, instance_types.get_all_types())
     instance_types.purge(self.name)
     self.assertEqual(len(starting_inst_list), len(instance_types.get_all_types()), "instance type not purged")
Example #17
    def fetch_blank_disk(cls, session, instance_type_id):
        # Size the blank harddrive to suit the machine type:
        one_gig = 1024 * 1024 * 1024
        req_type = instance_types.get_instance_type(instance_type_id)
        req_size = req_type['local_gb']

        LOG.debug("Creating blank HD of size %(req_size)d gigs" % locals())
        vdi_size = one_gig * req_size

        LOG.debug("ISO vm create: Looking for the SR")
        sr_ref = safe_find_sr(session)

        vdi_ref = cls.create_vdi(session, sr_ref, 'blank HD', vdi_size, False)
        return vdi_ref
Example #18
def get_partition_sizes(instance):
    type_id = instance['instance_type_id']
    root_mb = instance['root_gb'] * 1024

    # NOTE(deva): is there a way to get swap_mb directly from instance?
    swap_mb = instance_types.get_instance_type(type_id)['swap']

    # NOTE(deva): For simpler code paths on the deployment side,
    #             we always create a swap partition. If the flavor
    #             does not specify any swap, we default to 1MB
    if swap_mb < 1:
        swap_mb = 1

    return (root_mb, swap_mb)
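Concretely, a flavor with no swap still yields a 1 MB swap partition. A sketch with the flavor lookup factored out:

    def partition_sizes(root_gb, swap_mb):
        root_mb = root_gb * 1024
        # Always create a swap partition; default to 1 MB when the
        # flavor specifies none.
        if swap_mb < 1:
            swap_mb = 1
        return (root_mb, swap_mb)

    assert partition_sizes(10, 0) == (10240, 1)
    assert partition_sizes(10, 512) == (10240, 512)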
Example #19
    def fetch_blank_disk(cls, session, instance_type_id):
        # Size the blank harddrive to suit the machine type:
        one_gig = 1024 * 1024 * 1024
        req_type = instance_types.get_instance_type(instance_type_id)
        req_size = req_type["local_gb"]

        LOG.debug("Creating blank HD of size %(req_size)d gigs" % locals())
        vdi_size = one_gig * req_size

        LOG.debug("ISO vm create: Looking for the SR")
        sr_ref = safe_find_sr(session)

        vdi_ref = cls.create_vdi(session, sr_ref, "blank HD", vdi_size, False)
        return vdi_ref
Example #20
def get_partition_sizes(instance):
    type_id = instance["instance_type_id"]
    root_mb = instance["root_gb"] * 1024

    # NOTE(deva): is there a way to get swap_mb directly from instance?
    swap_mb = instance_types.get_instance_type(type_id)["swap"]

    # NOTE(deva): For simpler code paths on the deployment side,
    #             we always create a swap partition. If the flavor
    #             does not specify any swap, we default to 1MB
    if swap_mb < 1:
        swap_mb = 1

    return (root_mb, swap_mb)
Example #21
 def test_instance_type_create_then_delete(self):
     """Ensure instance types can be created"""
     starting_inst_list = instance_types.get_all_types()
     instance_types.create(self.name, 256, 1, 120, self.flavorid)
     new = instance_types.get_all_types()
     self.assertNotEqual(len(starting_inst_list), len(new),
                         'instance type was not created')
     instance_types.destroy(self.name)
     self.assertEqual(1,
                      instance_types.get_instance_type(self.id)["deleted"])
     self.assertEqual(starting_inst_list, instance_types.get_all_types())
     instance_types.purge(self.name)
     self.assertEqual(len(starting_inst_list),
                      len(instance_types.get_all_types()),
                      'instance type not purged')
Example #22
    def get_instance_nw_info(self, context, instance_id, instance_uuid,
                                            instance_type_id, host):
        """This method is used by compute to fetch all network data
           that should be used when creating the VM.

           The method simply loops through all virtual interfaces
           stored in the nova DB and queries the IPAM lib to get
           the associated IP data.

           The format of returned data is 'defined' by the initial
           set of NetworkManagers found in nova/network/manager.py.
           Ideally this 'interface' will be more formally defined
           in the future.
        """
        admin_context = context.elevated()
        project_id = context.project_id
        vifs = db.virtual_interface_get_by_instance(context, instance_id)
        instance_type = instance_types.get_instance_type(instance_type_id)

        net_tenant_dict = dict((net_id, tenant_id)
                               for (net_id, tenant_id)
                               in self.ipam.get_project_and_global_net_ids(
                                                          context, project_id))
        networks = {}
        for vif in vifs:
            if vif.get('network_id') is not None:
                network = db.network_get(admin_context, vif['network_id'])
                net_tenant_id = net_tenant_dict[network['uuid']]
                if net_tenant_id is None:
                    net_tenant_id = FLAGS.quantum_default_tenant_id
                network = {'id': network['id'],
                           'uuid': network['uuid'],
                           'bridge': 'ovs_flag',
                           'label': self.q_conn.get_network_name(net_tenant_id,
                                                              network['uuid']),
                           'project_id': net_tenant_id}
                networks[vif['uuid']] = network

        # update instance network cache and return network_info
        nw_info = self.build_network_info_model(context, vifs, networks,
                                                     instance_type, host)
        db.instance_info_cache_update(context, instance_uuid,
                                      {'network_info': nw_info.as_cache()})

        return nw_info
Example #23
    def _prepare_xml_info(self,
                          instance,
                          network_info,
                          rescue,
                          block_device_info=None):
        # block_device_mapping = driver.block_device_info_get_mapping(
        #    block_device_info)
        _map = 0
        for (_, mapping) in network_info:
            _map += 1

        nics = []
        # FIXME(vish): stick this in db
        inst_type_id = instance['instance_type_id']
        inst_type = instance_types.get_instance_type(inst_type_id)

        driver_type = 'raw'

        xml_info = {
            'type': FLAGS.baremetal_type,
            'name': instance['name'],
            'basepath': os.path.join(FLAGS.instances_path, instance['name']),
            'memory_kb': inst_type['memory_mb'] * 1024,
            'vcpus': inst_type['vcpus'],
            'rescue': rescue,
            'driver_type': driver_type,
            'nics': nics,
            'ip_address': mapping['ips'][0]['ip'],
            'mac_address': mapping['mac'],
            'user_data': instance['user_data'],
            'image_id': instance['image_ref'],
            'kernel_id': instance['kernel_id'],
            'ramdisk_id': instance['ramdisk_id']
        }

        if not rescue:
            if instance['kernel_id']:
                xml_info['kernel'] = xml_info['basepath'] + "/kernel"

            if instance['ramdisk_id']:
                xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk"

            xml_info['disk'] = xml_info['basepath'] + "/disk"
        return xml_info
Example #24
 def _live_migration_instance(self):
     inst_type = instance_types.get_instance_type(1)
     # NOTE(danms): we have _got_ to stop doing this!
     inst_type['memory_mb'] = 1024
     sys_meta = utils.dict_to_metadata(
         instance_types.save_instance_type_info({}, inst_type))
     return {'id': 31337,
             'uuid': 'fake_uuid',
             'name': 'fake-instance',
             'host': 'fake_host1',
             'power_state': power_state.RUNNING,
             'memory_mb': 1024,
             'root_gb': 1024,
             'ephemeral_gb': 0,
             'vm_state': '',
             'task_state': '',
             'instance_type_id': inst_type['id'],
             'image_ref': 'fake-image-ref',
             'system_metadata': sys_meta}
Example #26
    def activate_bootloader(self, var, context, node, instance):
        tftp_root = var['tftp_root']
        image_root = var['image_root']
        disk_path = os.path.join(image_root, 'disk')
        image_path = tftp_root + "/disk_" + str(node['id'])
        target_path = tftp_root + "/fs_" + str(node['id'])
        utils.execute('sudo', 'mv', disk_path, image_path)
        utils.execute('sudo', 'mount', '-o', 'loop', image_path, target_path)

        root_mb = instance['root_gb'] * 1024

        inst_type_id = instance['instance_type_id']
        inst_type = instance_types.get_instance_type(inst_type_id)
        swap_mb = inst_type['swap']
        if swap_mb < 1024:
            swap_mb = 1024

        iscsi_iqn = "iqn-%s" % str(instance['uuid'])
        iscsi_portal = None
Example #27
    def _create_migration(self, context, instance, instance_type):
        """Create a migration record for the upcoming resize.  This should
        be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource
        claim will not be lost if the audit process starts.
        """
        # TODO(russellb): no-db-compute: Send the old instance type
        # info that is needed via rpc so db access isn't required
        # here.
        old_instance_type_id = instance['instance_type_id']
        old_instance_type = instance_types.get_instance_type(
                old_instance_type_id)

        return self.conductor_api.migration_create(context, instance,
                {'dest_compute': self.host,
                 'dest_node': self.nodename,
                 'dest_host': self.driver.get_host_ip_addr(),
                 'old_instance_type_id': old_instance_type['id'],
                 'new_instance_type_id': instance_type['id'],
                 'status': 'pre-migrating'})
Example #28
    def _update_usage_from_migration(self, resources, migration):
        """Update usage for a single migration.  The record may
        represent an incoming or outbound migration.
        """
        uuid = migration['instance_uuid']
        LOG.audit(_("Updating from migration %s") % uuid)

        incoming = (migration['dest_compute'] == self.host and
                    migration['dest_node'] == self.nodename)
        outbound = (migration['source_compute'] == self.host and
                    migration['source_node'] == self.nodename)
        same_node = (incoming and outbound)

        instance = self.tracked_instances.get(uuid, None)
        itype = None

        if same_node:
            # same node resize. record usage for whichever instance type the
            # instance is *not* in:
            if (instance['instance_type_id'] ==
                migration['old_instance_type_id']):

                itype = migration['new_instance_type_id']
            else:
                # instance record already has new flavor, hold space for a
                # possible revert to the old instance type:
                itype = migration['old_instance_type_id']

        elif incoming and not instance:
            # instance has not yet migrated here:
            itype = migration['new_instance_type_id']

        elif outbound and not instance:
            # instance migrated, but record usage for a possible revert:
            itype = migration['old_instance_type_id']

        if itype:
            instance_type = instance_types.get_instance_type(itype)
            self.stats.update_stats_for_migration(instance_type)
            self._update_usage(resources, instance_type)
            resources['stats'] = self.stats
            self.tracked_migrations[uuid] = (migration, instance_type)
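The flavor-selection branches above form a small decision table. A hedged condensation, with plain arguments standing in for the migration record and the instance tracker:

    def migration_itype(incoming, outbound, current_id, old_id, new_id,
                        tracked):
        # Which instance type id should resources be held for?
        if incoming and outbound:
            # Same-node resize: hold whichever flavor the instance is NOT in.
            return new_id if current_id == old_id else old_id
        if incoming and not tracked:
            return new_id  # instance has not yet migrated here
        if outbound and not tracked:
            return old_id  # hold space for a possible revert
        return None

    assert migration_itype(True, True, 1, 1, 2, True) == 2
    assert migration_itype(True, False, 1, 1, 2, False) == 2
    assert migration_itype(False, True, 1, 1, 2, False) == 1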
Example #30
    def resize(self, context, instance, *args, **kwargs):
        """Resize (ie, migrate) a running instance.

        If flavor_id is None, the process is considered a migration, keeping
        the original flavor_id. If flavor_id is not None, the instance should
        be migrated to a new host and resized to the new flavor_id.
        """
        super(ComputeCellsAPI, self).resize(context, instance, *args, **kwargs)

        # NOTE(johannes): If we get to this point, then we know the
        # specified flavor_id is valid and exists. We'll need to load
        # it again, but that should be safe.

        old_instance_type_id = instance['instance_type_id']
        old_instance_type = instance_types.get_instance_type(
            old_instance_type_id)

        flavor_id = kwargs.get('flavor_id')

        if not flavor_id:
            new_instance_type = old_instance_type
        else:
            new_instance_type = instance_types.get_instance_type_by_flavor_id(
                flavor_id)

        # NOTE(johannes): Later, when the resize is confirmed or reverted,
        # the superclass implementations of those methods will need access
        # to a local migration record for quota reasons. We don't need
        # source and/or destination information, just the old and new
        # instance_types. Status is set to 'finished' since nothing else
        # will update the status along the way.
        self.db.migration_create(
            context.elevated(), {
                'instance_uuid': instance['uuid'],
                'old_instance_type_id': old_instance_type['id'],
                'new_instance_type_id': new_instance_type['id'],
                'status': 'finished'
            })

        # FIXME(comstud): pass new instance_type object down to a method
        # that'll unfold it
        self._cast_to_cells(context, instance, 'resize', *args, **kwargs)
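The old/new flavor selection above reduces to a one-line choice; a sketch with a lambda standing in for get_instance_type_by_flavor_id:

    def pick_instance_type(old_type, flavor_id, lookup):
        # A bare migration keeps the old flavor; a resize looks up the new one.
        return lookup(flavor_id) if flavor_id else old_type

    old = {'id': 1}
    assert pick_instance_type(old, None, lambda f: {'id': 2}) == old
    assert pick_instance_type(old, 'm1.large', lambda f: {'id': 2}) == {'id': 2}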
Example #31
    def _create_migration(self, context, instance, instance_type):
        """Create a migration record for the upcoming resize.  This should
        be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource
        claim will not be lost if the audit process starts.
        """
        # TODO(russellb): no-db-compute: Send the old instance type
        # info that is needed via rpc so db access isn't required
        # here.
        old_instance_type_id = instance['instance_type_id']
        old_instance_type = instance_types.get_instance_type(
                old_instance_type_id)

        return db.migration_create(context.elevated(),
                {'instance_uuid': instance['uuid'],
                 'source_compute': instance['host'],
                 'dest_compute': self.host,
                 'dest_host': self.driver.get_host_ip_addr(),
                 'old_instance_type_id': old_instance_type['id'],
                 'new_instance_type_id': instance_type['id'],
                 'status': 'pre-migrating'})
Example #32
    def activate_bootloader(self, var, context, node, instance):
        tftp_root = var['tftp_root']
        image_root = var['image_root']
        disk_path = os.path.join(image_root, 'disk')
        image_path = tftp_root + "/disk_" + str(node['id'])
        target_path = tftp_root + "/fs_" + str(node['id'])
        utils.execute('mv', disk_path, image_path, run_as_root=True)
        utils.execute('mount', '-o', 'loop', image_path, target_path,
                      run_as_root=True)

        root_mb = instance['root_gb'] * 1024

        inst_type_id = instance['instance_type_id']
        inst_type = instance_types.get_instance_type(inst_type_id)
        swap_mb = inst_type['swap']
        if swap_mb < 1024:
            swap_mb = 1024

        iscsi_iqn = "iqn-%s" % str(instance['uuid'])
        iscsi_portal = None
Example #33
    def _prepare_xml_info(self, instance, network_info, rescue, block_device_info=None):
        # block_device_mapping = driver.block_device_info_get_mapping(
        #    block_device_info)
        _map = 0
        for (_, mapping) in network_info:
            _map += 1

        nics = []
        # FIXME(vish): stick this in db
        inst_type_id = instance["instance_type_id"]
        inst_type = instance_types.get_instance_type(inst_type_id)

        driver_type = "raw"

        xml_info = {
            "type": FLAGS.baremetal_type,
            "name": instance["name"],
            "basepath": os.path.join(FLAGS.instances_path, instance["name"]),
            "memory_kb": inst_type["memory_mb"] * 1024,
            "vcpus": inst_type["vcpus"],
            "rescue": rescue,
            "driver_type": driver_type,
            "nics": nics,
            "ip_address": mapping["ips"][0]["ip"],
            "mac_address": mapping["mac"],
            "user_data": instance["user_data"],
            "image_id": instance["image_ref"],
            "kernel_id": instance["kernel_id"],
            "ramdisk_id": instance["ramdisk_id"],
        }

        if not rescue:
            if instance["kernel_id"]:
                xml_info["kernel"] = xml_info["basepath"] + "/kernel"

            if instance["ramdisk_id"]:
                xml_info["ramdisk"] = xml_info["basepath"] + "/ramdisk"

            xml_info["disk"] = xml_info["basepath"] + "/disk"
        return xml_info
Example #34
    def resize(self, context, instance, *args, **kwargs):
        """Resize (ie, migrate) a running instance.

        If flavor_id is None, the process is considered a migration, keeping
        the original flavor_id. If flavor_id is not None, the instance should
        be migrated to a new host and resized to the new flavor_id.
        """
        super(ComputeCellsAPI, self).resize(context, instance, *args, **kwargs)

        # NOTE(johannes): If we get to this point, then we know the
        # specified flavor_id is valid and exists. We'll need to load
        # it again, but that should be safe.

        old_instance_type_id = instance['instance_type_id']
        old_instance_type = instance_types.get_instance_type(
                old_instance_type_id)

        flavor_id = kwargs.get('flavor_id')

        if not flavor_id:
            new_instance_type = old_instance_type
        else:
            new_instance_type = instance_types.get_instance_type_by_flavor_id(
                    flavor_id)

        # NOTE(johannes): Later, when the resize is confirmed or reverted,
        # the superclass implementations of those methods will need access
        # to a local migration record for quota reasons. We don't need
        # source and/or destination information, just the old and new
        # instance_types. Status is set to 'finished' since nothing else
        # will update the status along the way.
        self.db.migration_create(context.elevated(),
                    {'instance_uuid': instance['uuid'],
                     'old_instance_type_id': old_instance_type['id'],
                     'new_instance_type_id': new_instance_type['id'],
                     'status': 'finished'})

        # FIXME(comstud): pass new instance_type object down to a method
        # that'll unfold it
        self._cast_to_cells(context, instance, 'resize', *args, **kwargs)
Example #35
    def _prepare_xml_info(self, instance, network_info, rescue,
                          block_device_info=None):
        # block_device_mapping = driver.block_device_info_get_mapping(
        #    block_device_info)
        _map = 0
        for (network, mapping) in network_info:
            _map += 1

        nics = []
        # FIXME(vish): stick this in db
        inst_type_id = instance['instance_type_id']
        inst_type = instance_types.get_instance_type(inst_type_id)

        driver_type = 'raw'

        xml_info = {'type': FLAGS.baremetal_type,
                    'name': instance['name'],
                    'basepath': os.path.join(FLAGS.instances_path,
                                             instance['name']),
                    'memory_kb': inst_type['memory_mb'] * 1024,
                    'vcpus': inst_type['vcpus'],
                    'rescue': rescue,
                    'driver_type': driver_type,
                    'nics': nics,
                    'ip_address': mapping['ips'][0]['ip'],
                    'mac_address': mapping['mac'],
                    'user_data': instance['user_data'],
                    'image_id': instance['image_ref'],
                    'kernel_id': instance['kernel_id'],
                    'ramdisk_id': instance['ramdisk_id']}

        if not rescue:
            if instance['kernel_id']:
                xml_info['kernel'] = xml_info['basepath'] + "/kernel"

            if instance['ramdisk_id']:
                xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk"

            xml_info['disk'] = xml_info['basepath'] + "/disk"
        return xml_info
Example #36
    def test_instance_type_create_then_delete(self):
        """Ensure instance types can be created"""
        name = 'Small Flavor'
        flavorid = 'flavor1'

        original_list = instance_types.get_all_types()

        # create new type and make sure values stick
        inst_type = instance_types.create(name, 256, 1, 120, flavorid)
        inst_type_id = inst_type['id']
        self.assertEqual(inst_type['flavorid'], flavorid)
        self.assertEqual(inst_type['name'], name)
        self.assertEqual(inst_type['memory_mb'], 256)
        self.assertEqual(inst_type['vcpus'], 1)
        self.assertEqual(inst_type['local_gb'], 120)
        self.assertEqual(inst_type['swap'], 0)
        self.assertEqual(inst_type['rxtx_quota'], 0)
        self.assertEqual(inst_type['rxtx_cap'], 0)

        # make sure new type shows up in list
        new_list = instance_types.get_all_types()
        self.assertNotEqual(len(original_list), len(new_list),
                            'instance type was not created')

        # destroy instance and make sure deleted flag is set to True
        instance_types.destroy(name)
        inst_type = instance_types.get_instance_type(inst_type_id)
        self.assertEqual(1, inst_type["deleted"])

        # deleted instance type should no longer be in the list
        new_list = instance_types.get_all_types()
        self.assertEqual(original_list, new_list)

        # ensure instances are gone after purge
        instance_types.purge(name)
        new_list = instance_types.get_all_types()
        self.assertEqual(original_list, new_list,
                         'instance type not purged')
Example #37
    def test_instance_type_create_then_delete(self):
        """Ensure instance types can be created"""
        name = 'Small Flavor'
        flavorid = 'flavor1'

        original_list = instance_types.get_all_types()

        # create new type and make sure values stick
        inst_type = instance_types.create(name, 256, 1, 120, 100, flavorid)
        inst_type_id = inst_type['id']
        self.assertEqual(inst_type['flavorid'], flavorid)
        self.assertEqual(inst_type['name'], name)
        self.assertEqual(inst_type['memory_mb'], 256)
        self.assertEqual(inst_type['vcpus'], 1)
        self.assertEqual(inst_type['root_gb'], 120)
        self.assertEqual(inst_type['ephemeral_gb'], 100)
        self.assertEqual(inst_type['swap'], 0)
        self.assertEqual(inst_type['rxtx_factor'], 1)

        # make sure new type shows up in list
        new_list = instance_types.get_all_types()
        self.assertNotEqual(len(original_list), len(new_list),
                            'instance type was not created')

        # destroy instance and make sure deleted flag is set to True
        instance_types.destroy(name)
        inst_type = instance_types.get_instance_type(inst_type_id)
        self.assertEqual(1, inst_type["deleted"])

        # deleted instance type should no longer be in the list
        new_list = instance_types.get_all_types()
        self.assertEqual(original_list, new_list)

        # ensure instances are gone after purge
        instance_types.purge(name)
        new_list = instance_types.get_all_types()
        self.assertEqual(original_list, new_list, 'instance type not purged')
Example #38
    def _create_image(self, context, inst, xml, suffix="", disk_images=None, network_info=None, block_device_info=None):
        if not suffix:
            suffix = ""

        # syntactic nicety
        def basepath(fname="", suffix=suffix):
            return os.path.join(FLAGS.instances_path, inst["name"], fname + suffix)

        # ensure directories exist and are writable
        libvirt_utils.ensure_tree(basepath(suffix=""))
        utils.execute("chmod", "0777", basepath(suffix=""))

        LOG.info(_("instance %s: Creating image"), inst["name"], instance=inst)

        if FLAGS.baremetal_type == "lxc":
            container_dir = "%s/rootfs" % basepath(suffix="")
            libvirt_utils.ensure_tree(container_dir)

        # NOTE(vish): No need to add the suffix to console.log
        libvirt_utils.write_to_file(basepath("console.log", ""), "", 007)

        if not disk_images:
            disk_images = {
                "image_id": inst["image_ref"],
                "kernel_id": inst["kernel_id"],
                "ramdisk_id": inst["ramdisk_id"],
            }

        if disk_images["kernel_id"]:
            fname = disk_images["kernel_id"]
            self._cache_image(
                fn=libvirt_utils.fetch_image,
                context=context,
                target=basepath("kernel"),
                fname=fname,
                cow=False,
                image_id=disk_images["kernel_id"],
                user_id=inst["user_id"],
                project_id=inst["project_id"],
            )
            if disk_images["ramdisk_id"]:
                fname = disk_images["ramdisk_id"]
                self._cache_image(
                    fn=libvirt_utils.fetch_image,
                    context=context,
                    target=basepath("ramdisk"),
                    fname=fname,
                    cow=False,
                    image_id=disk_images["ramdisk_id"],
                    user_id=inst["user_id"],
                    project_id=inst["project_id"],
                )

        root_fname = hashlib.sha1(str(disk_images["image_id"])).hexdigest()
        size = inst["root_gb"] * 1024 * 1024 * 1024

        inst_type_id = inst["instance_type_id"]
        inst_type = instance_types.get_instance_type(inst_type_id)
        if inst_type["name"] == "m1.tiny" or suffix == ".rescue":
            size = None
            root_fname += "_sm"
        else:
            root_fname += "_%d" % inst["root_gb"]

        self._cache_image(
            fn=libvirt_utils.fetch_image,
            context=context,
            target=basepath("root"),
            fname=root_fname,
            cow=False,  # FLAGS.use_cow_images,
            image_id=disk_images["image_id"],
            user_id=inst["user_id"],
            project_id=inst["project_id"],
        )

        # For now, we assume that if we're not using a kernel, we're using a
        # partitioned disk image where the target partition is the first
        # partition
        target_partition = None
        if not inst["kernel_id"]:
            target_partition = "1"

        if FLAGS.baremetal_type == "lxc":
            target_partition = None

        if inst["key_data"]:
            key = str(inst["key_data"])
        else:
            key = None
        net = None

        nets = []
        ifc_template = open(FLAGS.injected_network_template).read()
        ifc_num = -1
        have_injected_networks = False
        admin_context = nova_context.get_admin_context()
        for (network_ref, mapping) in network_info:
            ifc_num += 1

            if not network_ref["injected"]:
                continue

            have_injected_networks = True
            address = mapping["ips"][0]["ip"]
            netmask = mapping["ips"][0]["netmask"]
            address_v6 = None
            gateway_v6 = None
            netmask_v6 = None
            if FLAGS.use_ipv6:
                address_v6 = mapping["ip6s"][0]["ip"]
                netmask_v6 = mapping["ip6s"][0]["netmask"]
                gateway_v6 = mapping["gateway_v6"]
            net_info = {
                "name": "eth%d" % ifc_num,
                "address": address,
                "netmask": netmask,
                "gateway": mapping["gateway"],
                "broadcast": mapping["broadcast"],
                "dns": " ".join(mapping["dns"]),
                "address_v6": address_v6,
                "gateway_v6": gateway_v6,
                "netmask_v6": netmask_v6,
            }
            nets.append(net_info)

        if have_injected_networks:
            net = str(Template(ifc_template, searchList=[{"interfaces": nets, "use_ipv6": FLAGS.use_ipv6}]))

        metadata = inst.get("metadata")
        if any((key, net, metadata)):
            inst_name = inst["name"]

            injection_path = basepath("root")
            img_id = inst.image_ref
            disable_auto_fsck = True

            for injection in ("metadata", "key", "net"):
                if locals()[injection]:
                    LOG.info(
                        _("instance %(inst_name)s: injecting " "%(injection)s into image %(img_id)s"),
                        locals(),
                        instance=inst,
                    )
            try:
                disk.inject_data(
                    injection_path,
                    key,
                    net,
                    metadata,
                    partition=target_partition,
                    use_cow=False,  # FLAGS.use_cow_images,
                    disable_auto_fsck=disable_auto_fsck,
                )

            except Exception as e:
                # This could be a windows image, or a vmdk format disk
                LOG.warn(
                    _("instance %(inst_name)s: ignoring error injecting" " data into image %(img_id)s (%(e)s)")
                    % locals(),
                    instance=inst,
                )
Example #39
    def _start_vcs(self, context, vsa, drives=None):
        """Start VCs for VSA."""
        # Avoid a mutable default argument for drives.
        drives = drives or []

        vsa_id = vsa['id']
        if vsa['status'] == VsaState.CREATING:
            self.vsa_api.update_vsa_status(context, vsa_id,
                                           VsaState.LAUNCHING)
        else:
            return

        # In a _separate_ loop, go over all volumes and mark them as "attached".
        has_failed_volumes = False
        for drive in drives:
            vol_name = drive['name']
            vol_disp_name = drive['display_name']
            status = drive['status']
            LOG.info(_("VSA ID %(vsa_id)d: Drive %(vol_name)s "\
                        "(%(vol_disp_name)s) is in %(status)s state"),
                        locals())
            if status == 'available':
                try:
                    # self.volume_api.update(context, volume,
                    #                   dict(attach_status="attached"))
                    pass
                except Exception as ex:
                    msg = _("Failed to update attach status for volume "
                            "%(vol_name)s. %(ex)s") % locals()
                    LOG.exception(msg)
            else:
                has_failed_volumes = True

        if has_failed_volumes:
            LOG.info(_("VSA ID %(vsa_id)d: Delete all BE volumes"), locals())
            self.vsa_api.delete_vsa_volumes(context, vsa_id, "BE", True)
            self.vsa_api.update_vsa_status(context, vsa_id,
                                           VsaState.FAILED)
            return

        # create user-data record for VC
        storage_data = vsa_utils.generate_user_data(vsa, drives)

        instance_type = instance_types.get_instance_type(
                                            vsa['instance_type_id'])

        # now start the VC instance

        vc_count = vsa['vc_count']
        LOG.info(_("VSA ID %(vsa_id)d: Start %(vc_count)d instances"),
                    locals())
        vc_instances = self.compute_api.create(context,
                instance_type,      # vsa['vsa_instance_type'],
                vsa['image_ref'],
                min_count=1,
                max_count=vc_count,
                display_name='vc-' + vsa['display_name'],
                display_description='VC for VSA ' + vsa['display_name'],
                availability_zone=vsa['availability_zone'],
                user_data=storage_data,
                metadata=dict(vsa_id=str(vsa_id)))

        self.vsa_api.update_vsa_status(context, vsa_id,
                                       VsaState.CREATED)
Example #40
    def _create_image(self, context, inst, xml, suffix='',
                      disk_images=None, network_info=None,
                      block_device_info=None):
        if not suffix:
            suffix = ''

        # syntactic nicety
        def basepath(fname='', suffix=suffix):
            return os.path.join(FLAGS.instances_path,
                                inst['name'],
                                fname + suffix)

        # ensure directories exist and are writable
        libvirt_utils.ensure_tree(basepath(suffix=''))
        utils.execute('chmod', '0777', basepath(suffix=''))

        LOG.info(_('instance %s: Creating image'), inst['name'])

        if FLAGS.baremetal_type == 'lxc':
            container_dir = '%s/rootfs' % basepath(suffix='')
            libvirt_utils.ensure_tree(container_dir)

        # NOTE(vish): No need to add the suffix to console.log
        libvirt_utils.write_to_file(basepath('console.log', ''), '', 007)

        if not disk_images:
            disk_images = {'image_id': inst['image_ref'],
                           'kernel_id': inst['kernel_id'],
                           'ramdisk_id': inst['ramdisk_id']}

        if disk_images['kernel_id']:
            fname = disk_images['kernel_id']
            self._cache_image(fn=libvirt_utils.fetch_image,
                              context=context,
                              target=basepath('kernel'),
                              fname=fname,
                              cow=False,
                              image_id=disk_images['kernel_id'],
                              user_id=inst['user_id'],
                              project_id=inst['project_id'])
            if disk_images['ramdisk_id']:
                fname = disk_images['ramdisk_id']
                self._cache_image(fn=libvirt_utils.fetch_image,
                                  context=context,
                                  target=basepath('ramdisk'),
                                  fname=fname,
                                  cow=False,
                                  image_id=disk_images['ramdisk_id'],
                                  user_id=inst['user_id'],
                                  project_id=inst['project_id'])

        root_fname = hashlib.sha1(str(disk_images['image_id'])).hexdigest()
        size = inst['root_gb'] * 1024 * 1024 * 1024

        inst_type_id = inst['instance_type_id']
        inst_type = instance_types.get_instance_type(inst_type_id)
        if inst_type['name'] == 'm1.tiny' or suffix == '.rescue':
            size = None
            root_fname += "_sm"
        else:
            root_fname += "_%d" % inst['root_gb']

        self._cache_image(fn=libvirt_utils.fetch_image,
                          context=context,
                          target=basepath('root'),
                          fname=root_fname,
                          cow=False,  # FLAGS.use_cow_images,
                          image_id=disk_images['image_id'],
                          user_id=inst['user_id'],
                          project_id=inst['project_id'],
                          size=size)

        # For now, we assume that if we're not using a kernel, we're using a
        # partitioned disk image where the target partition is the first
        # partition
        target_partition = None
        if not inst['kernel_id']:
            target_partition = "1"

        if FLAGS.baremetal_type == 'lxc':
            target_partition = None

        if inst['key_data']:
            key = str(inst['key_data'])
        else:
            key = None
        net = None

        nets = []
        ifc_template = open(FLAGS.injected_network_template).read()
        ifc_num = -1
        have_injected_networks = False
        admin_context = nova_context.get_admin_context()
        for (network_ref, mapping) in network_info:
            ifc_num += 1

            if not network_ref['injected']:
                continue

            have_injected_networks = True
            address = mapping['ips'][0]['ip']
            netmask = mapping['ips'][0]['netmask']
            address_v6 = None
            gateway_v6 = None
            netmask_v6 = None
            if FLAGS.use_ipv6:
                address_v6 = mapping['ip6s'][0]['ip']
                netmask_v6 = mapping['ip6s'][0]['netmask']
                gateway_v6 = mapping['gateway_v6']
            net_info = {'name': 'eth%d' % ifc_num,
                   'address': address,
                   'netmask': netmask,
                   'gateway': mapping['gateway'],
                   'broadcast': mapping['broadcast'],
                   'dns': ' '.join(mapping['dns']),
                   'address_v6': address_v6,
                   'gateway_v6': gateway_v6,
                   'netmask_v6': netmask_v6}
            nets.append(net_info)

        if have_injected_networks:
            net = str(Template(ifc_template,
                               searchList=[{'interfaces': nets,
                                            'use_ipv6': FLAGS.use_ipv6}]))

        metadata = inst.get('metadata')
        if any((key, net, metadata)):
            inst_name = inst['name']

            injection_path = basepath('root')
            img_id = inst.image_ref
            disable_auto_fsck = True

            for injection in ('metadata', 'key', 'net'):
                if locals()[injection]:
                    LOG.info(_('instance %(inst_name)s: injecting '
                               '%(injection)s into image %(img_id)s')
                             % locals())
            try:
                disk.inject_data(injection_path, key, net, metadata,
                                 partition=target_partition,
                                 use_cow=False,  # FLAGS.use_cow_images,
                                 disable_auto_fsck=disable_auto_fsck)

            except Exception as e:
                # This could be a windows image, or a vmdk format disk
                LOG.warn(_('instance %(inst_name)s: ignoring error injecting'
                        ' data into image %(img_id)s (%(e)s)') % locals())
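A note on the cache naming above: the root filename is the SHA-1 hex digest of the image id, suffixed with "_sm" for unsized (m1.tiny or rescue) images or with the root size in GB otherwise, so cached copies of different sizes never collide. A minimal sketch of that scheme (the helper name is hypothetical):

    import hashlib

    def root_cache_fname(image_id, root_gb, unsized):
        # hypothetical helper mirroring the naming logic above
        fname = hashlib.sha1(str(image_id)).hexdigest()
        if unsized:
            # no resize requested: share one 'small' cached copy
            return fname + '_sm'
        # otherwise cache one copy per requested root size
        return fname + '_%d' % root_gb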
Example #42
    def create(self,
               context,
               instance_type,
               image_id,
               kernel_id=None,
               ramdisk_id=None,
               min_count=1,
               max_count=1,
               display_name='',
               display_description='',
               key_name=None,
               key_data=None,
               security_group='default',
               availability_zone=None,
               user_data=None,
               metadata=[],
               injected_files=None):
        """Create the number of instances requested if quota and
        other arguments check out ok."""

        type_data = instance_types.get_instance_type(instance_type)
        num_instances = quota.allowed_instances(context, max_count, type_data)
        if num_instances < min_count:
            pid = context.project_id
            LOG.warn(
                _("Quota exceeeded for %(pid)s,"
                  " tried to run %(min_count)s instances") % locals())
            raise quota.QuotaError(
                _("Instance quota exceeded. You can only "
                  "run %s more instances of this type.") % num_instances,
                "InstanceLimitExceeded")

        num_metadata = len(metadata)
        quota_metadata = quota.allowed_metadata_items(context, num_metadata)
        if quota_metadata < num_metadata:
            pid = context.project_id
            msg = (_("Quota exceeeded for %(pid)s,"
                     " tried to set %(num_metadata)s metadata properties") %
                   locals())
            LOG.warn(msg)
            raise quota.QuotaError(msg, "MetadataLimitExceeded")

        # Because metadata is stored in the DB, we hard-code the size limits
        # In future, we may support more variable length strings, so we act
        #  as if this is quota-controlled for forwards compatibility
        for metadata_item in metadata:
            k = metadata_item['key']
            v = metadata_item['value']
            if len(k) > 255 or len(v) > 255:
                pid = context.project_id
                msg = (_("Quota exceeeded for %(pid)s,"
                         " metadata property key or value too long") %
                       locals())
                LOG.warn(msg)
                raise quota.QuotaError(msg, "MetadataLimitExceeded")

        self._check_injected_file_quota(context, injected_files)

        image = self.image_service.show(context, image_id)

        os_type = None
        if 'properties' in image and 'os_type' in image['properties']:
            os_type = image['properties']['os_type']

        if kernel_id is None:
            kernel_id = image['properties'].get('kernel_id', None)
        if ramdisk_id is None:
            ramdisk_id = image['properties'].get('ramdisk_id', None)
        # FIXME(sirp): is there a way we can remove null_kernel?
        # No kernel and ramdisk for raw images
        if kernel_id == str(FLAGS.null_kernel):
            kernel_id = None
            ramdisk_id = None
            LOG.debug(_("Creating a raw instance"))
        # Make sure we have access to kernel and ramdisk (if not raw)
        logging.debug("Using Kernel=%s, Ramdisk=%s" % (kernel_id, ramdisk_id))
        if kernel_id:
            self.image_service.show(context, kernel_id)
        if ramdisk_id:
            self.image_service.show(context, ramdisk_id)

        if security_group is None:
            security_group = ['default']
        if not isinstance(security_group, list):
            security_group = [security_group]

        security_groups = []
        self.ensure_default_security_group(context)
        for security_group_name in security_group:
            group = db.security_group_get_by_name(context, context.project_id,
                                                  security_group_name)
            security_groups.append(group['id'])

        if key_data is None and key_name:
            key_pair = db.key_pair_get(context, context.user_id, key_name)
            key_data = key_pair['public_key']

        base_options = {
            'reservation_id': utils.generate_uid('r'),
            'image_id': image_id,
            'kernel_id': kernel_id or '',
            'ramdisk_id': ramdisk_id or '',
            'state': 0,
            'state_description': 'scheduling',
            'user_id': context.user_id,
            'project_id': context.project_id,
            'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
            'instance_type': instance_type,
            'memory_mb': type_data['memory_mb'],
            'vcpus': type_data['vcpus'],
            'local_gb': type_data['local_gb'],
            'display_name': display_name,
            'display_description': display_description,
            'user_data': user_data or '',
            'key_name': key_name,
            'key_data': key_data,
            'locked': False,
            'metadata': metadata,
            'availability_zone': availability_zone,
            'os_type': os_type
        }
        elevated = context.elevated()
        instances = []
        LOG.debug(_("Going to run %s instances..."), num_instances)
        for num in range(num_instances):
            instance = dict(mac_address=utils.generate_mac(),
                            launch_index=num,
                            **base_options)
            instance = self.db.instance_create(context, instance)
            instance_id = instance['id']

            elevated = context.elevated()
            if not security_groups:
                security_groups = []
            for security_group_id in security_groups:
                self.db.instance_add_security_group(elevated, instance_id,
                                                    security_group_id)

            # Set sane defaults if not specified
            updates = dict(hostname=self.hostname_factory(instance_id))
            if (not hasattr(instance, 'display_name')
                    or instance.display_name is None):
                updates['display_name'] = "Server %s" % instance_id

            instance = self.update(context, instance_id, **updates)
            instances.append(instance)

            pid = context.project_id
            uid = context.user_id
            LOG.debug(
                _("Casting to scheduler for %(pid)s/%(uid)s's"
                  " instance %(instance_id)s") % locals())
            rpc.cast(
                context, FLAGS.scheduler_topic, {
                    "method": "run_instance",
                    "args": {
                        "topic": FLAGS.compute_topic,
                        "instance_id": instance_id,
                        "availability_zone": availability_zone,
                        "injected_files": injected_files
                    }
                })

        for group_id in security_groups:
            self.trigger_security_group_members_refresh(elevated, group_id)

        return [dict(x.iteritems()) for x in instances]
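For context, a caller would invoke this method roughly as follows; the client object and all argument values here are placeholders, not taken from the project:

    # illustrative only: 'api' and 'ctxt' are placeholders
    instances = api.create(ctxt,
                           instance_type='m1.small',
                           image_id='ami-00000001',
                           min_count=1,
                           max_count=3,
                           display_name='web',
                           metadata=[{'key': 'role', 'value': 'web'}])
    # returns one plain dict per instance actually scheduled, which may
    # be fewer than max_count if the quota allowance is lower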
Example #43
    def test_will_get_instance_type_by_id(self):
        default_instance_type = instance_types.get_default_instance_type()
        instance_type_id = default_instance_type['id']
        fetched = instance_types.get_instance_type(instance_type_id)
        self.assertEqual(default_instance_type, fetched)
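A symmetric check by name would look like the sketch below; it assumes a get_instance_type_by_name lookup exists alongside the id-based one, which is not shown in these examples:

    def test_will_get_instance_type_by_name(self):
        # assumes a name-based lookup symmetric to get_instance_type
        default = instance_types.get_default_instance_type()
        fetched = instance_types.get_instance_type_by_name(default['name'])
        self.assertEqual(default, fetched)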
Example #44
    def activate_bootloader(self, var, context, node, instance):
        tftp_root = var['tftp_root']
        image_path = var['image_path']

        deploy_aki_id = FLAGS.baremetal_deploy_kernel
        deploy_ari_id = FLAGS.baremetal_deploy_ramdisk
        aki_id = str(instance['kernel_id'])
        ari_id = str(instance['ramdisk_id'])

        images = [(deploy_aki_id, 'deploy_kernel'),
                  (deploy_ari_id, 'deploy_ramdisk'),
                  (aki_id, 'kernel'),
                  (ari_id, 'ramdisk'),
                  ]

        utils.ensure_tree(tftp_root)
        if FLAGS.baremetal_pxe_vlan_per_host:
            tftp_paths = [i[1] for i in images]
        else:
            tftp_paths = [os.path.join(str(instance['uuid']), i[1])
                    for i in images]
            utils.ensure_tree(
                    os.path.join(tftp_root, str(instance['uuid'])))

        LOG.debug("tftp_paths=%s", tftp_paths)

        def _cache_image_b(image_id, target):
            LOG.debug("fetching id=%s target=%s", image_id, target)
            _cache_image_x(context=context,
                           image_id=image_id,
                           target=target,
                           user_id=instance['user_id'],
                           project_id=instance['project_id'])

        for image, path in zip(images, tftp_paths):
            target = os.path.join(tftp_root, path)
            _cache_image_b(image[0], target)

        pxe_config_dir = os.path.join(tftp_root, 'pxelinux.cfg')
        pxe_config_path = os.path.join(pxe_config_dir,
                                       self._pxe_cfg_name(node))

        root_mb = instance['root_gb'] * 1024

        inst_type_id = instance['instance_type_id']
        inst_type = instance_types.get_instance_type(inst_type_id)
        swap_mb = inst_type['swap']
        if swap_mb < 1024:
            swap_mb = 1024

        pxe_ip = None
        if FLAGS.baremetal_pxe_vlan_per_host:
            pxe_ip_id = bmdb.bm_pxe_ip_associate(context, node['id'])
            pxe_ip = bmdb.bm_pxe_ip_get(context, pxe_ip_id)

        deployment_key = _random_alnum(32)
        deployment_id = bmdb.bm_deployment_create(context, deployment_key,
                                                  image_path, pxe_config_path,
                                                  root_mb, swap_mb)
        deployment_iscsi_iqn = "iqn-%s" % str(instance['uuid'])
        iscsi_portal = None
        if FLAGS.baremetal_pxe_append_iscsi_portal:
            if pxe_ip:
                iscsi_portal = pxe_ip['server_address']
        pxeconf = _build_pxe_config(deployment_id,
                                    deployment_key,
                                    deployment_iscsi_iqn,
                                    deployment_aki_path=tftp_paths[0],
                                    deployment_ari_path=tftp_paths[1],
                                    aki_path=tftp_paths[2],
                                    ari_path=tftp_paths[3],
                                    iscsi_portal=iscsi_portal)
        utils.ensure_tree(pxe_config_dir)
        libvirt_utils.write_to_file(pxe_config_path, pxeconf)

        if FLAGS.baremetal_pxe_vlan_per_host:
            vlan_id = node['prov_vlan_id']
            server_address = pxe_ip['server_address']
            client_address = pxe_ip['address']
            _start_per_host_pxe_server(tftp_root, vlan_id,
                                       server_address, client_address)
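The TFTP path layout is the interesting branch here: with baremetal_pxe_vlan_per_host each node gets its own PXE server rooted at tftp_root, so bare image names suffice; on a shared root the four images are namespaced under the instance UUID. The same logic as a standalone sketch:

    import os

    def tftp_image_paths(instance_uuid, per_host_vlan):
        # flat names for per-host PXE servers, uuid-prefixed names on a
        # shared TFTP root, in the same fixed order as above
        names = ['deploy_kernel', 'deploy_ramdisk', 'kernel', 'ramdisk']
        if per_host_vlan:
            return names
        return [os.path.join(str(instance_uuid), n) for n in names]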
Example #45
    def activate_bootloader(self, var, context, node, instance):
        tftp_root = var['tftp_root']
        image_path = var['image_path']

        deploy_aki_id = FLAGS.baremetal_deploy_kernel
        deploy_ari_id = FLAGS.baremetal_deploy_ramdisk
        aki_id = str(instance['kernel_id'])
        ari_id = str(instance['ramdisk_id'])

        images = [
            (deploy_aki_id, 'deploy_kernel'),
            (deploy_ari_id, 'deploy_ramdisk'),
            (aki_id, 'kernel'),
            (ari_id, 'ramdisk'),
        ]

        utils.ensure_tree(tftp_root)
        if FLAGS.baremetal_pxe_vlan_per_host:
            tftp_paths = [i[1] for i in images]
        else:
            tftp_paths = [
                os.path.join(str(instance['uuid']), i[1]) for i in images
            ]
            utils.ensure_tree(os.path.join(tftp_root, str(instance['uuid'])))

        LOG.debug("tftp_paths=%s", tftp_paths)

        def _cache_image_b(image_id, target):
            LOG.debug("fetching id=%s target=%s", image_id, target)
            _cache_image_x(context=context,
                           image_id=image_id,
                           target=target,
                           user_id=instance['user_id'],
                           project_id=instance['project_id'])

        for image, path in zip(images, tftp_paths):
            target = os.path.join(tftp_root, path)
            _cache_image_b(image[0], target)

        pxe_config_dir = os.path.join(tftp_root, 'pxelinux.cfg')
        pxe_config_path = os.path.join(pxe_config_dir,
                                       self._pxe_cfg_name(node))

        root_mb = instance['root_gb'] * 1024

        inst_type_id = instance['instance_type_id']
        inst_type = instance_types.get_instance_type(inst_type_id)
        swap_mb = inst_type['swap']
        if swap_mb < 1024:
            swap_mb = 1024

        pxe_ip = None
        if FLAGS.baremetal_pxe_vlan_per_host:
            pxe_ip_id = bmdb.bm_pxe_ip_associate(context, node['id'])
            pxe_ip = bmdb.bm_pxe_ip_get(context, pxe_ip_id)

        deployment_key = _random_alnum(32)
        deployment_id = bmdb.bm_deployment_create(context, deployment_key,
                                                  image_path, pxe_config_path,
                                                  root_mb, swap_mb)
        deployment_iscsi_iqn = "iqn-%s" % str(instance['uuid'])
        iscsi_portal = None
        if FLAGS.baremetal_pxe_append_iscsi_portal:
            if pxe_ip:
                iscsi_portal = pxe_ip['server_address']
        pxeconf = _build_pxe_config(deployment_id,
                                    deployment_key,
                                    deployment_iscsi_iqn,
                                    deployment_aki_path=tftp_paths[0],
                                    deployment_ari_path=tftp_paths[1],
                                    aki_path=tftp_paths[2],
                                    ari_path=tftp_paths[3],
                                    iscsi_portal=iscsi_portal)
        utils.ensure_tree(pxe_config_dir)
        libvirt_utils.write_to_file(pxe_config_path, pxeconf)

        if FLAGS.baremetal_pxe_vlan_per_host:
            vlan_id = node['prov_vlan_id']
            server_address = pxe_ip['server_address']
            client_address = pxe_ip['address']
            _start_per_host_pxe_server(tftp_root, vlan_id, server_address,
                                       client_address)
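Both variants share the same partition sizing rule: the root partition tracks the instance's root_gb, while swap comes from the flavor but is floored at 1024 MB. Reduced to a sketch:

    def deployment_partition_sizes(instance, inst_type):
        # root partition size in MB, from the instance record
        root_mb = instance['root_gb'] * 1024
        # flavor swap, never below the 1 GB floor applied above
        swap_mb = max(inst_type['swap'], 1024)
        return root_mb, swap_mb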
Example #46
    def allocate_for_instance(self, context, **kwargs):
        """Called by compute when it is creating a new VM.

           There are three key tasks:
                - Determine the number and order of vNICs to create
                - Allocate IP addresses
                - Create ports on a Quantum network and attach vNICs.

           We support two approaches to determining vNICs:
                - By default, a VM gets a vNIC for any network belonging
                  to the VM's project, and a vNIC for any "global" network
                  that has a NULL project_id.  vNIC order is determined
                  by the network's 'priority' field.
                - If the 'os-create-server-ext' was used to create the VM,
                  only the networks in 'requested_networks' are used to
                  create vNICs, and the vNIC order is determined by the
                  order in the requested_networks array.

           For each vNIC, use the FlatManager to create the entries
           in the virtual_interfaces table, contact Quantum to
           create a port and attach the vNIC, and use the IPAM
           lib to allocate IP addresses.
        """
        instance_id = kwargs.pop("instance_id")
        instance_type_id = kwargs["instance_type_id"]
        host = kwargs.pop("host")
        project_id = kwargs.pop("project_id")
        LOG.debug(_("network allocations for instance %s"), project_id)

        requested_networks = kwargs.get("requested_networks")

        if requested_networks:
            net_proj_pairs = [(net_id, project_id) for (net_id, _i) in requested_networks]
        else:
            net_proj_pairs = self.ipam.get_project_and_global_net_ids(context, project_id)

        # Quantum may also know about networks that aren't in the networks
        # table so we need to query Quantum for any tenant networks and add
        # them to net_proj_pairs.
        qnets = self.q_conn.get_networks(project_id)
        for qn in qnets["networks"]:
            pair = (qn["id"], project_id)
            if pair not in net_proj_pairs:
                net_proj_pairs.append(pair)

        # Create a port via quantum and attach the vif
        for (quantum_net_id, project_id) in net_proj_pairs:
            # FIXME(danwent): We'd like to have the manager be
            # completely decoupled from the nova networks table.
            # However, other parts of nova sometimes go behind our
            # back and access network data directly from the DB.  So
            # for now, the quantum manager knows that there is a nova
            # networks DB table and accesses it here.  updating the
            # virtual_interfaces table to use UUIDs would be one
            # solution, but this would require significant work
            # elsewhere.
            admin_context = context.elevated()

            # We may not be able to get a network_ref here if this network
            # isn't in the database (i.e. it came from Quantum).
            network_ref = db.network_get_by_uuid(admin_context, quantum_net_id)
            if network_ref is None:
                network_ref = {
                    "uuid": quantum_net_id,
                    "project_id": project_id,
                    # NOTE(bgh): We need to document this somewhere but since
                    # we don't know the priority of any networks we get from
                    # quantum we just give them a priority of 0.  If its
                    # necessary to specify the order of the vifs and what
                    # network they map to then the user will have to use the
                    # OSCreateServer extension and specify them explicitly.
                    #
                    # In the future users will be able to tag quantum networks
                    # with a priority .. and at that point we can update the
                    # code here to reflect that.
                    "priority": 0,
                    "id": "NULL",
                    "label": "quantum-net-%s" % quantum_net_id,
                }

            vif_rec = self.add_virtual_interface(context, instance_id, network_ref["id"])

            # talk to Quantum API to create and attach port.
            instance = db.instance_get(context, instance_id)
            instance_type = instance_types.get_instance_type(instance_type_id)
            rxtx_factor = instance_type["rxtx_factor"]
            nova_id = self._get_nova_id(context)
            q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
            self.q_conn.create_and_attach_port(
                q_tenant_id,
                quantum_net_id,
                vif_rec["uuid"],
                vm_id=instance["uuid"],
                rxtx_factor=rxtx_factor,
                nova_id=nova_id,
            )
            # Tell melange to allocate an IP
            ip = self.ipam.allocate_fixed_ip(context, project_id, quantum_net_id, vif_rec)
            # Set up/start the dhcp server for this network if necessary
            if FLAGS.quantum_use_dhcp:
                self.enable_dhcp(context, quantum_net_id, network_ref, vif_rec, project_id)
        return self.get_instance_nw_info(context, instance_id, instance_type_id, host)
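The merge step near the top deserves isolating: the nova-derived (network, project) pairs come first, then any tenant networks that only Quantum knows about are appended, skipping duplicates so ordering and priority are preserved. A minimal sketch:

    def merge_net_pairs(nova_pairs, quantum_networks, project_id):
        # keep nova's pairs and their order, then append Quantum-only
        # tenant networks exactly once
        pairs = list(nova_pairs)
        for qn in quantum_networks:
            pair = (qn['id'], project_id)
            if pair not in pairs:
                pairs.append(pair)
        return pairs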
Example #47
    def allocate_for_instance(self, context, **kwargs):
        """Called by compute when it is creating a new VM.

           There are three key tasks:
                - Determine the number and order of vNICs to create
                - Allocate IP addresses
                - Create ports on a Quantum network and attach vNICs.

           We support two approaches to determining vNICs:
                - By default, a VM gets a vNIC for any network belonging
                  to the VM's project, and a vNIC for any "global" network
                  that has a NULL project_id.  vNIC order is determined
                  by the network's 'priority' field.
                - If the 'os-create-server-ext' was used to create the VM,
                  only the networks in 'requested_networks' are used to
                  create vNICs, and the vNIC order is determined by the
                  order in the requested_networks array.

           For each vNIC, use the FlatManager to create the entries
           in the virtual_interfaces table, contact Quantum to
           create a port and attach the vNIC, and use the IPAM
           lib to allocate IP addresses.
        """
        instance_id = kwargs.pop('instance_id')
        instance_type_id = kwargs['instance_type_id']
        host = kwargs.pop('host')
        project_id = kwargs.pop('project_id')
        LOG.debug(_("network allocations for instance %s"), project_id)

        requested_networks = kwargs.get('requested_networks')

        if requested_networks:
            net_proj_pairs = [(net_id, project_id)
                              for (net_id, _i) in requested_networks]
        else:
            net_proj_pairs = self.ipam.get_project_and_global_net_ids(
                context, project_id)

        # Quantum may also know about networks that aren't in the networks
        # table so we need to query Quantum for any tenant networks and add
        # them to net_proj_pairs.
        qnets = self.q_conn.get_networks(project_id)
        for qn in qnets['networks']:
            pair = (qn['id'], project_id)
            if pair not in net_proj_pairs:
                net_proj_pairs.append(pair)

        # Create a port via quantum and attach the vif
        for (quantum_net_id, project_id) in net_proj_pairs:
            # FIXME(danwent): We'd like to have the manager be
            # completely decoupled from the nova networks table.
            # However, other parts of nova sometimes go behind our
            # back and access network data directly from the DB.  So
            # for now, the quantum manager knows that there is a nova
            # networks DB table and accesses it here.  updating the
            # virtual_interfaces table to use UUIDs would be one
            # solution, but this would require significant work
            # elsewhere.
            admin_context = context.elevated()

            # We may not be able to get a network_ref here if this network
            # isn't in the database (i.e. it came from Quantum).
            network_ref = db.network_get_by_uuid(admin_context, quantum_net_id)
            if network_ref is None:
                network_ref = {
                    "uuid": quantum_net_id,
                    "project_id": project_id,
                    # NOTE(bgh): We need to document this somewhere but since
                    # we don't know the priority of any networks we get from
                    # quantum we just give them a priority of 0.  If its
                    # necessary to specify the order of the vifs and what
                    # network they map to then the user will have to use the
                    # OSCreateServer extension and specify them explicitly.
                    #
                    # In the future users will be able to tag quantum networks
                    # with a priority .. and at that point we can update the
                    # code here to reflect that.
                    "priority": 0,
                    "id": 'NULL',
                    "label": "quantum-net-%s" % quantum_net_id
                }

            vif_rec = self.add_virtual_interface(context, instance_id,
                                                 network_ref['id'])

            # talk to Quantum API to create and attach port.
            instance = db.instance_get(context, instance_id)
            instance_type = instance_types.get_instance_type(instance_type_id)
            rxtx_factor = instance_type['rxtx_factor']
            nova_id = self._get_nova_id(instance)
            q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
            # Tell the ipam library to allocate an IP
            ip = self.ipam.allocate_fixed_ip(context, project_id,
                                             quantum_net_id, vif_rec)
            pairs = []
            # Set up port security if enabled
            if FLAGS.quantum_use_port_security:
                pairs = [{'mac_address': vif_rec['address'], 'ip_address': ip}]
            self.q_conn.create_and_attach_port(q_tenant_id,
                                               quantum_net_id,
                                               vif_rec['uuid'],
                                               vm_id=instance['uuid'],
                                               rxtx_factor=rxtx_factor,
                                               nova_id=nova_id,
                                               allowed_address_pairs=pairs)
            # Set up/start the dhcp server for this network if necessary
            if FLAGS.quantum_use_dhcp:
                self.enable_dhcp(context, quantum_net_id, network_ref, vif_rec,
                                 project_id)
        return self.get_instance_nw_info(context, instance_id,
                                         instance_type_id, host)
Example #48
    def create_vm(cls, session, instance, kernel, ramdisk, use_pv_kernel=False):
        """Create a VM record.  Returns a Deferred that gives the new
        VM reference.
        The use_pv_kernel flag indicates whether the guest is HVM or PV.

        There are 3 scenarios:

            1. Using paravirtualization,  kernel passed in

            2. Using paravirtualization, kernel within the image

            3. Using hardware virtualization
        """

        inst_type_id = instance.instance_type_id
        instance_type = instance_types.get_instance_type(inst_type_id)
        mem = str(long(instance_type["memory_mb"]) * 1024 * 1024)
        vcpus = str(instance_type["vcpus"])
        rec = {
            "actions_after_crash": "destroy",
            "actions_after_reboot": "restart",
            "actions_after_shutdown": "destroy",
            "affinity": "",
            "blocked_operations": {},
            "ha_always_run": False,
            "ha_restart_priority": "",
            "HVM_boot_params": {},
            "HVM_boot_policy": "",
            "is_a_template": False,
            "memory_dynamic_min": mem,
            "memory_dynamic_max": mem,
            "memory_static_min": "0",
            "memory_static_max": mem,
            "memory_target": mem,
            "name_description": "",
            "name_label": instance.name,
            "other_config": {"allowvssprovider": False},
            "other_config": {},
            "PCI_bus": "",
            "platform": {"acpi": "true", "apic": "true", "pae": "true", "viridian": "true", "timeoffset": "0"},
            "PV_args": "",
            "PV_bootloader": "",
            "PV_bootloader_args": "",
            "PV_kernel": "",
            "PV_legacy_args": "",
            "PV_ramdisk": "",
            "recommendations": "",
            "tags": [],
            "user_version": "0",
            "VCPUs_at_startup": vcpus,
            "VCPUs_max": vcpus,
            "VCPUs_params": {},
            "xenstore_data": {},
        }
        # Complete VM configuration record according to the image type
        # non-raw/raw with PV kernel/raw in HVM mode
        if use_pv_kernel:
            rec["platform"]["nx"] = "false"
            if instance.kernel_id:
                # 1. Kernel explicitly passed in, use that
                rec["PV_args"] = "root=/dev/xvda1"
                rec["PV_kernel"] = kernel
                rec["PV_ramdisk"] = ramdisk
            else:
                # 2. Use kernel within the image
                rec["PV_bootloader"] = "pygrub"
        else:
            # 3. Using hardware virtualization
            rec["platform"]["nx"] = "true"
            rec["HVM_boot_params"] = {"order": "dc"}
            rec["HVM_boot_policy"] = "BIOS order"

        LOG.debug(_("Created VM %s..."), instance.name)
        vm_ref = session.call_xenapi("VM.create", rec)
        instance_name = instance.name
        LOG.debug(_("Created VM %(instance_name)s as %(vm_ref)s.") % locals())
        return vm_ref
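Note how the record sizes memory: the flavor's memory_mb is converted to bytes and the same value is used for the dynamic and static maxima and the target, which effectively disables ballooning. That conversion in isolation (Python 2, hence long):

    def xenapi_memory_fields(instance_type):
        # memory_mb -> bytes, stringified as the VM.create record expects
        mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
        return {'memory_dynamic_min': mem,
                'memory_dynamic_max': mem,
                'memory_static_min': '0',
                'memory_static_max': mem,
                'memory_target': mem}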
Example #49
    def _select_machine(self, context, instance):
        inst_type = instance_types.get_instance_type(
            instance['instance_type_id'])

        bmm_found = None
        reuse = False

        # create a non autocommit session
        session = get_session_dodai(False)
        session.begin()
        try:
            bmms = db.bmm_get_all_by_instance_type(context, inst_type["name"],
                                                   session)
            # Add a machine to the resource pool.
            if instance["availability_zone"] == "resource_pool":
                for bmm in bmms:
                    if bmm["availability_zone"] != "resource_pool":
                        continue

                    if bmm["status"] != "inactive":
                        continue

                    bmm_found = bmm
                    break
            else:
                for bmm in bmms:
                    if bmm["availability_zone"] != "resource_pool":
                        continue

                    if bmm["status"] != "active":
                        continue

                    instance_ref = db.instance_get(context, bmm["instance_id"])
                    if instance_ref["image_ref"] != instance["image_ref"]:
                        continue

                    bmm_found = bmm
                    reuse = True
                    break

                if not bmm_found:
                    for bmm in bmms:
                        if bmm["status"] == "used" or bmm[
                                "status"] == "processing":
                            continue

                        bmm_found = bmm
                        reuse = False
                        break

            if bmm_found:
                db.bmm_update(context, bmm_found["id"],
                              {"status": "processing"}, session)
        except Exception as ex:
            LOG.exception(ex)
            session.rollback()
            raise exception.BareMetalMachineUnavailable()

        session.commit()

        if bmm_found:
            return bmm_found, reuse

        raise exception.BareMetalMachineUnavailable()
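The selection runs inside a manually managed, non-autocommit session so that marking the chosen machine "processing" is atomic with the scan; on any error the transaction rolls back and the machine stays available to other schedulers. The control flow, reduced to a skeleton that takes the session as an argument (above it comes from the project-specific get_session_dodai factory):

    def reserve_machine(session):
        # session: a non-autocommit SQLAlchemy session, as returned
        # by get_session_dodai(False) above
        session.begin()
        try:
            pass  # scan candidates, mark the chosen one 'processing'
        except Exception:
            session.rollback()  # leave the machine untouched
            raise
        session.commit()  # publish the reservation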
Example #50
    def create_vm(cls, session, instance, kernel, ramdisk,
                  use_pv_kernel=False):
        """Create a VM record.  Returns a Deferred that gives the new
        VM reference.
        The use_pv_kernel flag indicates whether the guest is HVM or PV.

        There are 3 scenarios:

            1. Using paravirtualization,  kernel passed in

            2. Using paravirtualization, kernel within the image

            3. Using hardware virtualization
        """

        inst_type_id = instance.instance_type_id
        instance_type = instance_types.get_instance_type(inst_type_id)
        mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
        vcpus = str(instance_type['vcpus'])
        rec = {
            'actions_after_crash': 'destroy',
            'actions_after_reboot': 'restart',
            'actions_after_shutdown': 'destroy',
            'affinity': '',
            'blocked_operations': {},
            'ha_always_run': False,
            'ha_restart_priority': '',
            'HVM_boot_params': {},
            'HVM_boot_policy': '',
            'is_a_template': False,
            'memory_dynamic_min': mem,
            'memory_dynamic_max': mem,
            'memory_static_min': '0',
            'memory_static_max': mem,
            'memory_target': mem,
            'name_description': '',
            'name_label': instance.name,
            'other_config': {'allowvssprovider': False},
            'PCI_bus': '',
            'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true',
                         'viridian': 'true', 'timeoffset': '0'},
            'PV_args': '',
            'PV_bootloader': '',
            'PV_bootloader_args': '',
            'PV_kernel': '',
            'PV_legacy_args': '',
            'PV_ramdisk': '',
            'recommendations': '',
            'tags': [],
            'user_version': '0',
            'VCPUs_at_startup': vcpus,
            'VCPUs_max': vcpus,
            'VCPUs_params': {},
            'xenstore_data': {}}
        # Complete VM configuration record according to the image type
        # non-raw/raw with PV kernel/raw in HVM mode
        if use_pv_kernel:
            rec['platform']['nx'] = 'false'
            if instance.kernel_id:
                # 1. Kernel explicitly passed in, use that
                rec['PV_args'] = 'root=/dev/xvda1'
                rec['PV_kernel'] = kernel
                rec['PV_ramdisk'] = ramdisk
            else:
                # 2. Use kernel within the image
                rec['PV_bootloader'] = 'pygrub'
        else:
            # 3. Using hardware virtualization
            rec['platform']['nx'] = 'true'
            rec['HVM_boot_params'] = {'order': 'dc'}
            rec['HVM_boot_policy'] = 'BIOS order'

        LOG.debug(_('Created VM %s...'), instance.name)
        vm_ref = session.call_xenapi('VM.create', rec)
        instance_name = instance.name
        LOG.debug(_('Created VM %(instance_name)s as %(vm_ref)s.') % locals())
        return vm_ref
Example #51
    def _create_image(self, context, inst, xml, suffix='',
                      disk_images=None, network_info=None,
                      block_device_info=None):
        if not suffix:
            suffix = ''

        # syntactic nicety
        def basepath(fname='', suffix=suffix):
            return os.path.join(FLAGS.instances_path,
                                inst['name'],
                                fname + suffix)

        # ensure directories exist and are writable
        libvirt_utils.ensure_tree(basepath(suffix=''))
        utils.execute('chmod', '0777', basepath(suffix=''))

        LOG.info(_('instance %s: Creating image'), inst['name'])

        if FLAGS.baremetal_type == 'lxc':
            container_dir = '%s/rootfs' % basepath(suffix='')
            libvirt_utils.ensure_tree(container_dir)

        # NOTE(vish): No need to add the suffix to console.log
        libvirt_utils.write_to_file(basepath('console.log', ''), '', 007)

        if not disk_images:
            disk_images = {'image_id': inst['image_ref'],
                           'kernel_id': inst['kernel_id'],
                           'ramdisk_id': inst['ramdisk_id']}

        if disk_images['kernel_id']:
            fname = disk_images['kernel_id']
            self._cache_image(fn=libvirt_utils.fetch_image,
                              context=context,
                              target=basepath('kernel'),
                              fname=fname,
                              cow=False,
                              image_id=disk_images['kernel_id'],
                              user_id=inst['user_id'],
                              project_id=inst['project_id'])
            if disk_images['ramdisk_id']:
                fname = disk_images['ramdisk_id']
                self._cache_image(fn=libvirt_utils.fetch_image,
                                  context=context,
                                  target=basepath('ramdisk'),
                                  fname=fname,
                                  cow=False,
                                  image_id=disk_images['ramdisk_id'],
                                  user_id=inst['user_id'],
                                  project_id=inst['project_id'])

        root_fname = hashlib.sha1(str(disk_images['image_id'])).hexdigest()
        size = inst['root_gb'] * 1024 * 1024 * 1024

        inst_type_id = inst['instance_type_id']
        inst_type = instance_types.get_instance_type(inst_type_id)
        if inst_type['name'] == 'm1.tiny' or suffix == '.rescue':
            size = None
            root_fname += "_sm"
        else:
            root_fname += "_%d" % inst['root_gb']

        self._cache_image(fn=libvirt_utils.fetch_image,
                          context=context,
                          target=basepath('root'),
                          fname=root_fname,
                          cow=False,  # FLAGS.use_cow_images,
                          image_id=disk_images['image_id'],
                          user_id=inst['user_id'],
                          project_id=inst['project_id'],
                          size=size)

        # For now, we assume that if we're not using a kernel, we're using a
        # partitioned disk image where the target partition is the first
        # partition
        target_partition = None
        if not inst['kernel_id']:
            target_partition = "1"

        if FLAGS.baremetal_type == 'lxc':
            target_partition = None

        if inst['key_data']:
            key = str(inst['key_data'])
        else:
            key = None
        net = None

        nets = []
        ifc_template = open(FLAGS.injected_network_template).read()
        ifc_num = -1
        have_injected_networks = False
        admin_context = nova_context.get_admin_context()
        for (network_ref, mapping) in network_info:
            ifc_num += 1

            if not network_ref['injected']:
                continue

            have_injected_networks = True
            address = mapping['ips'][0]['ip']
            netmask = mapping['ips'][0]['netmask']
            address_v6 = None
            gateway_v6 = None
            netmask_v6 = None
            if FLAGS.use_ipv6:
                address_v6 = mapping['ip6s'][0]['ip']
                netmask_v6 = mapping['ip6s'][0]['netmask']
                gateway_v6 = mapping['gateway_v6']
            net_info = {'name': 'eth%d' % ifc_num,
                        'address': address,
                        'netmask': netmask,
                        'gateway': mapping['gateway'],
                        'broadcast': mapping['broadcast'],
                        'dns': ' '.join(mapping['dns']),
                        'address_v6': address_v6,
                        'gateway_v6': gateway_v6,
                        'netmask_v6': netmask_v6}
            nets.append(net_info)

        if have_injected_networks:
            net = str(Template(ifc_template,
                               searchList=[{'interfaces': nets,
                                            'use_ipv6': FLAGS.use_ipv6}]))

        metadata = inst.get('metadata')
        if any((key, net, metadata)):
            inst_name = inst['name']

            injection_path = basepath('root')
            img_id = inst.image_ref
            disable_auto_fsck = True

            for injection in ('metadata', 'key', 'net'):
                if locals()[injection]:
                    LOG.info(_('instance %(inst_name)s: injecting '
                               '%(injection)s into image %(img_id)s')
                             % locals())
            try:
                disk.inject_data(injection_path, key, net, metadata,
                                 partition=target_partition,
                                 use_cow=False,  # FLAGS.use_cow_images,
                                 disable_auto_fsck=disable_auto_fsck)

            except Exception as e:
                # This could be a windows image, or a vmdk format disk
                LOG.warn(_('instance %(inst_name)s: ignoring error injecting'
                        ' data into image %(img_id)s (%(e)s)') % locals())
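The network injection in this variant renders a Cheetah template against the collected net_info dicts. A minimal standalone rendering; the inline template and the single interface are hypothetical stand-ins for FLAGS.injected_network_template and the real mapping data:

    from Cheetah.Template import Template

    # hypothetical template standing in for the injected_network_template file
    ifc_template = ("#for $ifc in $interfaces\n"
                    "iface $ifc.name inet static\n"
                    "    address $ifc.address\n"
                    "#end for\n")
    nets = [{'name': 'eth0', 'address': '10.0.0.2',
             'netmask': '255.255.255.0', 'gateway': '10.0.0.1',
             'broadcast': '10.0.0.255', 'dns': '8.8.8.8',
             'address_v6': None, 'gateway_v6': None, 'netmask_v6': None}]
    net = str(Template(ifc_template,
                       searchList=[{'interfaces': nets,
                                    'use_ipv6': False}]))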
Example #52
    def create_vm(cls, session, instance, kernel, ramdisk,
                  use_pv_kernel=False):
        """Create a VM record.  Returns a Deferred that gives the new
        VM reference.
        The use_pv_kernel flag indicates whether the guest is HVM or PV.

        There are 3 scenarios:

            1. Using paravirtualization,  kernel passed in

            2. Using paravirtualization, kernel within the image

            3. Using hardware virtualization
        """

        inst_type_id = instance.instance_type_id
        instance_type = instance_types.get_instance_type(inst_type_id)
        mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
        vcpus = str(instance_type['vcpus'])
        rec = {
            'actions_after_crash': 'destroy',
            'actions_after_reboot': 'restart',
            'actions_after_shutdown': 'destroy',
            'affinity': '',
            'blocked_operations': {},
            'ha_always_run': False,
            'ha_restart_priority': '',
            'HVM_boot_params': {},
            'HVM_boot_policy': '',
            'is_a_template': False,
            'memory_dynamic_min': mem,
            'memory_dynamic_max': mem,
            'memory_static_min': '0',
            'memory_static_max': mem,
            'memory_target': mem,
            'name_description': '',
            'name_label': instance.name,
            'other_config': {'allowvssprovider': False},
            'PCI_bus': '',
            'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true',
                         'viridian': 'true', 'timeoffset': '0'},
            'PV_args': '',
            'PV_bootloader': '',
            'PV_bootloader_args': '',
            'PV_kernel': '',
            'PV_legacy_args': '',
            'PV_ramdisk': '',
            'recommendations': '',
            'tags': [],
            'user_version': '0',
            'VCPUs_at_startup': vcpus,
            'VCPUs_max': vcpus,
            'VCPUs_params': {},
            'xenstore_data': {}}
        # Complete VM configuration record according to the image type
        # non-raw/raw with PV kernel/raw in HVM mode
        if use_pv_kernel:
            rec['platform']['nx'] = 'false'
            if instance.kernel_id:
                # 1. Kernel explicitly passed in, use that
                rec['PV_args'] = 'root=/dev/xvda1'
                rec['PV_kernel'] = kernel
                rec['PV_ramdisk'] = ramdisk
            else:
                # 2. Use kernel within the image
                rec['PV_bootloader'] = 'pygrub'
        else:
            # 3. Using hardware virtualization
            rec['platform']['nx'] = 'true'
            rec['HVM_boot_params'] = {'order': 'dc'}
            rec['HVM_boot_policy'] = 'BIOS order'

        LOG.debug(_('Created VM %s...'), instance.name)
        vm_ref = session.call_xenapi('VM.create', rec)
        instance_name = instance.name
        LOG.debug(_('Created VM %(instance_name)s as %(vm_ref)s.') % locals())
        return vm_ref
Example #53
File: api.py Project: yosh/nova
    def create(self, context, instance_type,
               image_id, kernel_id=None, ramdisk_id=None,
               min_count=1, max_count=1,
               display_name='', display_description='',
               key_name=None, key_data=None, security_group='default',
               availability_zone=None, user_data=None, metadata=[],
               onset_files=None):
        """Create the number of instances requested if quota and
        other arguments check out ok."""

        type_data = instance_types.get_instance_type(instance_type)
        num_instances = quota.allowed_instances(context, max_count, type_data)
        if num_instances < min_count:
            pid = context.project_id
            LOG.warn(_("Quota exceeeded for %(pid)s,"
                    " tried to run %(min_count)s instances") % locals())
            raise quota.QuotaError(_("Instance quota exceeded. You can only "
                                     "run %s more instances of this type.") %
                                   num_instances, "InstanceLimitExceeded")

        num_metadata = len(metadata)
        quota_metadata = quota.allowed_metadata_items(context, num_metadata)
        if quota_metadata < num_metadata:
            pid = context.project_id
            msg = (_("Quota exceeeded for %(pid)s,"
                     " tried to set %(num_metadata)s metadata properties")
                   % locals())
            LOG.warn(msg)
            raise quota.QuotaError(msg, "MetadataLimitExceeded")

        # Because metadata is stored in the DB, we hard-code the size limits
        # In future, we may support more variable length strings, so we act
        #  as if this is quota-controlled for forwards compatibility
        for metadata_item in metadata:
            k = metadata_item['key']
            v = metadata_item['value']
            if len(k) > 255 or len(v) > 255:
                pid = context.project_id
                msg = (_("Quota exceeeded for %(pid)s,"
                         " metadata property key or value too long")
                       % locals())
                LOG.warn(msg)
                raise quota.QuotaError(msg, "MetadataLimitExceeded")

        image = self.image_service.show(context, image_id)
        if kernel_id is None:
            kernel_id = image.get('kernel_id', None)
        if ramdisk_id is None:
            ramdisk_id = image.get('ramdisk_id', None)
        # FIXME(sirp): is there a way we can remove null_kernel?
        # No kernel and ramdisk for raw images
        if kernel_id == str(FLAGS.null_kernel):
            kernel_id = None
            ramdisk_id = None
            LOG.debug(_("Creating a raw instance"))
        # Make sure we have access to kernel and ramdisk (if not raw)
        logging.debug("Using Kernel=%s, Ramdisk=%s" %
                       (kernel_id, ramdisk_id))
        if kernel_id:
            self.image_service.show(context, kernel_id)
        if ramdisk_id:
            self.image_service.show(context, ramdisk_id)

        if security_group is None:
            security_group = ['default']
        if not isinstance(security_group, list):
            security_group = [security_group]

        security_groups = []
        self.ensure_default_security_group(context)
        for security_group_name in security_group:
            group = db.security_group_get_by_name(context,
                                                  context.project_id,
                                                  security_group_name)
            security_groups.append(group['id'])

        if key_data is None and key_name:
            key_pair = db.key_pair_get(context, context.user_id, key_name)
            key_data = key_pair['public_key']

        base_options = {
            'reservation_id': utils.generate_uid('r'),
            'image_id': image_id,
            'kernel_id': kernel_id or '',
            'ramdisk_id': ramdisk_id or '',
            'state_description': 'scheduling',
            'user_id': context.user_id,
            'project_id': context.project_id,
            'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
            'instance_type': instance_type,
            'memory_mb': type_data['memory_mb'],
            'vcpus': type_data['vcpus'],
            'local_gb': type_data['local_gb'],
            'display_name': display_name,
            'display_description': display_description,
            'user_data': user_data or '',
            'key_name': key_name,
            'key_data': key_data,
            'locked': False,
            'metadata': metadata,
            'availability_zone': availability_zone}
        elevated = context.elevated()
        instances = []
        LOG.debug(_("Going to run %s instances..."), num_instances)
        for num in range(num_instances):
            instance = dict(mac_address=utils.generate_mac(),
                            launch_index=num,
                            **base_options)
            instance = self.db.instance_create(context, instance)
            instance_id = instance['id']

            elevated = context.elevated()
            if not security_groups:
                security_groups = []
            for security_group_id in security_groups:
                self.db.instance_add_security_group(elevated,
                                                    instance_id,
                                                    security_group_id)

            # Set sane defaults if not specified
            updates = dict(hostname=self.hostname_factory(instance_id))
            if (not hasattr(instance, 'display_name') or
                    instance.display_name is None):
                updates['display_name'] = "Server %s" % instance_id

            instance = self.update(context, instance_id, **updates)
            instances.append(instance)

            pid = context.project_id
            uid = context.user_id
            LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
                    " instance %(instance_id)s") % locals())
            rpc.cast(context,
                     FLAGS.scheduler_topic,
                     {"method": "run_instance",
                      "args": {"topic": FLAGS.compute_topic,
                               "instance_id": instance_id,
                               "availability_zone": availability_zone,
                               "onset_files": onset_files}})

        for group_id in security_groups:
            self.trigger_security_group_members_refresh(elevated, group_id)

        return [dict(x.iteritems()) for x in instances]
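Both create variants enforce the same hard-coded cap on metadata items: keys and values may be at most 255 characters, matching the DB column sizes. Isolated from the quota machinery (and raising a plain ValueError rather than quota.QuotaError):

    def validate_metadata_items(metadata):
        # mirrors the 255-character cap enforced in create() above
        for item in metadata:
            if len(item['key']) > 255 or len(item['value']) > 255:
                raise ValueError('metadata property key or value too long')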